diff --git a/scripts/unit_tests/compare_tensor.py b/scripts/unit_tests/compare_tensor.py
new file mode 100644
index 0000000..20d46e5
--- /dev/null
+++ b/scripts/unit_tests/compare_tensor.py
@@ -0,0 +1,35 @@
+import os
+
+import torch
+import pytorch_lightning as pl
+
+import numpy as np
+from scripts.utils.params import Parameters
+
+
+# Compare one saved radar frame from the velocity-normalized dataset with the
+# corresponding frame from the intensity-normalized dataset.
+if __name__ == '__main__':
+    args = Parameters()
+    tensor_name = 'arpg_lab_run0'
+
+    dir = args.directory
+    vel_dir = args.vel_directory
+
+    # Frame 110 of the same sequence in each dataset
+    vel_tensor_str = os.path.join(vel_dir, tensor_name + '_odom_data/images', str(110) + '.pt')
+    int_tensor_str = os.path.join(dir, tensor_name + '_odom_data/images', str(110) + '.pt')
+    vel_tensor = torch.load(vel_tensor_str)
+    int_tensor = torch.load(int_tensor_str)
+
+    print(vel_tensor.size())
+    print(int_tensor.size())
+    # Single range profile at azimuth bin 31, elevation bin 31
+    vel_tensor_reduced = vel_tensor[0,31,:,31]
+    int_tensor_reduced = int_tensor[0,31,:,31]
+
+    print('Vel: ', vel_tensor_reduced)
+    print('Int: ', int_tensor_reduced)
+
+    print(torch.eq(int_tensor_reduced,vel_tensor_reduced))
+
diff --git a/scripts/unit_tests/train_enc_batch.py b/scripts/unit_tests/train_enc_batch.py
index 5578cbb..12cbd82 100644
--- a/scripts/unit_tests/train_enc_batch.py
+++ b/scripts/unit_tests/train_enc_batch.py
@@ -6,7 +6,8 @@ import torch
 from torch.utils.data import DataLoader
 import torch.nn.functional as F
 
-from torch.utils.tensorboard import SummaryWriter
+# from torch.utils.tensorboard import SummaryWriter
+import wandb
 
 from scripts.models.kramer_original import KramerOriginal
 
@@ -134,6 +135,7 @@ def validate(test_loader, full_errors=False, batch_size=16):
 
 if __name__ == '__main__':
     args = Parameters()
+    run = wandb.init(project = 'TBRO')
 
     # Load dataset and create torch dataloader
     print("Root dir: " + args.directory)
@@ -157,7 +159,7 @@ def validate(test_loader, full_errors=False, batch_size=16):
         print('CPU used.')
 
     # Load Model
-    model = KramerOriginal()
+    model = KramerOriginal(args)
     model = model.to(device)
 
     # Create an optimizer (TODO: Test optimizers)
@@ -177,7 +179,7 @@ def validate(test_loader, full_errors=False, batch_size=16):
 
     num_epochs = args.epochs
 
-    writer = SummaryWriter()
+    # writer = SummaryWriter()
 
     if not args.eval_only:
         print('---Training Model---')
@@ -213,7 +215,7 @@ def validate(test_loader, full_errors=False, batch_size=16):
                 loss = 1 * orientation_loss + position_loss
 
                 # writer.add_scalar("Training Loss",loss,i)
-                writer.add_scalar("Training Loss",loss,i+epoch*len(train_dataset))
+                wandb.log({"Training Loss": loss})
 
                 train_losses.append(loss.item())
 
@@ -250,14 +252,14 @@ def validate(test_loader, full_errors=False, batch_size=16):
             model = model.eval()
             test_losses = validate(test_loader,full_errors = False, batch_size=args.batch_size)
             epoch_test_losses.append(sum(test_losses) / float(len(test_losses)))
-            writer.add_scalar("Validation Loss", sum(test_losses) / float(len(test_losses)),epoch)
+            wandb.log({"Validation Loss": sum(test_losses) / float(len(test_losses))})
             scheduler.step()
            epoch_train_losses.append(sum(train_losses) / float(len(train_losses)))
 
             if args.save_filename != '':
                 torch.save(model.state_dict(), args.save_filename)
 
-            writer.flush()
+            # writer.flush()
 
     print('Running final evaluation.')
     model = model.eval()
@@ -270,7 +272,7 @@ def validate(test_loader, full_errors=False, batch_size=16):
     torch.save(epoch_test_losses, 'lr_'+str(args.learning_rate)+'test_losses.pt')
     torch.save(epoch_train_losses, 'lr_'+str(args.learning_rate)+'train_losses.pt')
 
-    writer.close()
+    # writer.close()
 
     '''
     plt.plot(epoch_test_losses, c='b', label='test loss')
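Note on the logging swap above: writer.add_scalar carried an explicit global step (i + epoch*len(train_dataset)), while the new wandb.log call relies on wandb's internal step counter, so training and validation points no longer share an explicit x-axis. Below is a minimal sketch of how the same step could be kept, assuming the loop variables of train_enc_batch.py (num_epochs, train_loader, train_dataset, test_losses); compute_loss is a hypothetical stand-in for the KramerOriginal forward pass and loss terms, not a function in the repo.

import wandb

run = wandb.init(project='TBRO')

for epoch in range(num_epochs):
    for i, batch in enumerate(train_loader):
        loss = compute_loss(batch)                      # stand-in for the model forward + loss
        global_step = i + epoch * len(train_dataset)    # same formula the SummaryWriter call used
        wandb.log({"Training Loss": loss.item()}, step=global_step)

    # log the per-epoch validation loss on the same step axis
    val_loss = sum(test_losses) / float(len(test_losses))
    wandb.log({"Validation Loss": val_loss}, step=global_step)

run.finish()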
diff --git a/scripts/utils/dataset_creator.py b/scripts/utils/dataset_creator.py
index bbed686..b7205a9 100644
--- a/scripts/utils/dataset_creator.py
+++ b/scripts/utils/dataset_creator.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import sys
 import argparse
 import os
@@ -12,6 +13,8 @@ import scipy.spatial.transform as tf
 import matplotlib.pyplot as plt
 from scipy.interpolate import UnivariateSpline, interp1d
 
+from scripts.utils.dataset_creator_params import Parameters
+from scripts.utils.dataset import DeepRODataset
 
 import rosbag
 
@@ -19,7 +22,6 @@ from dca1000_device_msgs.msg import MimoMsg
 
 #from deep_ro_dataset import DeepRODataset
-from utils.params import Parameters
 
 
 class ThreeDSample(nn.Module):
     def __init__(self, method='trilinear'):
@@ -123,17 +125,34 @@ def read_bag(filename, radar_topic, odom_topic):
     bag_msgs = bag.read_messages(topics=[radar_topic])
     msgs['radar'] = [process_img(msg,t) for (topic,msg,t) in bag_msgs]
 
-    max_intensity = 0
-    min_intensity = 1e10
+    # Normalize intensity on [0,1]
+    #############################################
+    # Removed for velocity filtering
+    #
+    # max_intensity = 0
+    # min_intensity = 1e10
+    # for radar_im in msgs['radar']:
+    #     if radar_im[1][0,:,:,:].max() > max_intensity:
+    #         max_intensity = radar_im[1][0,:,:,:].max()
+    #     if radar_im[1][0,:,:,:].min() < min_intensity:
+    #         min_intensity = radar_im[1][0,:,:,:].min()
+    #
+    # for radar_im in msgs['radar']:
+    #     radar_im[1][0,:,:,:] -= min_intensity
+    #     radar_im[1][0,:,:,:] /= (max_intensity - min_intensity)
+    #
+    ############################################################
+    # Normalize velocity on [-1,1]
+    max_velocity = 0
     for radar_im in msgs['radar']:
-        if radar_im[1][0,:,:,:].max() > max_intensity:
-            max_intensity = radar_im[1][0,:,:,:].max()
-        if radar_im[1][0,:,:,:].min() < min_intensity:
-            min_intensity = radar_im[1][0,:,:,:].min()
+        if radar_im[1][0,:,:,:].max() > max_velocity:
+            max_velocity = radar_im[1][0,:,:,:].max()
+        if radar_im[1][0,:,:,:].min() < -1*max_velocity:
+            max_velocity = -1*radar_im[1][0,:,:,:].min()
 
     for radar_im in msgs['radar']:
-        radar_im[1][0,:,:,:] -= min_intensity
-        radar_im[1][0,:,:,:] /= (max_intensity - min_intensity)
+        radar_im[1][0,:,:,:] /= max_velocity
+
     msgs['odom'].sort()
     msgs['radar'].sort()
 
@@ -219,13 +238,19 @@ def resample_to_cartesian(polar_img, dim, vox_width, bin_widths):
                 im_points[i,j,k,:] = torch.tensor([r_bin,theta_bin,phi_bin])
 
     im_points = im_points.view(1,-1,3,1).to(device)
-    if verbose_debug:
-        print("Shape of cartesian tensor:", im_points.shape)
+    # if verbose_debug:
+    #     print("Shape of cartesian tensor:", im_points.shape)
 
     polar_img = torch.from_numpy(polar_img[0:1,:,:,:])
     polar_img = polar_img.to(device)
 
     # need to verify sampler function with multi-channel images
+    # if verbose_debug:
+    #     print("Polar_img_shape as passed to cartesian_arr: 1", polar_img.shape[0], ',',
+    #           polar_img.shape[1], ',',
+    #           polar_img.shape[2], ',',
+    #           polar_img.shape[3])
+    #     print("Oringinal Polar Image Shape: ", polar_img.size())
     cartesian_arr = sampler(im_points,
                             polar_img.view(1,
                                            polar_img.shape[0],
@@ -268,8 +293,16 @@ def process_img(msg,t):
             for el_idx in range(height):
                 angle_idx = az_idx + width * el_idx;
                 src_idx = 2 * (range_idx + depth * angle_idx)
-                img[0,range_idx,az_idx,el_idx] = arr[src_idx]
-                img[1,range_idx,az_idx,el_idx] = arr[src_idx+1]
+                ##############################################
+                # Switched for velocity
+                #
+                # img[0,range_idx,az_idx,el_idx] = arr[src_idx]
+                # img[1,range_idx,az_idx,el_idx] = arr[src_idx+1]
+                #
+                ###############################################
+                img[0,range_idx,az_idx,el_idx] = arr[src_idx+1]
+                img[1,range_idx,az_idx,el_idx] = arr[src_idx]
+
     if im_coords is None:
         im_coords = np.zeros((3, depth, width, height))
 
@@ -362,6 +395,8 @@ def temporal_align(msgs, T_br):
 
         # get radar-to-world transformation
        T_wr = np.dot(T_wb, T_br)
+
+        ####### TODO: Try msgs['radar'][radar_idx][0] for velocity?###########
         synced_msgs.append({'timestamp':radar_stamp,
                             'img':msgs['radar'][radar_idx][1],
                             'pose':torch.from_numpy(T_wr).float()})
@@ -393,6 +428,36 @@ def read_tf_file(filename):
 
     return T_br
 
+class simple_dataset():
+    def __init__(self, args: Parameters, root_dir):
+        self.img_list = []
+        self.root_dir = root_dir
+
+    def add_item(self, img, pose, name, seq):
+        name = name[:name.find('.')] # get name without file extension
+
+        if not os.path.exists(self.root_dir + name + '_odom_data'):
+            os.mkdir(self.root_dir + name + '_odom_data')
+            os.mkdir(self.root_dir + name + '_odom_data/images')
+            os.mkdir(self.root_dir + name + '_odom_data/poses')
+
+        img_filename = self.root_dir + name + '_odom_data/images/' + str(seq) + '.pt'
+        pose_filename = self.root_dir + name + '_odom_data/poses/' + str(seq) + '.pt'
+
+        torch.save(img, img_filename)
+        torch.save(pose, pose_filename)
+
+        self.img_list.append(name + '_' + str(seq))
+
+
+    def save_img_list(self, name):
+        name = name[:name.find('.')]
+        filename = self.root_dir + name + '_odom_data/img_list.txt'
+        with open(filename, 'w') as file:
+            for img_name in self.img_list:
+                file.write(img_name)
+                file.write('\n')
+
 
 
 if __name__ == "__main__":
@@ -406,7 +471,10 @@ def read_tf_file(filename):
     if args.directory[-1] != '/':
         args.directory = args.directory + '/'
 
+    # Read in base to radar file to align radar and imu
     T_br = read_tf_file(args.directory + args.base_to_radar)
+
+    # Read in rosbags
     bag_list = open(args.directory + args.bags)
     for bag_name in bag_list:
         print('getting messages')
@@ -417,7 +485,7 @@ def read_tf_file(filename):
         msgs_aligned = temporal_align(msgs, T_br)
 
         print('creating data tuples')
-        dataset = DeepRODataset(root_dir=args.directory)
+        dataset = simple_dataset(args, args.directory + 'velocity_resampled/')
         for i in range(len(msgs_aligned)):
             # get pose relative to initial pose
             dataset.add_item(msgs_aligned[i]['img'],
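The read_bag() change above rescales the Doppler-velocity channel to [-1, 1] by dividing every frame by the largest absolute velocity found in the bag. Below is a compact sketch of the same normalization, assuming each entry of msgs['radar'] is a (timestamp, image) pair with velocity in channel 0; the guard against an all-zero channel is an extra precaution not present in the diff.

def normalize_velocity(frames):
    # largest absolute velocity over channel 0 of every frame
    max_velocity = max(float(abs(frame[1][0, :, :, :]).max()) for frame in frames)
    if max_velocity > 0:  # avoid dividing by zero if the channel is empty
        for frame in frames:
            frame[1][0, :, :, :] /= max_velocity
    return frames

# e.g. msgs['radar'] = normalize_velocity(msgs['radar'])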
diff --git a/scripts/utils/dataset_creator_params.py b/scripts/utils/dataset_creator_params.py
new file mode 100644
index 0000000..efe82ab
--- /dev/null
+++ b/scripts/utils/dataset_creator_params.py
@@ -0,0 +1,27 @@
+import os
+
+class Parameters():
+    def __init__(self):
+
+        # Root directory in which the ColoRadar Bags are stored
+        # self.directory = '/media/giantdrive/andrew/ColoRadar/resampled/'
+        # self.directory = '/home/kharlow/andrew/'
+        # self.directory = '/home/arpg/datasets/andrew/resampled/'
+        self.directory = '/home/arpg/datasets/coloradar/'
+        # Text file containing bag file names
+        self.bags = 'bag_list_test.txt'
+        # Radar topic, type dca1000_device/RadarCubeMsg
+        self.radar_topic = '/cascade/heatmap'
+        # Odometry Topic used to align heatmaps, type nav_msgs/Odometry
+        self.odom_topic = '/lidar_ground_truth'
+        # File containing odom to radar transform
+        self.base_to_radar = 'calib/transforms/base_to_cascade.txt'
+        # File containing pre-trained encoder weights
+        # self.pretrained_enc_path = '/home/arpg/results_training/results/best_encoder_modified.model'
+        # self.pretrained_enc_path = '/home/kharlow/ws/TBRO/src/TBRO/scripts/models/weights/best_encoder_modified.model'
+
+
+        self.radar_shape = [64, 128, 64]
+
+
+
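For reference, simple_dataset (added above) lays each bag out as <root>/<name>_odom_data/images/<seq>.pt, <root>/<name>_odom_data/poses/<seq>.pt, plus a per-bag img_list.txt of '<name>_<seq>' entries. A small sketch of reading one sample back; the root path, sequence name, and frame index are the values that appear in params.py and compare_tensor.py and may need adjusting to wherever the creator actually wrote its output.

import os
import torch

def load_sample(root, name, seq):
    base = os.path.join(root, name + '_odom_data')
    img = torch.load(os.path.join(base, 'images', str(seq) + '.pt'))    # radar cube tensor
    pose = torch.load(os.path.join(base, 'poses', str(seq) + '.pt'))    # 4x4 radar-to-world pose
    return img, pose

img, pose = load_sample('/home/arpg/datasets/andrew/velocity_resampled/', 'arpg_lab_run0', 110)
print(img.size(), pose.size())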
diff --git a/scripts/utils/params.py b/scripts/utils/params.py
index c861478..3e4a1ca 100644
--- a/scripts/utils/params.py
+++ b/scripts/utils/params.py
@@ -5,9 +5,11 @@ def __init__(self):
 
         # Root directory in which the ColoRadar Bags are stored
         # self.directory = '/media/giantdrive/andrew/ColoRadar/resampled/'
-        self.directory = '/home/kharlow/andrew/resampled/'
+        # self.directory = '/home/kharlow/andrew/resampled/'
         # self.directory = '/home/kharlow/andrew/velocity_resampled/'
         # self.directory = '/home/arpg/datasets/andrew/resampled/'
+        self.directory = '/home/arpg/datasets/andrew/velocity_resampled/'
+
         # Text file containing bag file names
         self.list_bags = 'bag_list.txt'
         # Radar topic, type dca1000_device/RadarCubeMsg
@@ -33,8 +35,8 @@ def __init__(self):
         self.hidden_size = 1000
 
         # self.learning_rate = 1.0e-7
-        # self.learning_rate = 1.0e-5
-        self.learning_rate = 4.0e-3
+        self.learning_rate = 1.0e-5
+        # self.learning_rate = 4.0e-3
 
         self.alphas = [1.0, 1.0]
         self.betas = [1.0, 1.0]
@@ -45,7 +47,7 @@ def __init__(self):
         # Text file with filenames from which to load validation data
         self.val_files = 'med_test_dataset.txt'
         # File in which to save the model weight
-        self.save_filename = 'epoch_'+str(self.epochs)+'_batch_'+str(self.batch_size)+'_lr_'+str(self.learning_rate)+'_pretrained_tbro_test_batch.model'
+        self.save_filename = 'vel_epoch_'+str(self.epochs)+'_batch_'+str(self.batch_size)+'_lr_'+str(self.learning_rate)+'_pretrained_tbro_test_batch.model'
         # File from which to load model for testing only
         self.load_filename = ''
         # Bool, load model and run on test data only without training
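With these defaults, Parameters() now points training at the velocity_resampled data, lowers the learning rate to 1e-5, and prefixes the checkpoint name with 'vel_'. A quick, illustrative check of the resolved values; epochs and batch_size are set outside this hunk, so the filename shown in the comment is only indicative.

from scripts.utils.params import Parameters

args = Parameters()
print(args.directory)      # /home/arpg/datasets/andrew/velocity_resampled/
print(args.learning_rate)  # 1e-05
print(args.save_filename)  # vel_epoch_<epochs>_batch_<batch_size>_lr_1e-05_pretrained_tbro_test_batch.model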