Source code link: PCDet.
Unfortunately, reproducing SECOND also depends on the spconv library, which currently can only be compiled on Ubuntu (a virtual machine will not work either), so the reproduction still has not succeeded. I am tempted to set up a dual-boot system...
1 Data preprocessing: PCDet-master\pcdet\datasets\kitti\kitti_dataset.py
SECOND's data preprocessing is similar to that of most point cloud object detectors. Its main tasks are:
1> generate the kitti_infos_%s.pkl files that store the info of the train/val data;
2> generate the ground-truth database used for data augmentation.
def create_kitti_infos(data_path, save_path, workers=4):
    dataset = BaseKittiDataset(root_path=data_path)
    train_split, val_split = 'train', 'val'

    train_filename = save_path / ('kitti_infos_%s.pkl' % train_split)
    val_filename = save_path / ('kitti_infos_%s.pkl' % val_split)
    trainval_filename = save_path / 'kitti_infos_trainval.pkl'
    test_filename = save_path / 'kitti_infos_test.pkl'

    print('---------------Start to generate data infos---------------')

    # Generate and save the infos for the train / val / trainval / test splits
    dataset.set_split(train_split)
    kitti_infos_train = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
    with open(train_filename, 'wb') as f:
        pickle.dump(kitti_infos_train, f)
    print('Kitti info train file is saved to %s' % train_filename)

    dataset.set_split(val_split)
    kitti_infos_val = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
    with open(val_filename, 'wb') as f:
        pickle.dump(kitti_infos_val, f)
    print('Kitti info val file is saved to %s' % val_filename)

    with open(trainval_filename, 'wb') as f:
        pickle.dump(kitti_infos_train + kitti_infos_val, f)
    print('Kitti info trainval file is saved to %s' % trainval_filename)

    dataset.set_split('test')
    kitti_infos_test = dataset.get_infos(num_workers=workers, has_label=False, count_inside_pts=False)
    with open(test_filename, 'wb') as f:
        pickle.dump(kitti_infos_test, f)
    print('Kitti info test file is saved to %s' % test_filename)

    # Generate the groundtruth database files
    print('---------------Start create groundtruth database for data augmentation---------------')
    dataset.set_split(train_split)
    dataset.create_groundtruth_database(train_filename, split=train_split)
    print('---------------Data preparation Done---------------')


if __name__ == '__main__':
    # An extra command-line argument is expected, e.g. python kitti_dataset.py create_kitti_infos
    if sys.argv.__len__() > 1 and sys.argv[1] == 'create_kitti_infos':
        create_kitti_infos(
            data_path=cfg.ROOT_DIR / 'data' / 'kitti',
            save_path=cfg.ROOT_DIR / 'data' / 'kitti'
        )
    else:
        A = KittiDataset(root_path='data/kitti', class_names=cfg.CLASS_NAMES, split='train', training=True)
        # pdb.set_trace() sets a breakpoint: execution pauses here and drops into the
        # interactive debugger, so the sample A[1] can be inspected step by step
        import pdb
        pdb.set_trace()
        ans = A[1]
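Running python kitti_dataset.py create_kitti_infos therefore leaves four info pickle files under data/kitti, plus (roughly speaking) a ground-truth database of per-object point clips with an index pkl that the gt-sampling augmentation uses later. Below is a minimal sketch of how to peek into one of the generated info files; the key names (point_cloud, image, calib, annos, num_points_in_gt) follow PCDet's KITTI info format and may differ slightly between versions.

import pickle

# Minimal sketch for inspecting one entry of the generated info file.
# Key names follow PCDet's KITTI info format and may differ between versions.
with open('data/kitti/kitti_infos_train.pkl', 'rb') as f:
    kitti_infos_train = pickle.load(f)

info = kitti_infos_train[0]
print(info.keys())            # typically point_cloud / image / calib / annos
print(info['annos']['name'])  # class name of every annotated object in this frame
print(info['annos'].keys())   # locations, dimensions, boxes, num_points_in_gt, ...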
The helper functions called above (from BaseKittiDataset):
class BaseKittiDataset(DatasetTemplate):
    def __init__(self, root_path, split='train'):
        super().__init__()
        self.root_path = root_path
        self.root_split_path = os.path.join(self.root_path, 'training' if split != 'test' else 'testing')
        self.split = split

        if split in ['train', 'val', 'test']:
            split_dir = os.path.join(self.root_path, 'ImageSets', split + '.txt')
        # Read the sample indices out of the split file
        self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if os.path.exists(split_dir) else None

    # Re-initialize BaseKittiDataset for a different split (train / val / test)
    def set_split(self, split):
        self.__init__(self.root_path, split)

    # Load the lidar point cloud with np.fromfile
    def get_lidar(self, idx):
        lidar_file = os.path.join(self.root_split_path, 'velodyne', '%s.bin' % idx)
        assert os.path.exists(lidar_file)
        return np.fromfile(lidar_file, dtype=np.float32).reshape(-1, 4)

    # Get the image shape (height, width) as an np.array
    def get_image_shape(self, idx):
        img_file = os.path.join(self.root_split_path, 'image_2', '%s.png' % idx)
        assert os.path.exists(img_file)
        return np.array(io.imread(img_file).shape[:2], dtype=np.int32)

    # Load the label data with get_objects_from_label
    def get_label(self, idx):
        label_file = os.path.join(self.root_split_path, 'label_2', '%s.txt' % idx)
        assert os.path.exists(label_file)
        return object3d_utils.get_objects_from_label(label_file)

    # Build the calibration object from the calib file
    def get_calib(self, idx):
        calib_file = os.path.join(self.root_split_path, 'calib', '%s.txt' % idx)
        assert os.path.exists(calib_file)
        return calibration.Calibration(calib_file)
    # Load the road plane parameters from the planes file
    def get_road_plane(self, idx):
        plane_file = os.path.join(self.root_split_path, 'planes', '%s.txt' % idx)
        with open(plane_file, 'r') as f:
            lines = f.readlines()
        lines = [float(i) for i in lines[3].split()]
        plane = np.asarray(lines)
        # Make the plane normal face up (rectified camera coordinates) and normalize it
        if plane[1] > 0:
            plane = -plane
        return plane / np.linalg.norm(plane[0:3])
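To make the raw file formats behind get_lidar and get_road_plane concrete, here is a small standalone sketch (the frame id 000000 and the data/kitti path are just placeholders): each velodyne .bin is a flat float32 buffer with four values per point (x, y, z, reflectance), and the planes .txt keeps the four ground-plane coefficients on its fourth line, expressed in the rectified camera frame.

import numpy as np

frame_id = '000000'  # placeholder frame id

# Velodyne scan: flat float32 buffer, 4 values per point -> reshape to (N, 4)
points = np.fromfile('data/kitti/training/velodyne/%s.bin' % frame_id,
                     dtype=np.float32).reshape(-1, 4)
print(points.shape)  # (num_points, 4): x, y, z, reflectance

# Road plane: the 4th line (after a short header) holds the coefficients a, b, c, d
# of the ground plane a*x + b*y + c*z + d = 0 in rectified camera coordinates
with open('data/kitti/training/planes/%s.txt' % frame_id, 'r') as f:
    lines = f.readlines()
plane = np.array([float(v) for v in lines[3].split()])
print(plane)  # roughly [0, -1, 0, 1.65] for a flat road (camera ~1.65 m above ground)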
