@@ -85,13 +85,13 @@ def parse_args():
     parser = argparse.ArgumentParser(description='Convert dataset')
     parser.add_argument(
         '--outdir', help="output dir for json files",
-        default='/media/longc/LSSD/Public/PILSNU/detections_keypoints', type=str)
+        required=True, type=str)
     parser.add_argument(
         '--datadir', help="keypoint file",
-        default='/data/MCMTT/Public/PILSNU', type=str)
+        required=True, type=str)
     parser.add_argument(
         '--keypoint-file', help="keypoint file",
-        default='/extra/code/deep-high-resolution-net.pytorch/output/aifi/pose_hrnet/mpii_w32_256x256_adam_lr1e-3/box_keypoints.json', type=str)
+        required=True, type=str)

     return parser.parse_args()
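With the machine-specific defaults removed, all three paths must now be passed explicitly on the command line. A minimal standalone sketch (separate from the patch) of how required=True behaves:

# Minimal sketch, not part of the patch: argparse now rejects a missing
# --outdir instead of silently falling back to a hard-coded local path.
import argparse

parser = argparse.ArgumentParser(description='Convert dataset')
parser.add_argument('--outdir', help="output dir for json files",
                    required=True, type=str)

try:
    parser.parse_args([])                      # no --outdir supplied
except SystemExit:
    pass                                       # argparse printed the error and exited

args = parser.parse_args(['--outdir', '/tmp/out'])
print(args.outdir)                             # /tmp/out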
@@ -135,15 +135,13 @@ def convert_tracking(data_dir, out_dir, keypoint_file, min_score=0.4):

     # index images
     image_names = load_images(image_root)
-    image_wh = None
     for image_filename in tqdm(image_names, total=len(image_names)):
         name = os.path.splitext(image_filename)[0]
         camera_id, timestamp = os.path.split(name)
         camera_id = os.path.basename(camera_id)

-        if image_wh is None:
-            image = cv2.imread(os.path.join(image_root, image_filename))
-            image_wh = (image.shape[1], image.shape[0])
+        image = cv2.imread(os.path.join(image_root, image_filename))
+        image_wh = (image.shape[1], image.shape[0])

         file_data = {
             'camera_id': int(camera_id),
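Instead of caching the first frame's size, image_wh is now read per image. For reference, cv2.imread returns a (height, width, channels) array, so (shape[1], shape[0]) is the (width, height) pair stored in image_wh; a minimal sketch with a hypothetical file path:

# Minimal sketch (hypothetical path, not from the repo) of the per-image
# size read done inside the loop: cv2.imread gives an H x W x C array,
# and returns None when the file cannot be read.
import cv2

image = cv2.imread('/tmp/example_frame.jpg')      # hypothetical path
if image is not None:
    image_wh = (image.shape[1], image.shape[0])   # (width, height)
    print(image_wh)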