
Commit 157aaea

Changed for ActEV inferencing
1 parent 19b9929 commit 157aaea

File tree

1 file changed (+9, -1 lines)

lib/core/function.py (+9, -1)
@@ -179,6 +179,14 @@ def validate(config, val_loader, val_dataset, model, criterion, output_dir,
             all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1)
             all_boxes[idx:idx + num_images, 5] = score
             image_path.extend(meta['image'])
+
+            # output the result per image
+            this_pred = all_preds[idx:idx + num_images, :, 0:3] # (batch_size, 17, 3)
+            #assert len(meta['image']) == len(this_pred) == len(meta['pid']), (len(meta['image']), this_pred.shape, len(meta['pid']))
+            for i in range(len(this_pred)):
+                imgname = os.path.splitext(os.path.basename(meta['image'][i]))[0]
+                target_file = os.path.join(config.OUTPUT_DIR, "%s_%s.npy" % (imgname, int(meta['pid'][i])))
+                np.save(target_file, this_pred[i])
 
             idx += num_images
 
@@ -196,7 +204,7 @@ def validate(config, val_loader, val_dataset, model, criterion, output_dir,
                 )
                 save_debug_images(config, input, meta, target, pred*4, output,
                                   prefix)
-
+        return None # added by Junwei. We only need to run inference
         name_values, perf_indicator = val_dataset.evaluate(
             config, all_preds, output_dir, all_boxes, image_path,
             filenames, imgnums
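
For reference, each .npy file written by the new loop in the first hunk holds one person's predicted pose as a (17, 3) array, one row per keypoint, taken from all_preds[..., 0:3] (coordinates plus the heatmap confidence in this codebase), and is named "<imagename>_<personid>.npy" under config.OUTPUT_DIR. Below is a minimal sketch of reading those outputs back in a downstream script, assuming that naming scheme; the pose_outputs directory name is hypothetical and not part of the commit.

import glob
import os

import numpy as np

# Hypothetical directory; in the commit it is config.OUTPUT_DIR.
output_dir = "pose_outputs"

for npy_file in sorted(glob.glob(os.path.join(output_dir, "*.npy"))):
    # Files are named "<imagename>_<personid>.npy" by the loop in the diff above.
    basename = os.path.splitext(os.path.basename(npy_file))[0]
    imgname, pid = basename.rsplit("_", 1)  # peel the trailing person id back off
    keypoints = np.load(npy_file)           # shape (17, 3): per-joint x, y, score
    print(imgname, pid, keypoints.shape)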

0 commit comments