diff --git a/.gitignore b/.gitignore index 634d85f5..816c0e29 100644 --- a/.gitignore +++ b/.gitignore @@ -35,3 +35,5 @@ datasets/vg/ # MacOS .DS_Store +# slurm +slurm-output/ \ No newline at end of file diff --git a/configs/e2e_relation_X_101_32_8_FPN_1x.yaml b/configs/e2e_relation_X_101_32_8_FPN_1x.yaml index 66d6d822..eadc5a10 100644 --- a/configs/e2e_relation_X_101_32_8_FPN_1x.yaml +++ b/configs/e2e_relation_X_101_32_8_FPN_1x.yaml @@ -120,6 +120,7 @@ SOLVER: FACTOR: 0.1 MAX_DECAY_STEP: 3 OUTPUT_DIR: './output/relation_baseline' +DATA_STAT_DIR: './output/relation_baseline' TEST: ALLOW_LOAD_FROM_CACHE: False RELATION: @@ -128,3 +129,11 @@ TEST: LATER_NMS_PREDICTION_THRES: 0.5 CUSTUM_EVAL: False # eval SGDet model on custum images, output a json CUSTUM_PATH: '.' # the folder that contains the custum images, only jpg files are allowed +GEN_IMG: # Generated images eval options + EVAL: False + ANNO_DIR: "" + ANNO_FILE: "" + BASE_DIR: "" + FOLDER_NAME: "" + NUM_ROUNDS: 1 + RESOLUTION: 256 \ No newline at end of file diff --git a/maskrcnn_benchmark/config/defaults.py b/maskrcnn_benchmark/config/defaults.py index b51a8fcc..b0907ca8 100644 --- a/maskrcnn_benchmark/config/defaults.py +++ b/maskrcnn_benchmark/config/defaults.py @@ -584,6 +584,7 @@ # Misc options # ---------------------------------------------------------------------------- # _C.OUTPUT_DIR = "." +_C.DATA_STAT_DIR = "." _C.DETECTED_SGG_DIR = "." _C.GLOVE_DIR = "." 
@@ -599,3 +600,16 @@ # Enable verbosity in apex.amp _C.AMP_VERBOSE = False + +# ---------------------------------------------------------------------------- # +# Generated images eval options +# ---------------------------------------------------------------------------- # +_C.GEN_IMG = CN() + +_C.GEN_IMG.EVAL = False +_C.GEN_IMG.ANNO_DIR = "" +_C.GEN_IMG.ANNO_FILE = "" +_C.GEN_IMG.BASE_DIR = "" +_C.GEN_IMG.FOLDER_NAME = "" +_C.GEN_IMG.NUM_ROUNDS = 1 +_C.GEN_IMG.RESOLUTION = 256 \ No newline at end of file diff --git a/maskrcnn_benchmark/config/paths_catalog.py b/maskrcnn_benchmark/config/paths_catalog.py index 80f30685..abe2e731 100644 --- a/maskrcnn_benchmark/config/paths_catalog.py +++ b/maskrcnn_benchmark/config/paths_catalog.py @@ -7,7 +7,7 @@ class DatasetCatalog(object): #DATA_DIR = "/home/users/alatif/data/ImageCorpora/" - DATA_DIR = "/media/rafi/Samsung_T5/_DATASETS/" + DATA_DIR = "datasets" DATASETS = { "coco_2017_train": { "img_dir": "coco/train2017", @@ -107,14 +107,14 @@ class DatasetCatalog(object): "ann_file": "cityscapes/annotations/instancesonly_filtered_gtFine_test.json" }, "VG_stanford_filtered": { - "img_dir": "vg/VG_100K", + "img_dir": "/h/bichengx/site-pkgs/VG-SGG/VG_100K_3", "roidb_file": "vg/VG-SGG.h5", "dict_file": "vg/VG-SGG-dicts.json", "image_file": "vg/image_data.json", }, "VG_stanford_filtered_with_attribute": { - "img_dir": "vg/VG_100K", - "roidb_file": "vg/VG-SGG-with-attri.h5", + "img_dir": "/h/bichengx/site-pkgs/VG-SGG/VG_100K_3", + "roidb_file": "/h/bichengx/site-pkgs/VG-SGG/VG-SGG-with-attri.h5", "dict_file": "vg/VG-SGG-dicts-with-attri.json", "image_file": "vg/image_data.json", "capgraphs_file": "vg/vg_capgraphs_anno.json", diff --git a/maskrcnn_benchmark/data/build.py b/maskrcnn_benchmark/data/build.py index 652ea288..9919f609 100644 --- a/maskrcnn_benchmark/data/build.py +++ b/maskrcnn_benchmark/data/build.py @@ -16,6 +16,7 @@ from .collate_batch import BatchCollator, BBoxAugCollator from .transforms import build_transforms 
+from maskrcnn_benchmark.data.datasets.vg_gen_img import build_gen_img_dataset # by Jiaxin def get_dataset_statistics(cfg): @@ -33,7 +34,8 @@ def get_dataset_statistics(cfg): dataset_names = cfg.DATASETS.TRAIN data_statistics_name = ''.join(dataset_names) + '_statistics' - save_file = os.path.join(cfg.OUTPUT_DIR, "{}.cache".format(data_statistics_name)) + # save_file = os.path.join(cfg.OUTPUT_DIR, "{}.cache".format(data_statistics_name)) + save_file = os.path.join(cfg.DATA_STAT_DIR, "{}.cache".format(data_statistics_name)) if os.path.exists(save_file): logger.info('Loading data statistics from: ' + str(save_file)) @@ -224,7 +226,12 @@ def make_data_loader(cfg, mode='train', is_distributed=False, start_iter=0, data # If bbox aug is enabled in testing, simply set transforms to None and we will apply transforms later transforms = None if not is_train and cfg.TEST.BBOX_AUG.ENABLED else build_transforms(cfg, is_train) - datasets = build_dataset(cfg, dataset_list, transforms, DatasetCatalog, is_train) + + if cfg.GEN_IMG.EVAL: + assert (aspect_grouping == []) + datasets = build_gen_img_dataset(cfg, transforms) + else: + datasets = build_dataset(cfg, dataset_list, transforms, DatasetCatalog, is_train) if is_train: # save category_id to label name mapping diff --git a/maskrcnn_benchmark/data/datasets/__init__.py b/maskrcnn_benchmark/data/datasets/__init__.py index 702b55ed..41024f4b 100644 --- a/maskrcnn_benchmark/data/datasets/__init__.py +++ b/maskrcnn_benchmark/data/datasets/__init__.py @@ -3,5 +3,6 @@ from .voc import PascalVOCDataset from .concat_dataset import ConcatDataset from .visual_genome import VGDataset +from .vg_gen_img import VG_Gen_Img_Dataset -__all__ = ["COCODataset", "ConcatDataset", "PascalVOCDataset", "VGDataset"] +__all__ = ["COCODataset", "ConcatDataset", "PascalVOCDataset", "VGDataset", "VG_Gen_Img_Dataset"] diff --git a/maskrcnn_benchmark/data/datasets/evaluation/__init__.py b/maskrcnn_benchmark/data/datasets/evaluation/__init__.py index 
4a582fe1..a3d5c73c 100644 --- a/maskrcnn_benchmark/data/datasets/evaluation/__init__.py +++ b/maskrcnn_benchmark/data/datasets/evaluation/__init__.py @@ -25,6 +25,8 @@ def evaluate(cfg, dataset, predictions, output_folder, logger, **kwargs): return voc_evaluation(**args) elif isinstance(dataset, datasets.VGDataset): return vg_evaluation(**args) + elif isinstance(dataset, datasets.VG_Gen_Img_Dataset): + return vg_evaluation(**args) else: dataset_name = dataset.__class__.__name__ raise NotImplementedError("Unsupported dataset type {}.".format(dataset_name)) diff --git a/maskrcnn_benchmark/data/datasets/evaluation/vg/sgg_eval.py b/maskrcnn_benchmark/data/datasets/evaluation/vg/sgg_eval.py index 0b56aa03..ad7c29dc 100644 --- a/maskrcnn_benchmark/data/datasets/evaluation/vg/sgg_eval.py +++ b/maskrcnn_benchmark/data/datasets/evaluation/vg/sgg_eval.py @@ -84,6 +84,9 @@ def calculate_recall(self, global_container, local_container, mode): ) local_container['pred_to_gt'] = pred_to_gt + my_pred_to_gt = _my_compute_pred_matches(pred_rel_inds, pred_triplets, gt_rels, gt_triplets) + local_container['my_pred_to_gt'] = my_pred_to_gt + for k in self.result_dict[mode + '_recall']: # the following code are copied from Neural-MOTIFS match = reduce(np.union1d, pred_to_gt[:k]) @@ -252,6 +255,9 @@ def __init__(self, result_dict): def register_container(self, mode): self.result_dict[mode + '_accuracy_hit'] = {20: [], 50: [], 100: []} self.result_dict[mode + '_accuracy_count'] = {20: [], 50: [], 100: []} + self.result_dict[mode + '_accuracy_rate'] = {20: [], 50: [], 100: []} + self.result_dict[mode + '_bbox_accuracy'] = [] + self.result_dict[mode + '_accuracy_rate_mine'] = [] def generate_print_string(self, mode): result_str = 'SGG eval: ' @@ -261,6 +267,25 @@ def generate_print_string(self, mode): result_str += ' A @ %d: %.4f; ' % (k, a_hit/a_count) result_str += ' for mode=%s, type=TopK Accuracy.' 
% mode result_str += '\n' + + result_str += 'SGG eval: ' + for k, v in self.result_dict[mode + '_accuracy_rate'].items(): + a_rate = np.mean(v) + result_str += ' A_rate @ %d: %.4f; ' % (k, a_rate) + result_str += ' for mode=%s, type=TopK Accuracy_Rate.' % mode + result_str += '\n' + + result_str += 'SGG eval: ' + my_a_rate_mine = np.mean(self.result_dict[mode + '_accuracy_rate_mine']) + result_str += ' My_A_rate_Mine: %.4f; ' % (my_a_rate_mine) + result_str += ' for mode=%s, type=My Accuracy_Rate-Mine.' % mode + result_str += '\n' + + result_str += 'SGG-BBox eval: ' + bbox_acc = np.mean(self.result_dict[mode + '_bbox_accuracy']) + result_str += ' BBox_Acc: %.4f; ' % (bbox_acc) + result_str += ' for mode=%s, type=BBox Accuracy.' % mode + result_str += '\n' return result_str def prepare_gtpair(self, local_container): @@ -287,6 +312,192 @@ def calculate_recall(self, global_container, local_container, mode): gt_pair_match = [] self.result_dict[mode + '_accuracy_hit'][k].append(float(len(gt_pair_match))) self.result_dict[mode + '_accuracy_count'][k].append(float(gt_rels.shape[0])) + self.result_dict[mode + '_accuracy_rate'][k].append(float(len(gt_pair_match)) / float(gt_rels.shape[0])) + + # my version of accuracy + self.result_dict[mode + '_accuracy_rate_mine'].append(float(reduce(np.union1d, local_container['my_pred_to_gt']).shape[0]) / float(gt_rels.shape[0])) + assert (local_container['pred_classes'].shape == local_container['gt_classes'].shape) + bbox_label_match = local_container['pred_classes'] == local_container['gt_classes'] + self.result_dict[mode + '_bbox_accuracy'].append(float(bbox_label_match.sum()) / float(bbox_label_match.shape[0])) + + +class SGMeanAcc(SceneGraphEvaluation): + def __init__(self, result_dict, num_rel, ind_to_predicates, hbt_group, print_detail=False): + super(SGMeanAcc, self).__init__(result_dict) + self.num_rel = num_rel + self.print_detail = print_detail + self.rel_name_list = ind_to_predicates[1:] # remove __background__ + 
self.hbt_group = hbt_group[1:] # remove __background__ + + def register_container(self, mode): + self.result_dict[mode + '_mean_acc'] = {20: 0.0, 50: 0.0, 100: 0.0} + self.result_dict[mode + '_mean_acc_collect'] = {20: [[] for i in range(self.num_rel)], 50: [[] for i in range(self.num_rel)], 100: [[] for i in range(self.num_rel)]} + self.result_dict[mode + '_mean_acc_list'] = {20: [], 50: [], 100: []} + self.result_dict[mode + '_mean_acc_global'] = {20: 0.0, 50: 0.0, 100: 0.0} + self.result_dict[mode + '_mean_acc_collect_hit_global'] = {20: [[] for i in range(self.num_rel)], 50: [[] for i in range(self.num_rel)], 100: [[] for i in range(self.num_rel)]} + self.result_dict[mode + '_mean_acc_collect_count_global'] = {20: [[] for i in range(self.num_rel)], 50: [[] for i in range(self.num_rel)], 100: [[] for i in range(self.num_rel)]} + self.result_dict[mode + '_mean_acc_list_global'] = {20: [], 50: [], 100: []} + # my version of accuracy + self.result_dict[mode + '_mean_acc_mine'] = 0.0 + self.result_dict[mode + '_mean_acc_collect_mine'] = [[] for i in range(self.num_rel)] + self.result_dict[mode + '_mean_acc_list_mine'] = [] + self.result_dict[mode + '_mean_acc_head'] = 0.0 + self.result_dict[mode + '_mean_acc_body'] = 0.0 + self.result_dict[mode + '_mean_acc_tail'] = 0.0 + + def generate_print_string(self, mode): + result_str = 'SGG eval: ' + for k, v in self.result_dict[mode + '_mean_acc'].items(): + result_str += ' mAcc @ %d: %.4f; ' % (k, float(v)) + result_str += ' for mode=%s, type=Mean Acc.' 
% mode + result_str += '\n' + if self.print_detail: + result_str += '----------------------- Details ------------------------\n' + for n, r in zip(self.rel_name_list, self.result_dict[mode + '_mean_acc_list'][100]): + result_str += '({}:{:.4f}) '.format(str(n), r) + result_str += '\n' + result_str += '--------------------------------------------------------\n' + + result_str += 'SGG eval: ' + for k, v in self.result_dict[mode + '_mean_acc_global'].items(): + result_str += ' mAccGlobal @ %d: %.4f; ' % (k, float(v)) + result_str += ' for mode=%s, type=Mean Acc Global.' % mode + result_str += '\n' + if self.print_detail: + result_str += '----------------------- Details ------------------------\n' + for n, r in zip(self.rel_name_list, self.result_dict[mode + '_mean_acc_list_global'][100]): + result_str += '({}:{:.4f}) '.format(str(n), r) + result_str += '\n' + result_str += '--------------------------------------------------------\n' + + result_str += 'SGG eval: ' + result_str += ' my_mAcc_Mine: %.4f; ' % (float(self.result_dict[mode + '_mean_acc_mine'])) + result_str += ' for mode=%s, type=My Mean Acc-Mine.' % mode + result_str += '\n' + if self.print_detail: + result_str += '----------------------- Details ------------------------\n' + for n, r in zip(self.rel_name_list, self.result_dict[mode + '_mean_acc_list_mine']): + result_str += '({}:{:.4f}) '.format(str(n), r) + result_str += '\n' + result_str += '--------------------------------------------------------\n' + + result_str += 'SGG eval: ' + result_str += ' mAcc HEAD: %.4f; ' % (self.result_dict[mode + '_mean_acc_head']) + result_str += ' mAcc BODY: %.4f; ' % (self.result_dict[mode + '_mean_acc_body']) + result_str += ' mAcc TAIL: %.4f; ' % (self.result_dict[mode + '_mean_acc_tail']) + result_str += ' for mode=%s, type=Mean Acc HBT.' 
% mode + result_str += '\n' + return result_str + + def prepare_gtpair(self, local_container): + pred_pair_idx = local_container['pred_rel_inds'][:, 0] * 1024 + local_container['pred_rel_inds'][:, 1] + gt_pair_idx = local_container['gt_rels'][:, 0] * 1024 + local_container['gt_rels'][:, 1] + self.pred_pair_in_gt = (pred_pair_idx[:, None] == gt_pair_idx[None, :]).sum(-1) > 0 + + def collect_mean_recall_items(self, global_container, local_container, mode): + pred_to_gt = local_container['pred_to_gt'] + gt_rels = local_container['gt_rels'] + + for k in self.result_dict[mode + '_mean_acc_collect']: + if mode != 'sgdet': + # the following code are copied from Neural-MOTIFS + # match = reduce(np.union1d, pred_to_gt[:k]) + gt_pair_pred_to_gt = [] + for p, flag in zip(pred_to_gt, self.pred_pair_in_gt): + if flag: + gt_pair_pred_to_gt.append(p) + if len(gt_pair_pred_to_gt) > 0: + gt_pair_match = reduce(np.union1d, gt_pair_pred_to_gt[:k]) + else: + gt_pair_match = [] + + # NOTE: by kaihua, calculate Mean Recall for each category independently + # this metric is proposed by: CVPR 2019 oral paper "Learning to Compose Dynamic Tree Structures for Visual Contexts" + recall_hit = [0] * self.num_rel + recall_count = [0] * self.num_rel + for idx in range(gt_rels.shape[0]): + local_label = gt_rels[idx,2] + recall_count[int(local_label)] += 1 + recall_count[0] += 1 + + for idx in range(len(gt_pair_match)): + local_label = gt_rels[int(gt_pair_match[idx]),2] + recall_hit[int(local_label)] += 1 + recall_hit[0] += 1 + + for n in range(self.num_rel): + if recall_count[n] > 0: + self.result_dict[mode + '_mean_acc_collect'][k][n].append(float(recall_hit[n]) / float(recall_count[n])) + self.result_dict[mode + '_mean_acc_collect_hit_global'][k][n].append(float(recall_hit[n])) + self.result_dict[mode + '_mean_acc_collect_count_global'][k][n].append(float(recall_count[n])) + + # my version of accuracy + gt_pair_match = reduce(np.union1d, local_container['my_pred_to_gt']) + + recall_hit = [0] * 
self.num_rel + recall_count = [0] * self.num_rel + for idx in range(gt_rels.shape[0]): + local_label = gt_rels[idx,2] + recall_count[int(local_label)] += 1 + recall_count[0] += 1 + + for idx in range(len(gt_pair_match)): + local_label = gt_rels[int(gt_pair_match[idx]),2] + recall_hit[int(local_label)] += 1 + recall_hit[0] += 1 + + for n in range(self.num_rel): + if recall_count[n] > 0: + self.result_dict[mode + '_mean_acc_collect_mine'][n].append(float(recall_hit[n]) / float(recall_count[n])) + + def calculate_mean_recall(self, mode): + for k, v in self.result_dict[mode + '_mean_acc'].items(): + sum_recall = 0 + num_rel_no_bg = self.num_rel - 1 + for idx in range(num_rel_no_bg): + if len(self.result_dict[mode + '_mean_acc_collect'][k][idx+1]) == 0: + tmp_recall = 0.0 + else: + tmp_recall = np.mean(self.result_dict[mode + '_mean_acc_collect'][k][idx+1]) + self.result_dict[mode + '_mean_acc_list'][k].append(tmp_recall) + sum_recall += tmp_recall + + self.result_dict[mode + '_mean_acc'][k] = sum_recall / float(num_rel_no_bg) + + for k, v in self.result_dict[mode + '_mean_acc_global'].items(): + sum_recall = 0 + num_rel_no_bg = self.num_rel - 1 + for idx in range(num_rel_no_bg): + if len(self.result_dict[mode + '_mean_acc_collect_count_global'][k][idx+1]) == 0: + tmp_recall = 0.0 + else: + tmp_recall = np.mean(self.result_dict[mode + '_mean_acc_collect_hit_global'][k][idx+1]) / np.mean(self.result_dict[mode + '_mean_acc_collect_count_global'][k][idx+1]) + self.result_dict[mode + '_mean_acc_list_global'][k].append(tmp_recall) + sum_recall += tmp_recall + + self.result_dict[mode + '_mean_acc_global'][k] = sum_recall / float(num_rel_no_bg) + + # my version of accuracy + sum_recall = 0 + num_rel_no_bg = self.num_rel - 1 + for idx in range(num_rel_no_bg): + if len(self.result_dict[mode + '_mean_acc_collect_mine'][idx+1]) == 0: + tmp_recall = 0.0 + else: + tmp_recall = np.mean(self.result_dict[mode + '_mean_acc_collect_mine'][idx+1]) + self.result_dict[mode + 
'_mean_acc_list_mine'].append(tmp_recall) + sum_recall += tmp_recall + + self.result_dict[mode + '_mean_acc_mine'] = sum_recall / float(num_rel_no_bg) + + # Head, Body, Tail + mean_acc_np = np.array(self.result_dict[mode + '_mean_acc_list_mine']) + hbt_np = np.array(self.hbt_group) + assert (mean_acc_np.shape[0] == hbt_np.shape[0]) + self.result_dict[mode + '_mean_acc_head'] = mean_acc_np[hbt_np == 'head'].mean() + self.result_dict[mode + '_mean_acc_body'] = mean_acc_np[hbt_np == 'body'].mean() + self.result_dict[mode + '_mean_acc_tail'] = mean_acc_np[hbt_np == 'tail'].mean() + return """ @@ -294,11 +505,12 @@ def calculate_recall(self, global_container, local_container, mode): https://arxiv.org/pdf/1812.01880.pdf CVPR, 2019 """ class SGMeanRecall(SceneGraphEvaluation): - def __init__(self, result_dict, num_rel, ind_to_predicates, print_detail=False): + def __init__(self, result_dict, num_rel, ind_to_predicates, hbt_group, print_detail=False): super(SGMeanRecall, self).__init__(result_dict) self.num_rel = num_rel self.print_detail = print_detail self.rel_name_list = ind_to_predicates[1:] # remove __background__ + self.hbt_group = hbt_group[1:] # remove __background__ def register_container(self, mode): #self.result_dict[mode + '_recall_hit'] = {20: [0]*self.num_rel, 50: [0]*self.num_rel, 100: [0]*self.num_rel} @@ -306,6 +518,9 @@ def register_container(self, mode): self.result_dict[mode + '_mean_recall'] = {20: 0.0, 50: 0.0, 100: 0.0} self.result_dict[mode + '_mean_recall_collect'] = {20: [[] for i in range(self.num_rel)], 50: [[] for i in range(self.num_rel)], 100: [[] for i in range(self.num_rel)]} self.result_dict[mode + '_mean_recall_list'] = {20: [], 50: [], 100: []} + self.result_dict[mode + '_mean_recall_head'] = 0.0 + self.result_dict[mode + '_mean_recall_body'] = 0.0 + self.result_dict[mode + '_mean_recall_tail'] = 0.0 def generate_print_string(self, mode): result_str = 'SGG eval: ' @@ -320,6 +535,12 @@ def generate_print_string(self, mode): result_str += 
'\n' result_str += '--------------------------------------------------------\n' + result_str += 'SGG eval: ' + result_str += ' mR HEAD: %.4f; ' % (self.result_dict[mode + '_mean_recall_head']) + result_str += ' mR BODY: %.4f; ' % (self.result_dict[mode + '_mean_recall_body']) + result_str += ' mR TAIL: %.4f; ' % (self.result_dict[mode + '_mean_recall_tail']) + result_str += ' for mode=%s, type=Mean Recall HBT.' % mode + result_str += '\n' return result_str def collect_mean_recall_items(self, global_container, local_container, mode): @@ -361,6 +582,14 @@ def calculate_mean_recall(self, mode): sum_recall += tmp_recall self.result_dict[mode + '_mean_recall'][k] = sum_recall / float(num_rel_no_bg) + + # Head, Body, Tail + mean_recall_100_np = np.array(self.result_dict[mode + '_mean_recall_list'][100]) + hbt_np = np.array(self.hbt_group) + assert (mean_recall_100_np.shape[0] == hbt_np.shape[0]) + self.result_dict[mode + '_mean_recall_head'] = mean_recall_100_np[hbt_np == 'head'].mean() + self.result_dict[mode + '_mean_recall_body'] = mean_recall_100_np[hbt_np == 'body'].mean() + self.result_dict[mode + '_mean_recall_tail'] = mean_recall_100_np[hbt_np == 'tail'].mean() return @@ -528,3 +757,27 @@ def _compute_pred_matches(gt_triplets, pred_triplets, return pred_to_gt +def _my_compute_pred_matches(pred_rel_inds, pred_triplets, gt_rels, gt_triplets): + # find matching based on the object index + my_keeps = intersect_2d(pred_rel_inds, gt_rels[:,:2]) + assert (my_keeps.sum() == gt_rels.shape[0]) + + my_pred_to_gt = [] + match_cnt_location = 0 + for idx in range(pred_rel_inds.shape[0]): + if my_keeps[idx].any(): + # if there is a matching, there is only one matching + match_gt_idx_list = np.where(my_keeps[idx])[0].tolist() + assert (len(match_gt_idx_list) == 1) + match_cnt_location += 1 + # keep the matching if all the labels match + if (pred_triplets[idx] == gt_triplets[match_gt_idx_list[0]]).all(): + my_pred_to_gt.append(match_gt_idx_list) + else: + my_pred_to_gt.append([]) 
+ else: + my_pred_to_gt.append([]) + + assert (match_cnt_location == gt_rels.shape[0]) + assert (len(my_pred_to_gt) == pred_rel_inds.shape[0]) + return my_pred_to_gt diff --git a/maskrcnn_benchmark/data/datasets/evaluation/vg/vg_eval.py b/maskrcnn_benchmark/data/datasets/evaluation/vg/vg_eval.py index a1d3a421..a7505240 100644 --- a/maskrcnn_benchmark/data/datasets/evaluation/vg/vg_eval.py +++ b/maskrcnn_benchmark/data/datasets/evaluation/vg/vg_eval.py @@ -12,7 +12,7 @@ from maskrcnn_benchmark.structures.bounding_box import BoxList from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou from maskrcnn_benchmark.utils.miscellaneous import intersect_2d, argsort_desc, bbox_overlaps -from maskrcnn_benchmark.data.datasets.evaluation.vg.sgg_eval import SGRecall, SGNoGraphConstraintRecall, SGZeroShotRecall, SGNGZeroShotRecall, SGPairAccuracy, SGMeanRecall, SGNGMeanRecall, SGAccumulateRecall +from maskrcnn_benchmark.data.datasets.evaluation.vg.sgg_eval import SGRecall, SGNoGraphConstraintRecall, SGZeroShotRecall, SGNGZeroShotRecall, SGPairAccuracy, SGMeanRecall, SGNGMeanRecall, SGAccumulateRecall, SGMeanAcc def do_vg_evaluation( cfg, @@ -140,8 +140,12 @@ def do_vg_evaluation( eval_pair_accuracy.register_container(mode) evaluator['eval_pair_accuracy'] = eval_pair_accuracy + eval_mean_acc = SGMeanAcc(result_dict, num_rel_category, dataset.ind_to_predicates, dataset.hbt_group, print_detail=True) + eval_mean_acc.register_container(mode) + evaluator['eval_mean_acc'] = eval_mean_acc + # used for meanRecall@K - eval_mean_recall = SGMeanRecall(result_dict, num_rel_category, dataset.ind_to_predicates, print_detail=True) + eval_mean_recall = SGMeanRecall(result_dict, num_rel_category, dataset.ind_to_predicates, dataset.hbt_group, print_detail=True) eval_mean_recall.register_container(mode) evaluator['eval_mean_recall'] = eval_mean_recall @@ -164,6 +168,7 @@ def do_vg_evaluation( for groundtruth, prediction in zip(groundtruths, predictions): 
evaluate_relation_of_one_image(groundtruth, prediction, global_container, evaluator) + eval_mean_acc.calculate_mean_recall(mode) # calculate mean recall eval_mean_recall.calculate_mean_recall(mode) eval_ng_mean_recall.calculate_mean_recall(mode) @@ -177,6 +182,7 @@ def do_vg_evaluation( result_str += eval_ng_mean_recall.generate_print_string(mode) if cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOX: + result_str += eval_mean_acc.generate_print_string(mode) result_str += eval_pair_accuracy.generate_print_string(mode) result_str += '=' * 100 + '\n' @@ -186,7 +192,11 @@ def do_vg_evaluation( if "relations" in iou_types: if output_folder: torch.save(result_dict, os.path.join(output_folder, 'result_dict.pytorch')) - return float(np.mean(result_dict[mode + '_recall'][100])) + # return float(np.mean(result_dict[mode + '_recall'][100])) + # ["BBox_mAP", "BBox_Acc", "Acc", "mean_Acc", "My_Acc_Mine", "My_mean_Acc_Mine", "mean_Acc_Head", "mean_Acc_Body", "mean_Acc_Tail"] + return np.array([float(mAp), np.mean(result_dict[mode + '_bbox_accuracy']), np.mean((result_dict[mode + '_accuracy_rate'][100])), result_dict[mode + '_mean_acc'][100], \ + np.mean(result_dict[mode + '_accuracy_rate_mine']), result_dict[mode + '_mean_acc_mine'], \ + result_dict[mode + '_mean_acc_head'], result_dict[mode + '_mean_acc_body'], result_dict[mode + '_mean_acc_tail']]) elif "bbox" in iou_types: return float(mAp) else: @@ -256,6 +266,8 @@ def evaluate_relation_of_one_image(groundtruth, prediction, global_container, ev # for sgcls and predcls if mode != 'sgdet': evaluator['eval_pair_accuracy'].prepare_gtpair(local_container) + evaluator['eval_mean_acc'].prepare_gtpair(local_container) + assert (np.array_equal(evaluator['eval_pair_accuracy'].pred_pair_in_gt, evaluator['eval_mean_acc'].pred_pair_in_gt)) # to calculate the prior label based on statistics evaluator['eval_zeroshot_recall'].prepare_zeroshot(global_container, local_container) @@ -265,8 +277,8 @@ def evaluate_relation_of_one_image(groundtruth, 
prediction, global_container, ev local_container['pred_boxes'] = local_container['gt_boxes'] local_container['pred_classes'] = local_container['gt_classes'] local_container['obj_scores'] = np.ones(local_container['gt_classes'].shape[0]) - elif mode == 'sgcls': + assert (local_container['gt_boxes'].shape[0] == local_container['pred_boxes'].shape[0]) if local_container['gt_boxes'].shape[0] != local_container['pred_boxes'].shape[0]: print('Num of GT boxes is not matching with num of pred boxes in SGCLS') elif mode == 'sgdet' or mode == 'phrdet': @@ -308,6 +320,7 @@ def evaluate_relation_of_one_image(groundtruth, prediction, global_container, ev evaluator['eval_nog_recall'].calculate_recall(global_container, local_container, mode) # GT Pair Accuracy evaluator['eval_pair_accuracy'].calculate_recall(global_container, local_container, mode) + evaluator['eval_mean_acc'].collect_mean_recall_items(global_container, local_container, mode) # Mean Recall evaluator['eval_mean_recall'].collect_mean_recall_items(global_container, local_container, mode) # No Graph Constraint Mean Recall diff --git a/maskrcnn_benchmark/data/datasets/vg_gen_img.py b/maskrcnn_benchmark/data/datasets/vg_gen_img.py new file mode 100644 index 00000000..54e6da0b --- /dev/null +++ b/maskrcnn_benchmark/data/datasets/vg_gen_img.py @@ -0,0 +1,134 @@ +import torch +import os +import pickle +from PIL import Image +import json +from maskrcnn_benchmark.structures.bounding_box import BoxList +import numpy as np + +class VG_Gen_Img_Dataset(torch.utils.data.Dataset): + def __init__(self, cfg, round_num, transforms): + self.img_folder_name = cfg.GEN_IMG.FOLDER_NAME + self.img_folder = os.path.join(cfg.GEN_IMG.BASE_DIR, self.img_folder_name) + self.round_num = round_num + self.transforms = transforms + self.i_resolution = cfg.GEN_IMG.RESOLUTION + self.val_anno_data = pickle.load(open(os.path.join(cfg.GEN_IMG.ANNO_DIR, cfg.GEN_IMG.ANNO_FILE), "rb")) + self.filenames = load_filenames(self.val_anno_data, 
self.img_folder_name, self.img_folder, self.round_num) + + # dictionary comparison + my_idx_to_word = pickle.load(open(os.path.join(cfg.GEN_IMG.ANNO_DIR, "idx_to_word.pkl"), "rb")) + self.my_ind_to_classes = my_idx_to_word["ind_to_classes"] + my_ind_to_classes_cmp = ['__background__'] + my_ind_to_classes_cmp.extend(self.my_ind_to_classes[:-1]) + my_ind_to_predicates = my_idx_to_word["ind_to_predicates"] + + dict_file = "datasets/vg/VG-SGG-dicts-with-attri.json" + self.ind_to_classes, self.ind_to_predicates, _ = load_info(dict_file) # contiguous 151, 51 containing __background__ + assert (my_ind_to_classes_cmp == self.ind_to_classes) + assert (my_ind_to_predicates == self.ind_to_predicates) + self.categories = {i : self.ind_to_classes[i] for i in range(len(self.ind_to_classes))} + + self.hbt_group = [None, 'body', 'tail', 'tail', 'tail', 'body', 'body', 'body', 'head', 'body', 'tail', 'body', 'tail', 'tail', 'tail', 'tail', 'body', \ + 'tail', 'tail', 'body', 'head', 'body', 'head', 'body', 'tail', 'body', 'tail', 'tail', 'tail', 'head', 'head', 'head', 'tail', 'body', \ + 'tail', 'body', 'tail', 'tail', 'body', 'tail', 'body', 'body', 'tail', 'body', 'tail', 'tail', 'body', 'body', 'head', 'body', 'body'] + + def __len__(self): + return len(self.val_anno_data) + + def __getitem__(self, index): + file_name = self.val_anno_data[index]['file_name'] + file_name_id = file_name.split('.')[0] + + if self.img_folder_name == "validation_image_gt": + img_path = os.path.join(self.img_folder, file_name_id+".png") + else: + img_path = os.path.join(self.img_folder, file_name_id+"_"+str(self.round_num)+".png") + assert (img_path == self.filenames[index]) + + img = Image.open(img_path).convert("RGB") + assert (img.size == (self.i_resolution, self.i_resolution)) + target = self.get_groundtruth(index) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target, index + + def get_img_info(self, index): + return {"width": self.i_resolution, 
"height": self.i_resolution} + + def get_groundtruth(self, index, evaluation=False): + item = self.val_anno_data[index] + node_bboxes_xcyc = torch.tensor(item['node_bboxes_xcyc']) + node_bboxes_xyxy = torch.zeros(node_bboxes_xcyc.shape, dtype=node_bboxes_xcyc.dtype) + node_bboxes_xyxy[:, 0] = (node_bboxes_xcyc[:, 0] - node_bboxes_xcyc[:, 2]/2).clamp(0, 1) + node_bboxes_xyxy[:, 1] = (node_bboxes_xcyc[:, 1] - node_bboxes_xcyc[:, 3]/2).clamp(0, 1) + node_bboxes_xyxy[:, 2] = (node_bboxes_xcyc[:, 0] + node_bboxes_xcyc[:, 2]/2).clamp(0, 1) + node_bboxes_xyxy[:, 3] = (node_bboxes_xcyc[:, 1] + node_bboxes_xcyc[:, 3]/2).clamp(0, 1) + node_bboxes_xyxy = node_bboxes_xyxy * self.i_resolution + assert (torch.all(node_bboxes_xyxy[:, 2] >= node_bboxes_xyxy[:, 0])) + assert (torch.all(node_bboxes_xyxy[:, 3] >= node_bboxes_xyxy[:, 1])) + + target = BoxList(node_bboxes_xyxy, (self.i_resolution, self.i_resolution), 'xyxy') # xyxy + + cmp_list = [self.my_ind_to_classes[entry] == self.ind_to_classes[entry+1] for entry in item['node_labels'].tolist()] + assert (torch.tensor(cmp_list).all()) + + target.add_field("labels", torch.from_numpy(item['node_labels'] + 1)) + target.add_field("attributes", torch.zeros(item['node_labels'].shape[0], 10, dtype=torch.int64)) + target.add_field("relation", torch.from_numpy(item['edge_map']), is_triplet=True) + + if evaluation: + target = target.clip_to_image(remove_empty=False) + + relation = [] + subj_node_idxes, obj_node_idxes = np.where(item['edge_map']) + for subj_idx, obj_idx in zip(subj_node_idxes, obj_node_idxes): + relation.append([subj_idx, obj_idx, item['edge_map'][subj_idx, obj_idx]]) + + target.add_field("relation_tuple", torch.LongTensor(relation)) # for evaluation + return target + else: + target = target.clip_to_image(remove_empty=False) + return target + + +def load_filenames(val_anno_data, img_folder_name, img_folder, round_num): + filenames = [] + for item in val_anno_data: + file_name = item['file_name'] + file_name_id = 
file_name.split('.')[0] + if img_folder_name == "validation_image_gt": + filenames.append(os.path.join(img_folder, file_name_id+".png")) + else: + filenames.append(os.path.join(img_folder, file_name_id+"_"+str(round_num)+".png")) + return filenames + + +def load_info(dict_file, add_bg=True): + """ + Loads the file containing the visual genome label meanings + """ + info = json.load(open(dict_file, 'r')) + if add_bg: + info['label_to_idx']['__background__'] = 0 + info['predicate_to_idx']['__background__'] = 0 + info['attribute_to_idx']['__background__'] = 0 + + class_to_ind = info['label_to_idx'] + predicate_to_ind = info['predicate_to_idx'] + attribute_to_ind = info['attribute_to_idx'] + ind_to_classes = sorted(class_to_ind, key=lambda k: class_to_ind[k]) + ind_to_predicates = sorted(predicate_to_ind, key=lambda k: predicate_to_ind[k]) + ind_to_attributes = sorted(attribute_to_ind, key=lambda k: attribute_to_ind[k]) + + return ind_to_classes, ind_to_predicates, ind_to_attributes + + +def build_gen_img_dataset(cfg, transforms): + datasets = [] + for round_num in range(cfg.GEN_IMG.NUM_ROUNDS): + dataset = VG_Gen_Img_Dataset(cfg, round_num, transforms) + datasets.append(dataset) + return datasets \ No newline at end of file diff --git a/maskrcnn_benchmark/structures/bounding_box.py b/maskrcnn_benchmark/structures/bounding_box.py index 6e3db710..a67c6692 100644 --- a/maskrcnn_benchmark/structures/bounding_box.py +++ b/maskrcnn_benchmark/structures/bounding_box.py @@ -235,7 +235,7 @@ def __len__(self): return self.bbox.shape[0] def clip_to_image(self, remove_empty=True): - TO_REMOVE = 1 + TO_REMOVE = 0 # 1 self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE) self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE) self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE) diff --git a/tools/relation_test_net.py b/tools/relation_test_net.py index 21922ef1..2b08d5bc 100644 --- a/tools/relation_test_net.py +++ b/tools/relation_test_net.py @@ -56,7 +56,9 @@ def 
main(): cfg.merge_from_list(args.opts) cfg.freeze() - save_dir = "" + save_dir = os.path.join(cfg.OUTPUT_DIR, "inference_test_log") + if save_dir: + mkdir(save_dir) logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank()) logger.info("Using {} GPUs".format(num_gpus)) logger.info(cfg) @@ -73,7 +75,8 @@ def main(): output_dir = cfg.OUTPUT_DIR checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir) - _ = checkpointer.load(cfg.MODEL.WEIGHT) + # _ = checkpointer.load(cfg.MODEL.WEIGHT) + _ = checkpointer.load(cfg.MODEL.PRETRAINED_DETECTOR_CKPT) iou_types = ("bbox",) if cfg.MODEL.MASK_ON: @@ -99,7 +102,7 @@ def main(): if cfg.OUTPUT_DIR: for idx, dataset_name in enumerate(dataset_names): - output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name) + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference_test", dataset_name) mkdir(output_folder) output_folders[idx] = output_folder data_loaders_val = make_data_loader(cfg=cfg, mode="test", is_distributed=distributed, dataset_to_test=cfg.DATASETS.TO_TEST) diff --git a/tools/relation_val_net.py b/tools/relation_val_net.py new file mode 100644 index 00000000..1e5d5295 --- /dev/null +++ b/tools/relation_val_net.py @@ -0,0 +1,127 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+# Set up custom environment before nearly anything else is imported +# NOTE: this should be the first import (do not reorder) +from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip + +import argparse +import os + +import torch +from maskrcnn_benchmark.config import cfg +from maskrcnn_benchmark.data import make_data_loader +from maskrcnn_benchmark.engine.inference import inference +from maskrcnn_benchmark.modeling.detector import build_detection_model +from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer +from maskrcnn_benchmark.utils.collect_env import collect_env_info +from maskrcnn_benchmark.utils.comm import synchronize, get_rank +from maskrcnn_benchmark.utils.logger import setup_logger +from maskrcnn_benchmark.utils.miscellaneous import mkdir + +# Check if we can enable mixed-precision via apex.amp +try: + from apex import amp +except ImportError: + raise ImportError('Use APEX for mixed precision via apex.amp') + + +def main(): + parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference") + parser.add_argument( + "--config-file", + default="/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml", + metavar="FILE", + help="path to config file", + ) + parser.add_argument("--local_rank", type=int, default=0) + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + + args = parser.parse_args() + + num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1 + distributed = num_gpus > 1 + + if distributed: + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group( + backend="nccl", init_method="env://" + ) + synchronize() + + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + + save_dir = os.path.join(cfg.OUTPUT_DIR, "inference_val_log") + if save_dir: + mkdir(save_dir) + logger = 
setup_logger("maskrcnn_benchmark", save_dir, get_rank()) + logger.info("Using {} GPUs".format(num_gpus)) + logger.info(cfg) + + logger.info("Collecting env info (might take some time)") + logger.info("\n" + collect_env_info()) + + model = build_detection_model(cfg) + model.to(cfg.MODEL.DEVICE) + + # Initialize mixed-precision if necessary + use_mixed_precision = cfg.DTYPE == 'float16' + amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE) + + output_dir = cfg.OUTPUT_DIR + checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir) + # _ = checkpointer.load(cfg.MODEL.WEIGHT) + _ = checkpointer.load(cfg.MODEL.PRETRAINED_DETECTOR_CKPT) + + iou_types = ("bbox",) + if cfg.MODEL.MASK_ON: + iou_types = iou_types + ("segm",) + if cfg.MODEL.KEYPOINT_ON: + iou_types = iou_types + ("keypoints",) + if cfg.MODEL.RELATION_ON: + iou_types = iou_types + ("relations", ) + if cfg.MODEL.ATTRIBUTE_ON: + iou_types = iou_types + ("attributes", ) + output_folders = [None] * len(cfg.DATASETS.VAL) + + dataset_names = cfg.DATASETS.VAL + + # This variable enables the script to run the test on any dataset split. 
+ if cfg.DATASETS.TO_TEST: + assert cfg.DATASETS.TO_TEST in {'train', 'val', 'test', None} + if cfg.DATASETS.TO_TEST == 'train': + dataset_names = cfg.DATASETS.TRAIN + elif cfg.DATASETS.TO_TEST == 'val': + dataset_names = cfg.DATASETS.VAL + + + if cfg.OUTPUT_DIR: + for idx, dataset_name in enumerate(dataset_names): + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference_val", dataset_name) + mkdir(output_folder) + output_folders[idx] = output_folder + data_loaders_val = make_data_loader(cfg=cfg, mode="val", is_distributed=distributed, dataset_to_test=cfg.DATASETS.TO_TEST) + for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val): + inference( + cfg, + model, + data_loader_val, + dataset_name=dataset_name, + iou_types=iou_types, + box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY, + device=cfg.MODEL.DEVICE, + expected_results=cfg.TEST.EXPECTED_RESULTS, + expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, + output_folder=output_folder, + ) + synchronize() + + +if __name__ == "__main__": + main() + torch.cuda.empty_cache() diff --git a/tools/relation_val_net_gen_img.py b/tools/relation_val_net_gen_img.py new file mode 100644 index 00000000..d0c8c497 --- /dev/null +++ b/tools/relation_val_net_gen_img.py @@ -0,0 +1,142 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+# Set up custom environment before nearly anything else is imported +# NOTE: this should be the first import (do not reorder) +from maskrcnn_benchmark.utils.env import setup_environment # noqa F401 isort:skip + +import argparse +import os +import numpy as np + +import torch +from maskrcnn_benchmark.config import cfg +from maskrcnn_benchmark.data import make_data_loader +from maskrcnn_benchmark.engine.inference import inference +from maskrcnn_benchmark.modeling.detector import build_detection_model +from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer +from maskrcnn_benchmark.utils.collect_env import collect_env_info +from maskrcnn_benchmark.utils.comm import synchronize, get_rank +from maskrcnn_benchmark.utils.logger import setup_logger +from maskrcnn_benchmark.utils.miscellaneous import mkdir + +# Check if we can enable mixed-precision via apex.amp +try: + from apex import amp +except ImportError: + raise ImportError('Use APEX for mixed precision via apex.amp') + + +def main(): + parser = argparse.ArgumentParser(description="PyTorch Object Detection Inference") + parser.add_argument( + "--config-file", + default="/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml", + metavar="FILE", + help="path to config file", + ) + parser.add_argument("--local_rank", type=int, default=0) + parser.add_argument( + "opts", + help="Modify config options using the command-line", + default=None, + nargs=argparse.REMAINDER, + ) + + args = parser.parse_args() + + num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1 + distributed = num_gpus > 1 + + if distributed: + torch.cuda.set_device(args.local_rank) + torch.distributed.init_process_group( + backend="nccl", init_method="env://" + ) + synchronize() + + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + + if cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOX and cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL: + sgg_mode = 
"_predcls_" + elif cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOX and not cfg.MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL: + sgg_mode = "_sgcls_" + else: + raise NotImplementedError + + # update output_dir + output_dir = os.path.join(cfg.GEN_IMG.BASE_DIR, cfg.GEN_IMG.FOLDER_NAME + sgg_mode + cfg.MODEL.ROI_RELATION_HEAD.PREDICTOR + "_" + cfg.MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE + "_22") + evalset_name = cfg.GEN_IMG.ANNO_FILE.split('.')[0] + + save_dir = os.path.join(output_dir, "inference_"+evalset_name+"_log") + if save_dir: + mkdir(save_dir) + logger = setup_logger("maskrcnn_benchmark", save_dir, get_rank()) + logger.info("Using {} GPUs".format(num_gpus)) + logger.info(cfg) + + logger.info("Collecting env info (might take some time)") + logger.info("\n" + collect_env_info()) + + model = build_detection_model(cfg) + model.to(cfg.MODEL.DEVICE) + + # Initialize mixed-precision if necessary + use_mixed_precision = cfg.DTYPE == 'float16' + amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE) + + checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir) + # _ = checkpointer.load(cfg.MODEL.WEIGHT) + _ = checkpointer.load(cfg.MODEL.PRETRAINED_DETECTOR_CKPT) + + iou_types = ("bbox",) + if cfg.MODEL.MASK_ON: + iou_types = iou_types + ("segm",) + if cfg.MODEL.KEYPOINT_ON: + iou_types = iou_types + ("keypoints",) + if cfg.MODEL.RELATION_ON: + iou_types = iou_types + ("relations", ) + if cfg.MODEL.ATTRIBUTE_ON: + iou_types = iou_types + ("attributes", ) + output_folders = [None] * cfg.GEN_IMG.NUM_ROUNDS + + # This variable enables the script to run the test on any dataset split. 
+ assert (cfg.DATASETS.TO_TEST is None) + + if output_dir: + for idx in range(cfg.GEN_IMG.NUM_ROUNDS): + output_folder = os.path.join(output_dir, "inference_"+evalset_name+"_round_"+str(idx)) + mkdir(output_folder) + output_folders[idx] = output_folder + data_loaders_val = make_data_loader(cfg=cfg, mode="val", is_distributed=distributed, dataset_to_test=cfg.DATASETS.TO_TEST) + + assert (len(output_folders) == cfg.GEN_IMG.NUM_ROUNDS) + assert (len(data_loaders_val) == cfg.GEN_IMG.NUM_ROUNDS) + + rt_array_all = [] + for output_folder, data_loader_val in zip(output_folders, data_loaders_val): + rt_array = inference( + cfg, + model, + data_loader_val, + dataset_name=evalset_name, + iou_types=iou_types, + box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY, + device=cfg.MODEL.DEVICE, + expected_results=cfg.TEST.EXPECTED_RESULTS, + expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL, + output_folder=output_folder, + ) + rt_array_all.append(rt_array) + synchronize() + + key_list = ["BBox_mAP", "BBox_Label_Acc", "Acc", "mean_Acc", "My_Acc_Mine", "My_mean_Acc_Mine", "mean_Acc_Head", "mean_Acc_Body", "mean_Acc_Tail"] + value_list = np.mean(np.vstack(rt_array_all), axis=0).tolist() + logger.info("========== Averged Results ==========") + for key, value in zip(key_list, value_list): + logger.info('%s: %.4f' % (key, value)) + + +if __name__ == "__main__": + main() + torch.cuda.empty_cache() diff --git a/vector-scripts/debug-example-sgcls-val.sh b/vector-scripts/debug-example-sgcls-val.sh new file mode 100755 index 00000000..552ec0ea --- /dev/null +++ b/vector-scripts/debug-example-sgcls-val.sh @@ -0,0 +1,26 @@ +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port 10098 \ +--nproc_per_node=1 tools/relation_val_net_gen_img.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ 
+MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL False \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.ALLOW_LOAD_FROM_CACHE False \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DATALOADER.ASPECT_RATIO_GROUPING False \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls/model_0032000.pth \ +DATA_STAT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls \ +GEN_IMG.EVAL True \ +GEN_IMG.ANNO_DIR "/h/bichengx/site-pkgs/VG-SGG/V2-Last" \ +GEN_IMG.ANNO_FILE "validation_data_bbox_dbox32_np.pkl" \ +GEN_IMG.BASE_DIR "/scratch/hdd001/home/bichengx/projects/ControlNet/my-diff-cnet/vector-scripts-BX/val-scripts/val-output/vg-a40-single-run_0-1234" \ +GEN_IMG.FOLDER_NAME "validation_image_gt" \ +GEN_IMG.NUM_ROUNDS 1 \ No newline at end of file diff --git a/vector-scripts/debug-img-sgcls-val-999-template-pass.sh b/vector-scripts/debug-img-sgcls-val-999-template-pass.sh new file mode 100755 index 00000000..ccf20142 --- /dev/null +++ b/vector-scripts/debug-img-sgcls-val-999-template-pass.sh @@ -0,0 +1,31 @@ +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +GEN_IMG_COMMON_PATH="/scratch/hdd001/home/bichengx/projects/ControlNet/my-diff-cnet/vector-scripts-BX/val-scripts/val-output" +PORT_NUM=$1 +GEN_IMG_ANNO_FILE=$2 +GEN_IMG_BASE_DIR=$3 +GEN_IMG_FOLDER_NAME=$4 + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port ${PORT_NUM} \ +--nproc_per_node=1 tools/relation_val_net_gen_img.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL False \ 
+MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DATALOADER.ASPECT_RATIO_GROUPING False \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls/model_0032000.pth \ +DATA_STAT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls \ +GEN_IMG.EVAL True \ +GEN_IMG.ANNO_DIR "/h/bichengx/site-pkgs/VG-SGG/V2-Last" \ +GEN_IMG.ANNO_FILE ${GEN_IMG_ANNO_FILE} \ +GEN_IMG.BASE_DIR "${GEN_IMG_COMMON_PATH}/${GEN_IMG_BASE_DIR}" \ +GEN_IMG.FOLDER_NAME ${GEN_IMG_FOLDER_NAME} \ +GEN_IMG.NUM_ROUNDS 5 \ No newline at end of file diff --git a/vector-scripts/example-predcls-test.sh b/vector-scripts/example-predcls-test.sh new file mode 100755 index 00000000..bf82c31d --- /dev/null +++ b/vector-scripts/example-predcls-test.sh @@ -0,0 +1,39 @@ +#!/bin/bash +#SBATCH --job-name=example-predcls-test +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138 +#SBATCH --mem=80GB +#SBATCH -c 18 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=normal +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port 10018 \ +--nproc_per_node=1 tools/relation_test_net.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ 
+MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL True \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +DTYPE "float16" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls/model_0030000.pth \ +OUTPUT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls_test_2 \ No newline at end of file diff --git a/vector-scripts/example-predcls-val-999.sh b/vector-scripts/example-predcls-val-999.sh new file mode 100755 index 00000000..119a5e30 --- /dev/null +++ b/vector-scripts/example-predcls-val-999.sh @@ -0,0 +1,40 @@ +#!/bin/bash +#SBATCH --job-name=example-predcls-val-999 +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138 +#SBATCH --mem=40GB +#SBATCH -c 9 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port 10018 \ +--nproc_per_node=1 tools/relation_val_net.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL True \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE 
sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls/model_0030000.pth \ +OUTPUT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls_val-999 \ No newline at end of file diff --git a/vector-scripts/example-predcls-val.sh b/vector-scripts/example-predcls-val.sh new file mode 100755 index 00000000..4e573732 --- /dev/null +++ b/vector-scripts/example-predcls-val.sh @@ -0,0 +1,39 @@ +#!/bin/bash +#SBATCH --job-name=example-predcls-val +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138 +#SBATCH --mem=40GB +#SBATCH -c 9 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port 10018 \ +--nproc_per_node=1 tools/relation_val_net.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL True \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT 
/scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls/model_0030000.pth \ +OUTPUT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls_val \ No newline at end of file diff --git a/vector-scripts/example-sgcls-test.sh b/vector-scripts/example-sgcls-test.sh new file mode 100755 index 00000000..f557d755 --- /dev/null +++ b/vector-scripts/example-sgcls-test.sh @@ -0,0 +1,39 @@ +#!/bin/bash +#SBATCH --job-name=example-sgcls-test +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138 +#SBATCH --mem=80GB +#SBATCH -c 18 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=normal +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port 10028 \ +--nproc_per_node=1 tools/relation_test_net.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL False \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +DTYPE "float16" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls/model_0032000.pth \ +OUTPUT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls_test_2 \ No newline at end of file diff --git 
a/vector-scripts/example-sgcls-val-999.sh b/vector-scripts/example-sgcls-val-999.sh new file mode 100755 index 00000000..3ebbf7ea --- /dev/null +++ b/vector-scripts/example-sgcls-val-999.sh @@ -0,0 +1,40 @@ +#!/bin/bash +#SBATCH --job-name=example-sgcls-val-999 +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138 +#SBATCH --mem=40GB +#SBATCH -c 9 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port 10028 \ +--nproc_per_node=1 tools/relation_val_net.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL False \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls/model_0032000.pth \ +OUTPUT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls_val-999 \ No newline at end of file diff --git a/vector-scripts/example-sgcls-val.sh b/vector-scripts/example-sgcls-val.sh new file mode 100755 index 00000000..b45e319e --- /dev/null +++ b/vector-scripts/example-sgcls-val.sh @@ -0,0 +1,39 @@ +#!/bin/bash +#SBATCH 
--job-name=example-sgcls-val +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138 +#SBATCH --mem=40GB +#SBATCH -c 9 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port 10028 \ +--nproc_per_node=1 tools/relation_val_net.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL False \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls/model_0032000.pth \ +OUTPUT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls_val \ No newline at end of file diff --git a/vector-scripts/gen-img-motif-sgcls-val-999-template-pass.sh b/vector-scripts/gen-img-motif-sgcls-val-999-template-pass.sh new file mode 100755 index 00000000..030d3a90 --- /dev/null +++ b/vector-scripts/gen-img-motif-sgcls-val-999-template-pass.sh @@ -0,0 +1,54 @@ +#!/bin/bash +#SBATCH --job-name=example-motif-sgcls-val-999-pass +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138,gpu169 +#SBATCH --mem=30GB +#SBATCH -c 8 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m 
+#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo $(date) "--" ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +GEN_IMG_COMMON_PATH="/scratch/hdd001/home/bichengx/projects/ControlNet/my-diff-cnet/vector-scripts-BX/val-scripts/val-output" +PORT_NUM=$1 +GEN_IMG_ANNO_FILE=$2 +GEN_IMG_BASE_DIR=$3 +GEN_IMG_FOLDER_NAME=$4 + +echo ${GEN_IMG_ANNO_FILE} +echo ${GEN_IMG_BASE_DIR} +echo ${GEN_IMG_FOLDER_NAME} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port ${PORT_NUM} \ +--nproc_per_node=1 tools/relation_val_net_gen_img.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL False \ +MODEL.ROI_RELATION_HEAD.PREDICTOR MotifPredictor \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DATALOADER.ASPECT_RATIO_GROUPING False \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/motif-sgcls/model_0032000.pth \ +DATA_STAT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/motif-sgcls \ +GEN_IMG.EVAL True \ +GEN_IMG.ANNO_DIR "/h/bichengx/site-pkgs/VG-SGG/V2-Last" \ +GEN_IMG.ANNO_FILE ${GEN_IMG_ANNO_FILE} \ +GEN_IMG.BASE_DIR "${GEN_IMG_COMMON_PATH}/${GEN_IMG_BASE_DIR}" \ +GEN_IMG.FOLDER_NAME ${GEN_IMG_FOLDER_NAME} \ +GEN_IMG.NUM_ROUNDS 5 \ No newline at end of file diff --git a/vector-scripts/gen-img-predcls-val-999-template-pass.sh b/vector-scripts/gen-img-predcls-val-999-template-pass.sh new file mode 100755 index 00000000..5ae7ae4f --- /dev/null +++ 
b/vector-scripts/gen-img-predcls-val-999-template-pass.sh @@ -0,0 +1,57 @@ +#!/bin/bash +#SBATCH --job-name=example-predcls-val-999-pass +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138,gpu169 +#SBATCH --mem=30GB +#SBATCH -c 8 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo $(date) "--" ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +GEN_IMG_COMMON_PATH="/scratch/hdd001/home/bichengx/projects/ControlNet/my-diff-cnet/vector-scripts-BX/val-scripts/val-output" +PORT_NUM=$1 +GEN_IMG_ANNO_FILE=$2 +GEN_IMG_BASE_DIR=$3 +GEN_IMG_FOLDER_NAME=$4 + +echo ${GEN_IMG_ANNO_FILE} +echo ${GEN_IMG_BASE_DIR} +echo ${GEN_IMG_FOLDER_NAME} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port ${PORT_NUM} \ +--nproc_per_node=1 tools/relation_val_net_gen_img.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL True \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DATALOADER.ASPECT_RATIO_GROUPING False \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls/model_0030000.pth \ +DATA_STAT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls \ +GEN_IMG.EVAL True \ 
+GEN_IMG.ANNO_DIR "/h/bichengx/site-pkgs/VG-SGG/V2-Last" \ +GEN_IMG.ANNO_FILE ${GEN_IMG_ANNO_FILE} \ +GEN_IMG.BASE_DIR "${GEN_IMG_COMMON_PATH}/${GEN_IMG_BASE_DIR}" \ +GEN_IMG.FOLDER_NAME ${GEN_IMG_FOLDER_NAME} \ +GEN_IMG.NUM_ROUNDS 5 \ No newline at end of file diff --git a/vector-scripts/gen-img-predcls-val-999-template.sh b/vector-scripts/gen-img-predcls-val-999-template.sh new file mode 100755 index 00000000..b0e4ed13 --- /dev/null +++ b/vector-scripts/gen-img-predcls-val-999-template.sh @@ -0,0 +1,47 @@ +#!/bin/bash +#SBATCH --job-name=example-predcls-val-999 +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138 +#SBATCH --mem=40GB +#SBATCH -c 9 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m3 +#SBATCH --time=4:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port 10111 \ +--nproc_per_node=1 tools/relation_val_net_gen_img.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL True \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DATALOADER.ASPECT_RATIO_GROUPING False \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls/model_0030000.pth \ 
+DATA_STAT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls \ +GEN_IMG.EVAL True \ +GEN_IMG.ANNO_DIR "/h/bichengx/site-pkgs/VG-SGG/V2-Last" \ +GEN_IMG.ANNO_FILE "validation_data_bbox_dbox32_np.pkl" \ +GEN_IMG.BASE_DIR "/scratch/hdd001/home/bichengx/projects/ControlNet/my-diff-cnet/vector-scripts-BX/val-scripts/val-output/vg-a40-single-run_0-1234" \ +GEN_IMG.FOLDER_NAME "validation_image_gt" \ +GEN_IMG.NUM_ROUNDS 5 \ No newline at end of file diff --git a/vector-scripts/gen-img-sgcls-val-999-template-pass.sh b/vector-scripts/gen-img-sgcls-val-999-template-pass.sh new file mode 100755 index 00000000..d03a1283 --- /dev/null +++ b/vector-scripts/gen-img-sgcls-val-999-template-pass.sh @@ -0,0 +1,57 @@ +#!/bin/bash +#SBATCH --job-name=example-sgcls-val-999-pass +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138,gpu169 +#SBATCH --mem=30GB +#SBATCH -c 8 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo $(date) "--" ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +GEN_IMG_COMMON_PATH="/scratch/hdd001/home/bichengx/projects/ControlNet/my-diff-cnet/vector-scripts-BX/val-scripts/val-output" +PORT_NUM=$1 +GEN_IMG_ANNO_FILE=$2 +GEN_IMG_BASE_DIR=$3 +GEN_IMG_FOLDER_NAME=$4 + +echo ${GEN_IMG_ANNO_FILE} +echo ${GEN_IMG_BASE_DIR} +echo ${GEN_IMG_FOLDER_NAME} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port ${PORT_NUM} \ +--nproc_per_node=1 tools/relation_val_net_gen_img.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ 
+MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL False \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DATALOADER.ASPECT_RATIO_GROUPING False \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls/model_0032000.pth \ +DATA_STAT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls \ +GEN_IMG.EVAL True \ +GEN_IMG.ANNO_DIR "/h/bichengx/site-pkgs/VG-SGG/V2-Last" \ +GEN_IMG.ANNO_FILE ${GEN_IMG_ANNO_FILE} \ +GEN_IMG.BASE_DIR "${GEN_IMG_COMMON_PATH}/${GEN_IMG_BASE_DIR}" \ +GEN_IMG.FOLDER_NAME ${GEN_IMG_FOLDER_NAME} \ +GEN_IMG.NUM_ROUNDS 5 \ No newline at end of file diff --git a/vector-scripts/gen-img-sgcls-val-999-template.sh b/vector-scripts/gen-img-sgcls-val-999-template.sh new file mode 100755 index 00000000..338a637a --- /dev/null +++ b/vector-scripts/gen-img-sgcls-val-999-template.sh @@ -0,0 +1,47 @@ +#!/bin/bash +#SBATCH --job-name=example-sgcls-val-999 +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138 +#SBATCH --mem=40GB +#SBATCH -c 9 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m3 +#SBATCH --time=4:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port 11011 \ +--nproc_per_node=1 tools/relation_val_net_gen_img.py \ 
+--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL False \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DATALOADER.ASPECT_RATIO_GROUPING False \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls/model_0032000.pth \ +DATA_STAT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls \ +GEN_IMG.EVAL True \ +GEN_IMG.ANNO_DIR "/h/bichengx/site-pkgs/VG-SGG/V2-Last" \ +GEN_IMG.ANNO_FILE "validation_data_bbox_dbox32_np.pkl" \ +GEN_IMG.BASE_DIR "/scratch/hdd001/home/bichengx/projects/ControlNet/my-diff-cnet/vector-scripts-BX/val-scripts/val-output/vg-a40-single-run_0-1234" \ +GEN_IMG.FOLDER_NAME "validation_image_gt" \ +GEN_IMG.NUM_ROUNDS 5 \ No newline at end of file diff --git a/vector-scripts/gen-img-vctree-sgcls-val-999-template-pass-debug.sh b/vector-scripts/gen-img-vctree-sgcls-val-999-template-pass-debug.sh new file mode 100755 index 00000000..a966b4e6 --- /dev/null +++ b/vector-scripts/gen-img-vctree-sgcls-val-999-template-pass-debug.sh @@ -0,0 +1,54 @@ +#!/bin/bash +#SBATCH --job-name=example-vctree-sgcls-val-999-pass +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138,gpu169 +#SBATCH --mem=30GB +#SBATCH -c 8 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo $(date) "--" ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + 
+WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +GEN_IMG_COMMON_PATH="/scratch/hdd001/home/bichengx/projects/ControlNet/my-diff-cnet/vector-scripts-BX/val-scripts/val-output" +PORT_NUM=$1 +GEN_IMG_ANNO_FILE=$2 +GEN_IMG_BASE_DIR=$3 +GEN_IMG_FOLDER_NAME=$4 + +echo ${GEN_IMG_ANNO_FILE} +echo ${GEN_IMG_BASE_DIR} +echo ${GEN_IMG_FOLDER_NAME} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port ${PORT_NUM} \ +--nproc_per_node=1 tools/relation_val_net_gen_img.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL False \ +MODEL.ROI_RELATION_HEAD.PREDICTOR VCTreePredictor \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DATALOADER.ASPECT_RATIO_GROUPING False \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/vctree-sgcls_again_upload/model_0036000.pth \ +DATA_STAT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/vctree-sgcls_again_upload \ +GEN_IMG.EVAL True \ +GEN_IMG.ANNO_DIR "/h/bichengx/site-pkgs/VG-SGG/V2-Last" \ +GEN_IMG.ANNO_FILE ${GEN_IMG_ANNO_FILE} \ +GEN_IMG.BASE_DIR "${GEN_IMG_COMMON_PATH}/${GEN_IMG_BASE_DIR}" \ +GEN_IMG.FOLDER_NAME ${GEN_IMG_FOLDER_NAME} \ +GEN_IMG.NUM_ROUNDS 5 \ No newline at end of file diff --git a/vector-scripts/gen-img-vctree-sgcls-val-999-template-pass.sh b/vector-scripts/gen-img-vctree-sgcls-val-999-template-pass.sh new file mode 100755 index 00000000..a966b4e6 --- /dev/null +++ b/vector-scripts/gen-img-vctree-sgcls-val-999-template-pass.sh @@ -0,0 +1,54 @@ +#!/bin/bash +#SBATCH --job-name=example-vctree-sgcls-val-999-pass +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138,gpu169 +#SBATCH --mem=30GB +#SBATCH -c 8 +#SBATCH --gres=gpu:rtx6000:1 
+#SBATCH --qos=m +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo $(date) "--" ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +GEN_IMG_COMMON_PATH="/scratch/hdd001/home/bichengx/projects/ControlNet/my-diff-cnet/vector-scripts-BX/val-scripts/val-output" +PORT_NUM=$1 +GEN_IMG_ANNO_FILE=$2 +GEN_IMG_BASE_DIR=$3 +GEN_IMG_FOLDER_NAME=$4 + +echo ${GEN_IMG_ANNO_FILE} +echo ${GEN_IMG_BASE_DIR} +echo ${GEN_IMG_FOLDER_NAME} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port ${PORT_NUM} \ +--nproc_per_node=1 tools/relation_val_net_gen_img.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL False \ +MODEL.ROI_RELATION_HEAD.PREDICTOR VCTreePredictor \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DATALOADER.ASPECT_RATIO_GROUPING False \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/vctree-sgcls_again_upload/model_0036000.pth \ +DATA_STAT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/vctree-sgcls_again_upload \ +GEN_IMG.EVAL True \ +GEN_IMG.ANNO_DIR "/h/bichengx/site-pkgs/VG-SGG/V2-Last" \ +GEN_IMG.ANNO_FILE ${GEN_IMG_ANNO_FILE} \ +GEN_IMG.BASE_DIR "${GEN_IMG_COMMON_PATH}/${GEN_IMG_BASE_DIR}" \ +GEN_IMG.FOLDER_NAME ${GEN_IMG_FOLDER_NAME} \ +GEN_IMG.NUM_ROUNDS 5 \ No newline at end of file diff --git a/vector-scripts/relation-test-gen-img-predcls-val-999-template-pass.sh b/vector-scripts/relation-test-gen-img-predcls-val-999-template-pass.sh new file mode 100755 index 
00000000..e085d866 --- /dev/null +++ b/vector-scripts/relation-test-gen-img-predcls-val-999-template-pass.sh @@ -0,0 +1,57 @@ +#!/bin/bash +#SBATCH --job-name=relation-test-predcls-val-999-pass +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138,gpu169 +#SBATCH --mem=30GB +#SBATCH -c 8 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo $(date) "--" ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +GEN_IMG_COMMON_PATH="/scratch/hdd001/home/bichengx/projects/ControlNet/my-diff-cnet/vector-scripts-BX/val-scripts/val-output-relation-test" +PORT_NUM=$1 +GEN_IMG_ANNO_FILE=$2 +GEN_IMG_BASE_DIR=$3 +GEN_IMG_FOLDER_NAME=$4 + +echo ${GEN_IMG_ANNO_FILE} +echo ${GEN_IMG_BASE_DIR} +echo ${GEN_IMG_FOLDER_NAME} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port ${PORT_NUM} \ +--nproc_per_node=1 tools/relation_val_net_gen_img.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL True \ +MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DATALOADER.ASPECT_RATIO_GROUPING False \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls/model_0030000.pth \ +DATA_STAT_DIR 
/scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_predcls \ +GEN_IMG.EVAL True \ +GEN_IMG.ANNO_DIR "/scratch/hdd001/home/bichengx/projects/VG-SGG/VG-relation-test" \ +GEN_IMG.ANNO_FILE ${GEN_IMG_ANNO_FILE} \ +GEN_IMG.BASE_DIR "${GEN_IMG_COMMON_PATH}/${GEN_IMG_BASE_DIR}" \ +GEN_IMG.FOLDER_NAME ${GEN_IMG_FOLDER_NAME} \ +GEN_IMG.NUM_ROUNDS 5 \ No newline at end of file diff --git a/vector-scripts/relation-test-gen-img-sgcls-val-999-template-pass.sh b/vector-scripts/relation-test-gen-img-sgcls-val-999-template-pass.sh new file mode 100755 index 00000000..33fc85a8 --- /dev/null +++ b/vector-scripts/relation-test-gen-img-sgcls-val-999-template-pass.sh @@ -0,0 +1,57 @@ +#!/bin/bash +#SBATCH --job-name=relation-test-sgcls-val-999-pass +#SBATCH --nodes=1 +#SBATCH --partition=rtx6000 +#SBATCH --exclude=gpu138,gpu169 +#SBATCH --mem=30GB +#SBATCH -c 8 +#SBATCH --gres=gpu:rtx6000:1 +#SBATCH --qos=m +#SBATCH --time=12:00:00 +#SBATCH --export=ALL +#SBATCH --open-mode=append +#SBATCH --output=slurm-output/%x.%j.out +#SBATCH --error=slurm-output/%x.%j.err +#SBATCH --mail-user=bichengx@cs.ubc.ca +#SBATCH --mail-type=ALL + +echo $(date) "--" ${SLURM_JOB_ID} +module load my-cuda10.1+cudnn7.6.3 + +WORKSPACE="/scratch/hdd001/home/bichengx/projects/Scene-Graph-Benchmark.pytorch" +cd ${WORKSPACE} + +GEN_IMG_COMMON_PATH="/scratch/hdd001/home/bichengx/projects/ControlNet/my-diff-cnet/vector-scripts-BX/val-scripts/val-output-relation-test" +PORT_NUM=$1 +GEN_IMG_ANNO_FILE=$2 +GEN_IMG_BASE_DIR=$3 +GEN_IMG_FOLDER_NAME=$4 + +echo ${GEN_IMG_ANNO_FILE} +echo ${GEN_IMG_BASE_DIR} +echo ${GEN_IMG_FOLDER_NAME} + +source /h/bichengx/site-pkgs/anaconda3/bin/activate SGed + +CUDA_VISIBLE_DEVICES=0 python -m torch.distributed.launch --master_port ${PORT_NUM} \ +--nproc_per_node=1 tools/relation_val_net_gen_img.py \ +--config-file "configs/e2e_relation_X_101_32_8_FPN_1x.yaml" \ +MODEL.ROI_RELATION_HEAD.USE_GT_BOX True \ +MODEL.ROI_RELATION_HEAD.USE_GT_OBJECT_LABEL False \ 
+MODEL.ROI_RELATION_HEAD.PREDICTOR CausalAnalysisPredictor \ +MODEL.ROI_RELATION_HEAD.CAUSAL.EFFECT_TYPE TDE \ +MODEL.ROI_RELATION_HEAD.CAUSAL.FUSION_TYPE sum \ +MODEL.ROI_RELATION_HEAD.CAUSAL.CONTEXT_LAYER motifs \ +TEST.IMS_PER_BATCH 1 \ +TEST.RELATION.IOU_THRESHOLD 0.999 \ +DATALOADER.ASPECT_RATIO_GROUPING False \ +DTYPE "float32" \ +GLOVE_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/glove \ +MODEL.PRETRAINED_DETECTOR_CKPT /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls/model_0032000.pth \ +DATA_STAT_DIR /scratch/hdd001/home/bichengx/projects/SG-Models/upload_causal_motif_sgcls \ +GEN_IMG.EVAL True \ +GEN_IMG.ANNO_DIR "/scratch/hdd001/home/bichengx/projects/VG-SGG/VG-relation-test" \ +GEN_IMG.ANNO_FILE ${GEN_IMG_ANNO_FILE} \ +GEN_IMG.BASE_DIR "${GEN_IMG_COMMON_PATH}/${GEN_IMG_BASE_DIR}" \ +GEN_IMG.FOLDER_NAME ${GEN_IMG_FOLDER_NAME} \ +GEN_IMG.NUM_ROUNDS 5 \ No newline at end of file diff --git a/vector-scripts/zz-submit-pass-2-a1.sh b/vector-scripts/zz-submit-pass-2-a1.sh new file mode 100755 index 00000000..9ec995ce --- /dev/null +++ b/vector-scripts/zz-submit-pass-2-a1.sh @@ -0,0 +1,17 @@ +echo 5 +sbatch relation-test-gen-img-sgcls-val-999-template-pass.sh 12051 "triplet_swaped_dict_training_1_input.pkl" "vg-norel-prompt-object-original-a40-triplet_swaped_dict_training_1_input-object-original-iter_48200-run_0-1234" "generated_image" +sleep 1s +echo 16 +sbatch relation-test-gen-img-sgcls-val-999-template-pass.sh 12161 "triplet_swaped_dict_training_1_input.pkl" "vg-norel-prompt-object-original-a40-triplet_swaped_dict_training_1_input-object-original-iter_96400-run_0-1234" "generated_image" +sleep 1s +echo 10 +sbatch relation-test-gen-img-sgcls-val-999-template-pass.sh 12101 "triplet_swaped_dict_validation_1_input.pkl" "vg-norel-prompt-object-original-a40-triplet_swaped_dict_validation_1_input-object-original-iter_48200-run_0-1234" "generated_image" +sleep 1s +echo 17 +sbatch relation-test-gen-img-sgcls-val-999-template-pass.sh 
12171 "triplet_swaped_dict_validation_1_input.pkl" "vg-norel-prompt-object-original-a40-triplet_swaped_dict_validation_1_input-object-original-iter_96400-run_0-1234" "generated_image" +sleep 1s +echo 15 +sbatch relation-test-gen-img-sgcls-val-999-template-pass.sh 12151 "validation_common_semantic_triplets.pkl" "vg-norel-prompt-object-original-a40-validation_common_semantic_triplets-object-original-iter_48200-run_0-1234" "generated_image" +sleep 1s +echo 18 +sbatch relation-test-gen-img-sgcls-val-999-template-pass.sh 12181 "validation_common_semantic_triplets.pkl" "vg-norel-prompt-object-original-a40-validation_common_semantic_triplets-object-original-iter_96400-run_0-1234" "generated_image" \ No newline at end of file diff --git a/vector-scripts/zz-submit-pass-2.sh b/vector-scripts/zz-submit-pass-2.sh new file mode 100755 index 00000000..b9164dc2 --- /dev/null +++ b/vector-scripts/zz-submit-pass-2.sh @@ -0,0 +1,23 @@ +echo 1 +sbatch gen-img-sgcls-val-999-template-pass.sh 12011 "validation_common_data_bbox_dbox32_np.pkl" "vg-norel-prompt-object-original-a40-common-object-original-iter_96400-run_22-1234" "generated_image" +sleep 1s +echo 2 +sbatch gen-img-sgcls-val-999-template-pass.sh 12021 "validation_common_data_bbox_dbox32_np.pkl" "vg-norel-prompt-object-original-a40-common-object-original-iter_48200-run_22-1234" "generated_image" +sleep 1s +echo 3 +sbatch gen-img-sgcls-val-999-template-pass.sh 12031 "validation_common_data_bbox_dbox32_np.pkl" "vg-prompt-relation-unique-original-a40-common-relation-unique-original-iter_96400-run_22-1234" "generated_image" +sleep 1s +echo 4 +sbatch gen-img-sgcls-val-999-template-pass.sh 12041 "validation_common_data_bbox_dbox32_np.pkl" "vg-prompt-relation-unique-original-a40-common-relation-unique-original-iter_48200-run_22-1234" "generated_image" +sleep 1s +echo 5 +sbatch gen-img-sgcls-val-999-template-pass.sh 12051 "validation_common_data_bbox_dbox32_np.pkl" 
"vg-norel-prompt-object-random-a40-common-object-original-iter_96400-run_22-1234" "generated_image" +sleep 1s +echo 6 +sbatch gen-img-sgcls-val-999-template-pass.sh 12061 "validation_common_data_bbox_dbox32_np.pkl" "vg-prompt-relation-unique-random-a40-common-relation-unique-original-iter_96400-run_22-1234" "generated_image" +sleep 1s +echo 7 +sbatch gen-img-sgcls-val-999-template-pass.sh 12071 "validation_common_data_bbox_dbox32_np.pkl" "vg-norel-prompt-object-random-a40-common-object-original-iter_48200-run_22-1234" "generated_image" +sleep 1s +echo 8 +sbatch gen-img-sgcls-val-999-template-pass.sh 12081 "validation_common_data_bbox_dbox32_np.pkl" "vg-prompt-relation-unique-random-a40-common-relation-unique-original-iter_48338-run_22-1234" "generated_image" \ No newline at end of file diff --git a/vector-scripts/zz-submit-pass-a1-second.sh b/vector-scripts/zz-submit-pass-a1-second.sh new file mode 100755 index 00000000..06a18178 --- /dev/null +++ b/vector-scripts/zz-submit-pass-a1-second.sh @@ -0,0 +1,17 @@ +echo 5 +sbatch relation-test-gen-img-predcls-val-999-template-pass.sh 11051 "triplet_swaped_dict_training_1_input.pkl" "vg-norel-prompt-object-original-a40-triplet_swaped_dict_training_1_input-object-original-iter_48200-run_0-1234" "generated_image" +sleep 1s +echo 16 +sbatch relation-test-gen-img-predcls-val-999-template-pass.sh 11161 "triplet_swaped_dict_training_1_input.pkl" "vg-norel-prompt-object-original-a40-triplet_swaped_dict_training_1_input-object-original-iter_96400-run_0-1234" "generated_image" +sleep 1s +echo 10 +sbatch relation-test-gen-img-predcls-val-999-template-pass.sh 11101 "triplet_swaped_dict_validation_1_input.pkl" "vg-norel-prompt-object-original-a40-triplet_swaped_dict_validation_1_input-object-original-iter_48200-run_0-1234" "generated_image" +sleep 1s +echo 17 +sbatch relation-test-gen-img-predcls-val-999-template-pass.sh 11171 "triplet_swaped_dict_validation_1_input.pkl" 
"vg-norel-prompt-object-original-a40-triplet_swaped_dict_validation_1_input-object-original-iter_96400-run_0-1234" "generated_image" +sleep 1s +echo 15 +sbatch relation-test-gen-img-predcls-val-999-template-pass.sh 11151 "validation_common_semantic_triplets.pkl" "vg-norel-prompt-object-original-a40-validation_common_semantic_triplets-object-original-iter_48200-run_0-1234" "generated_image" +sleep 1s +echo 18 +sbatch relation-test-gen-img-predcls-val-999-template-pass.sh 11181 "validation_common_semantic_triplets.pkl" "vg-norel-prompt-object-original-a40-validation_common_semantic_triplets-object-original-iter_96400-run_0-1234" "generated_image" \ No newline at end of file diff --git a/vector-scripts/zz-submit-pass-a1.sh b/vector-scripts/zz-submit-pass-a1.sh new file mode 100755 index 00000000..e69de29b diff --git a/vector-scripts/zz-submit-pass.sh b/vector-scripts/zz-submit-pass.sh new file mode 100755 index 00000000..e327970a --- /dev/null +++ b/vector-scripts/zz-submit-pass.sh @@ -0,0 +1,23 @@ +echo 1 +sbatch gen-img-predcls-val-999-template-pass.sh 11011 "validation_common_data_bbox_dbox32_np.pkl" "vg-norel-prompt-object-original-a40-common-object-original-iter_96400-run_22-1234" "generated_image" +sleep 1s +echo 2 +sbatch gen-img-predcls-val-999-template-pass.sh 11021 "validation_common_data_bbox_dbox32_np.pkl" "vg-norel-prompt-object-original-a40-common-object-original-iter_48200-run_22-1234" "generated_image" +sleep 1s +echo 3 +sbatch gen-img-predcls-val-999-template-pass.sh 11031 "validation_common_data_bbox_dbox32_np.pkl" "vg-prompt-relation-unique-original-a40-common-relation-unique-original-iter_96400-run_22-1234" "generated_image" +sleep 1s +echo 4 +sbatch gen-img-predcls-val-999-template-pass.sh 11041 "validation_common_data_bbox_dbox32_np.pkl" "vg-prompt-relation-unique-original-a40-common-relation-unique-original-iter_48200-run_22-1234" "generated_image" +sleep 1s +echo 5 +sbatch gen-img-predcls-val-999-template-pass.sh 11051 
"validation_common_data_bbox_dbox32_np.pkl" "vg-norel-prompt-object-random-a40-common-object-original-iter_96400-run_22-1234" "generated_image" +sleep 1s +echo 6 +sbatch gen-img-predcls-val-999-template-pass.sh 11061 "validation_common_data_bbox_dbox32_np.pkl" "vg-prompt-relation-unique-random-a40-common-relation-unique-original-iter_96400-run_22-1234" "generated_image" +sleep 1s +echo 7 +sbatch gen-img-predcls-val-999-template-pass.sh 11071 "validation_common_data_bbox_dbox32_np.pkl" "vg-norel-prompt-object-random-a40-common-object-original-iter_48200-run_22-1234" "generated_image" +sleep 1s +echo 8 +sbatch gen-img-predcls-val-999-template-pass.sh 11081 "validation_common_data_bbox_dbox32_np.pkl" "vg-prompt-relation-unique-random-a40-common-relation-unique-original-iter_48338-run_22-1234" "generated_image" \ No newline at end of file