test_models_detection_negative_samples.py
import pytest
import torch
import torchvision.models
from common_utils import assert_equal
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead
from torchvision.models.detection.roi_heads import RoIHeads
from torchvision.models.detection.rpn import AnchorGenerator, RegionProposalNetwork, RPNHead
from torchvision.ops import MultiScaleRoIAlign


class TestModelsDetectionNegativeSamples:
def _make_empty_sample(self, add_masks=False, add_keypoints=False):
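        """Build a single random image with an empty (negative) target:
        zero boxes and labels, and optionally empty masks or keypoints."""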
images = [torch.rand((3, 100, 100), dtype=torch.float32)]
boxes = torch.zeros((0, 4), dtype=torch.float32)
negative_target = {
"boxes": boxes,
"labels": torch.zeros(0, dtype=torch.int64),
"image_id": 4,
"area": (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]),
"iscrowd": torch.zeros((0,), dtype=torch.int64),
}
if add_masks:
negative_target["masks"] = torch.zeros(0, 100, 100, dtype=torch.uint8)
if add_keypoints:
negative_target["keypoints"] = torch.zeros(17, 0, 3, dtype=torch.float32)
targets = [negative_target]
return images, targets

    def test_targets_to_anchors(self):
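        """With no ground-truth boxes, RPN target assignment should label
        every anchor as background and match it to an all-zero box."""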
_, targets = self._make_empty_sample()
anchors = [torch.randint(-50, 50, (3, 4), dtype=torch.float32)]
anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
rpn_head = RPNHead(4, rpn_anchor_generator.num_anchors_per_location()[0])
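        # Positional args: fg/bg IoU thresholds, batch size per image, positive
        # fraction, pre-/post-NMS top-N, NMS threshold, score threshold.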
head = RegionProposalNetwork(rpn_anchor_generator, rpn_head, 0.5, 0.3, 256, 0.5, 2000, 2000, 0.7, 0.05)
labels, matched_gt_boxes = head.assign_targets_to_anchors(anchors, targets)
assert labels[0].sum() == 0
assert labels[0].shape == torch.Size([anchors[0].shape[0]])
assert labels[0].dtype == torch.float32
assert matched_gt_boxes[0].sum() == 0
assert matched_gt_boxes[0].shape == anchors[0].shape
assert matched_gt_boxes[0].dtype == torch.float32

    def test_assign_targets_to_proposals(self):
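        """With no ground-truth boxes, every proposal should be matched to
        index 0 and labeled as background (label 0)."""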
proposals = [torch.randint(-50, 50, (20, 4), dtype=torch.float32)]
gt_boxes = [torch.zeros((0, 4), dtype=torch.float32)]
gt_labels = [torch.tensor([[0]], dtype=torch.int64)]
box_roi_pool = MultiScaleRoIAlign(featmap_names=["0", "1", "2", "3"], output_size=7, sampling_ratio=2)
resolution = box_roi_pool.output_size[0]
        representation_size = 1024
        box_head = TwoMLPHead(4 * resolution**2, representation_size)
        box_predictor = FastRCNNPredictor(representation_size, 2)
        roi_heads = RoIHeads(
            # Box
            box_roi_pool,
            box_head,
            box_predictor,
            0.5,  # fg_iou_thresh
            0.5,  # bg_iou_thresh
            512,  # batch_size_per_image
            0.25,  # positive_fraction
            None,  # bbox_reg_weights
            0.05,  # score_thresh
            0.5,  # nms_thresh
            100,  # detections_per_img
        )
matched_idxs, labels = roi_heads.assign_targets_to_proposals(proposals, gt_boxes, gt_labels)
assert matched_idxs[0].sum() == 0
assert matched_idxs[0].shape == torch.Size([proposals[0].shape[0]])
assert matched_idxs[0].dtype == torch.int64
assert labels[0].sum() == 0
assert labels[0].shape == torch.Size([proposals[0].shape[0]])
assert labels[0].dtype == torch.int64

    @pytest.mark.parametrize(
"name",
[
"fasterrcnn_resnet50_fpn",
"fasterrcnn_mobilenet_v3_large_fpn",
"fasterrcnn_mobilenet_v3_large_320_fpn",
],
)
def test_forward_negative_sample_frcnn(self, name):
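        """A full forward pass on an empty target should yield zero box and
        RPN regression losses for each Faster R-CNN variant."""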
model = torchvision.models.get_model(
name, weights=None, weights_backbone=None, num_classes=2, min_size=100, max_size=100
)
images, targets = self._make_empty_sample()
loss_dict = model(images, targets)
assert_equal(loss_dict["loss_box_reg"], torch.tensor(0.0))
assert_equal(loss_dict["loss_rpn_box_reg"], torch.tensor(0.0))

    def test_forward_negative_sample_mrcnn(self):
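        """Mask R-CNN on an empty target should also report a zero mask loss."""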
model = torchvision.models.detection.maskrcnn_resnet50_fpn(
weights=None, weights_backbone=None, num_classes=2, min_size=100, max_size=100
)
images, targets = self._make_empty_sample(add_masks=True)
loss_dict = model(images, targets)
assert_equal(loss_dict["loss_box_reg"], torch.tensor(0.0))
assert_equal(loss_dict["loss_rpn_box_reg"], torch.tensor(0.0))
assert_equal(loss_dict["loss_mask"], torch.tensor(0.0))

    def test_forward_negative_sample_krcnn(self):
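        """Keypoint R-CNN on an empty target should also report a zero keypoint loss."""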
model = torchvision.models.detection.keypointrcnn_resnet50_fpn(
weights=None, weights_backbone=None, num_classes=2, min_size=100, max_size=100
)
images, targets = self._make_empty_sample(add_keypoints=True)
loss_dict = model(images, targets)
assert_equal(loss_dict["loss_box_reg"], torch.tensor(0.0))
assert_equal(loss_dict["loss_rpn_box_reg"], torch.tensor(0.0))
assert_equal(loss_dict["loss_keypoint"], torch.tensor(0.0))

    def test_forward_negative_sample_retinanet(self):
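        """RetinaNet on an empty target should report a zero bbox regression loss."""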
model = torchvision.models.detection.retinanet_resnet50_fpn(
weights=None, weights_backbone=None, num_classes=2, min_size=100, max_size=100
)
images, targets = self._make_empty_sample()
loss_dict = model(images, targets)
assert_equal(loss_dict["bbox_regression"], torch.tensor(0.0))

    def test_forward_negative_sample_fcos(self):
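        """FCOS on an empty target should report zero bbox regression and
        centerness losses."""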
model = torchvision.models.detection.fcos_resnet50_fpn(
weights=None, weights_backbone=None, num_classes=2, min_size=100, max_size=100
)
images, targets = self._make_empty_sample()
loss_dict = model(images, targets)
assert_equal(loss_dict["bbox_regression"], torch.tensor(0.0))
assert_equal(loss_dict["bbox_ctrness"], torch.tensor(0.0))

    def test_forward_negative_sample_ssd(self):
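        """SSD300 on an empty target should report a zero bbox regression loss."""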
model = torchvision.models.detection.ssd300_vgg16(weights=None, weights_backbone=None, num_classes=2)
images, targets = self._make_empty_sample()
loss_dict = model(images, targets)
assert_equal(loss_dict["bbox_regression"], torch.tensor(0.0))


if __name__ == "__main__":
pytest.main([__file__])