+from collections import defaultdict
+
 import torch
 
-import transforms as T
+import transforms as reference_transforms
+
+
+def get_modules(use_v2):
+    # We need a protected import to avoid the V2 warning in case just V1 is used
+    if use_v2:
+        import torchvision.datapoints
+        import torchvision.transforms.v2
+
+        return torchvision.transforms.v2, torchvision.datapoints
+    else:
+        return reference_transforms, None
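To make the dispatch concrete, here is a small usage sketch (not part of the diff); it assumes this file is importable as `presets`:

```python
# Sketch: the two module pairs get_modules() can hand back.
from presets import get_modules  # hypothetical import path for this file

T, datapoints = get_modules(use_v2=False)  # v1: reference transforms module
assert datapoints is None

T, datapoints = get_modules(use_v2=True)   # v2: torchvision.transforms.v2 + datapoints
# The v2 import happens only inside this call, so v1-only runs never trigger
# the torchvision.transforms.v2 beta warning.
```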
 
 
 class DetectionPresetTrain:
-    def __init__(self, *, data_augmentation, hflip_prob=0.5, mean=(123.0, 117.0, 104.0)):
+    # Note: this transform assumes that the inputs to forward() are always PIL
+    # images, regardless of the backend parameter.
+    def __init__(
+        self,
+        *,
+        data_augmentation,
+        hflip_prob=0.5,
+        mean=(123.0, 117.0, 104.0),
+        backend="pil",
+        use_v2=False,
+    ):
+
+        T, datapoints = get_modules(use_v2)
+
+        transforms = []
+        backend = backend.lower()
+        if backend == "datapoint":
+            transforms.append(T.ToImageTensor())
+        elif backend == "tensor":
+            transforms.append(T.PILToTensor())
+        elif backend != "pil":
+            raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
+
         if data_augmentation == "hflip":
-            self.transforms = T.Compose(
-                [
-                    T.RandomHorizontalFlip(p=hflip_prob),
-                    T.PILToTensor(),
-                    T.ConvertImageDtype(torch.float),
-                ]
-            )
+            transforms += [T.RandomHorizontalFlip(p=hflip_prob)]
         elif data_augmentation == "lsj":
-            self.transforms = T.Compose(
-                [
-                    T.ScaleJitter(target_size=(1024, 1024)),
-                    T.FixedSizeCrop(size=(1024, 1024), fill=mean),
-                    T.RandomHorizontalFlip(p=hflip_prob),
-                    T.PILToTensor(),
-                    T.ConvertImageDtype(torch.float),
-                ]
-            )
+            transforms += [
+                T.ScaleJitter(target_size=(1024, 1024), antialias=True),
+                # TODO: FixedSizeCrop below doesn't work on tensors!
+                reference_transforms.FixedSizeCrop(size=(1024, 1024), fill=mean),
+                T.RandomHorizontalFlip(p=hflip_prob),
+            ]
         elif data_augmentation == "multiscale":
-            self.transforms = T.Compose(
-                [
-                    T.RandomShortestSize(
-                        min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333
-                    ),
-                    T.RandomHorizontalFlip(p=hflip_prob),
-                    T.PILToTensor(),
-                    T.ConvertImageDtype(torch.float),
-                ]
-            )
+            transforms += [
+                T.RandomShortestSize(min_size=(480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800), max_size=1333),
+                T.RandomHorizontalFlip(p=hflip_prob),
+            ]
         elif data_augmentation == "ssd":
-            self.transforms = T.Compose(
-                [
-                    T.RandomPhotometricDistort(),
-                    T.RandomZoomOut(fill=list(mean)),
-                    T.RandomIoUCrop(),
-                    T.RandomHorizontalFlip(p=hflip_prob),
-                    T.PILToTensor(),
-                    T.ConvertImageDtype(torch.float),
-                ]
-            )
+            fill = defaultdict(lambda: mean, {datapoints.Mask: 0}) if use_v2 else list(mean)
+            transforms += [
+                T.RandomPhotometricDistort(),
+                T.RandomZoomOut(fill=fill),
+                T.RandomIoUCrop(),
+                T.RandomHorizontalFlip(p=hflip_prob),
+            ]
         elif data_augmentation == "ssdlite":
-            self.transforms = T.Compose(
-                [
-                    T.RandomIoUCrop(),
-                    T.RandomHorizontalFlip(p=hflip_prob),
-                    T.PILToTensor(),
-                    T.ConvertImageDtype(torch.float),
-                ]
-            )
+            transforms += [
+                T.RandomIoUCrop(),
+                T.RandomHorizontalFlip(p=hflip_prob),
+            ]
         else:
             raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"')
 
+        if backend == "pil":
+            # Note: we could just convert to pure tensors even in v2.
+            transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
+
+        transforms += [T.ConvertImageDtype(torch.float)]
+
+        if use_v2:
+            transforms += [
+                T.ConvertBoundingBoxFormat(datapoints.BoundingBoxFormat.XYXY),
+                T.SanitizeBoundingBox(),
+            ]
+
+        self.transforms = T.Compose(transforms)
+
     def __call__(self, img, target):
         return self.transforms(img, target)
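For reviewers, a usage sketch of the new constructor (not part of the diff). It assumes this file is importable as `presets` and exercises the v1 path, where plain tensors in the target are accepted; with `use_v2=True` the target entries are expected to be datapoints (e.g. via the dataset wrapper). Note the ssd branch's `fill` under v2: a defaultdict mapping datapoint types to fill values, so zoomed-out images get padded with the pixel mean while `Mask`s get 0. Box and label values below are made up:

```python
import torch
from PIL import Image

from presets import DetectionPresetTrain  # hypothetical import path for this file

# v1 path: PIL input, plain-tensor target.
preset = DetectionPresetTrain(data_augmentation="ssd", backend="pil", use_v2=False)

img = Image.new("RGB", (640, 480))
target = {
    "boxes": torch.tensor([[10.0, 20.0, 200.0, 240.0]]),  # XYXY, illustrative
    "labels": torch.tensor([1]),
}
img, target = preset(img, target)  # __call__ forwards to the composed pipeline
```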
 
 
 class DetectionPresetEval:
-    def __init__(self):
-        self.transforms = T.Compose(
-            [
-                T.PILToTensor(),
-                T.ConvertImageDtype(torch.float),
-            ]
-        )
+    def __init__(self, backend="pil", use_v2=False):
+        T, _ = get_modules(use_v2)
+        transforms = []
+        backend = backend.lower()
+        if backend == "pil":
+            # Note: we could just convert to pure tensors even in v2?
+            transforms += [T.ToImageTensor() if use_v2 else T.PILToTensor()]
+        elif backend == "tensor":
+            transforms += [T.PILToTensor()]
+        elif backend == "datapoint":
+            transforms += [T.ToImageTensor()]
+        else:
+            raise ValueError(f"backend can be 'datapoint', 'tensor' or 'pil', but got {backend}")
+
+        transforms += [T.ConvertImageDtype(torch.float)]
+        self.transforms = T.Compose(transforms)
 
     def __call__(self, img, target):
         return self.transforms(img, target)
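Similarly, a sketch of the eval preset (again assuming the hypothetical `presets` import). It only converts the input and rescales it to float, so the two v1 backends should produce the same tensor for a PIL input:

```python
import torch
from PIL import Image

from presets import DetectionPresetEval  # hypothetical import path for this file

img = Image.new("RGB", (320, 240), color=(123, 117, 104))
for backend in ("pil", "tensor"):  # "datapoint" additionally requires use_v2=True
    preset = DetectionPresetEval(backend=backend)
    out, _ = preset(img, target=None)
    assert out.dtype == torch.float32  # ConvertImageDtype rescales to [0, 1]
```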