Commit f0c54fe

added custom test dataset
1 parent f35d6f9 commit f0c54fe

1 file changed: +12 -6 lines changed
datasets.py (+12 -6)
@@ -6,7 +6,7 @@ class DAE_dataset(Dataset):
     def __init__(self, data_dir, transform = None):
         self.data_dir = data_dir
         self.transform = transform
-        self.imgs_data = self.get_data(os.path.join(self.data_dir, 'imgs'))
+        self.imgs_data = self.get_data(os.path.join(self.data_dir, 'imgs'))
         self.noisy_imgs_data = self.get_data(os.path.join(self.data_dir, 'noisy'))
 
     def get_data(self, data_path):
@@ -16,22 +16,25 @@ def get_data(self, data_path):
         return data
 
     def __getitem__(self, index):
+        # read images in grayscale, then invert them
         img = 255 - cv2.imread(self.imgs_data[index] ,0)
         noisy_img = 255 - cv2.imread(self.noisy_imgs_data[index] ,0)
 
-        img = np.pad(img, ((96, 96), (0,0)), constant_values=(0,0))
-        noisy_img = np.pad(noisy_img, ((96, 96), (0,0)), constant_values=(0,0))
+        # padding the images
+        # img = np.pad(img, ((96, 96), (0,0)), constant_values=(0,0))
+        # noisy_img = np.pad(noisy_img, ((96, 96), (0,0)), constant_values=(0,0))
 
         if self.transform is not None:
             img = self.transform(img)
             noisy_img = self.transform(noisy_img)
+
         return img, noisy_img
 
     def __len__(self):
         return len(self.imgs_data)
 
 class custom_test_dataset(Dataset):
-    def __init__(self, data_dir, transform = None, out_size = (256, 256)):
+    def __init__(self, data_dir, transform = None, out_size = (64, 256)):
         self.data_dir = data_dir
         self.transform = transform
         self.out_size = out_size
@@ -44,13 +47,16 @@ def get_data(self, data_path):
         return data
 
     def __getitem__(self, index):
+        # read images in grayscale, then invert them
         img = 255 - cv2.imread(self.imgs_data[index] ,0)
 
+        # aspect ratio of the image required to be fed into the model (height/width)
         out_ar = self.out_size[0]/self.out_size[1]
 
-        # aspect ratio is the padding criteria here
+        # aspect ratio of the image read
         ar = img.shape[0]/img.shape[1] # heigth/width
 
+        # aspect ratio is the padding criteria here
         if ar >= out_ar:
             # pad width
             pad = int(img.shape[0]/out_ar) - img.shape[1]
@@ -68,7 +74,7 @@ def __getitem__(self, index):
 
         if self.transform is not None:
             img = self.transform(img)
-        return img, img
+        return img
 
     def __len__(self):
         return len(self.imgs_data)
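For context, the aspect-ratio padding that custom_test_dataset.__getitem__ performs can be summarized with the standalone sketch below. The function name pad_to_aspect_ratio, the even split of the padding, and the pad-height branch are illustrative assumptions; the diff above only shows the pad-width branch and the new out_size default of (64, 256).

import numpy as np

def pad_to_aspect_ratio(img, out_size=(64, 256)):
    # target aspect ratio (height/width) required by the model
    out_ar = out_size[0] / out_size[1]
    # aspect ratio of the image that was read
    ar = img.shape[0] / img.shape[1]
    if ar >= out_ar:
        # image is too tall for the target ratio: pad its width
        pad = int(img.shape[0] / out_ar) - img.shape[1]
        img = np.pad(img, ((0, 0), (pad // 2, pad - pad // 2)), constant_values=0)
    else:
        # image is too wide for the target ratio: pad its height (illustrative branch)
        pad = int(img.shape[1] * out_ar) - img.shape[0]
        img = np.pad(img, ((pad // 2, pad - pad // 2), (0, 0)), constant_values=0)
    return img

For example, a 32x256 image against out_size = (64, 256) has ar = 0.125, which is below out_ar = 0.25, so 32 rows of zero padding are added (16 on top, 16 on the bottom in this sketch), giving the 64x256 proportions expected by the model.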

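A hedged usage sketch of the two dataset classes after this change: DAE_dataset still yields (img, noisy_img) pairs for training, while custom_test_dataset now yields a single image per item (return img instead of return img, img). The directory paths, transform pipeline, and loader settings below are assumptions for illustration, not part of this commit.

from torch.utils.data import DataLoader
from torchvision import transforms

from datasets import DAE_dataset, custom_test_dataset

# convert the uint8 numpy arrays produced by the datasets into normalized tensors
transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((64, 256)),
    transforms.ToTensor(),
])

train_ds = DAE_dataset('data/train', transform=transform)        # (img, noisy_img) pairs
test_ds = custom_test_dataset('data/test', transform=transform,
                              out_size=(64, 256))                # single padded img per item

train_loader = DataLoader(train_ds, batch_size=16, shuffle=True)
test_loader = DataLoader(test_ds, batch_size=1, shuffle=False)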