|
# Select the compute device once at startup: prefer CUDA when a GPU is
# available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('device: ', device)
|
# Wall-clock timestamp taken at script start; the footer at the end of the
# script subtracts it from time.time() to report total runtime.
script_time = time.time()
| 22 | + |
def q(text = ''):
    """Debug helper: print *text* prefixed with '> ', then abort the script.

    Raises SystemExit via sys.exit(), so nothing after the call runs.
    """
    message = '> {}'.format(text)
    print(message)
    sys.exit()
@@ -70,7 +72,6 @@ def plot_losses(running_train_loss, running_val_loss, train_epoch_loss, val_epoc |
70 | 72 |
|
# Image preprocessing pipeline shared by the train and val datasets:
# round-trip the sample through a PIL image, then back to a float tensor.
# NOTE(review): assumes DAE_dataset yields inputs ToPILImage can accept
# (tensor or ndarray) — confirm against the dataset implementation.
transform = transforms.Compose([transforms.ToPILImage(), transforms.ToTensor()])
72 | 74 |
|
73 | | - |
# Denoising-autoencoder datasets: train and validation splits live in
# separate subdirectories of data_dir and share the same transform pipeline.
train_dataset = DAE_dataset(os.path.join(data_dir, train_dir), transform = transform)
val_dataset = DAE_dataset(os.path.join(data_dir, val_dir), transform = transform)
76 | 77 |
|
@@ -127,7 +128,7 @@ def plot_losses(running_train_loss, running_val_loss, train_epoch_loss, val_epoc |
127 | 128 | ### |
128 | 129 |
|
129 | 130 | for epoch in range(epochs_till_now, epochs_till_now+epochs): |
130 | | - print('\n===== EPOCH {}/{} ====='.format(epochs_till_now + 1, epochs_till_now + epochs)) |
| 131 | + print('\n===== EPOCH {}/{} ====='.format(epoch + 1, epochs_till_now + epochs)) |
131 | 132 | print('\nTRAINING...') |
132 | 133 | epoch_train_start_time = time.time() |
133 | 134 | model.train() |
@@ -189,3 +190,10 @@ def plot_losses(running_train_loss, running_val_loss, train_epoch_loss, val_epoc |
189 | 190 | 'val_epoch_loss': val_epoch_loss}, |
190 | 191 | 'epochs_till_now': epoch+1}, |
191 | 192 | os.path.join(models_dir, 'model{}.pth'.format(str(epoch + 1).zfill(2)))) |
| 193 | + |
# Footer: report the total wall-clock runtime of the script (elapsed since
# the script_time stamp taken at startup), broken into hours/minutes/seconds.
total_script_time = time.time() - script_time
h, rem = divmod(total_script_time, 3600)
m, s = divmod(rem, 60)
print(f'\ntotal time taken for running this script: {int(h)} hrs {int(m)} mins {int(s)} secs')

print('\nFin.')
0 commit comments