|
1 | 1 | # coding: utf-8
|
| 2 | +import os |
2 | 3 | import torch
|
3 | 4 | import torchvision.utils as vutils
|
4 | 5 | import numpy as np
|
|
7 | 8 | from tensorboardX import SummaryWriter
|
8 | 9 |
|
# Demo setup: an untrained ResNet-18 (used later for graph logging) and a
# TensorBoardX event writer targeting ../../Result/runs.
resnet18 = models.resnet18(False)

# os.path.join is appropriate here — the run directory is a real filesystem path.
writer = SummaryWriter(os.path.join("..", "..", "Result", "runs"))

sample_rate = 44100  # Hz, sampling rate for the audio-logging demo below
# C-major scale frequencies (C4..A4) padded with repeated A4 (440 Hz).
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
|
13 | 14 |
|
|
# NOTE(review): these statements execute inside the per-iteration training
# loop (`for n_iter in ...:` — header is above this excerpt); confirm the
# surrounding indentation when merging.
s1 = torch.rand(1)  # value to keep
s2 = torch.rand(1)

# data grouping by `slash`
# BUG FIX: TensorBoard tags use '/' as the grouping separator on every OS —
# they are NOT filesystem paths. os.path.join("data", ...) produces
# "data\\scalar_systemtime" on Windows, which silently breaks tag grouping,
# so the tags must stay literal slash-separated strings.
writer.add_scalar('data/scalar_systemtime', s1[0], n_iter)
# Same scalar, but with an explicit walltime instead of time.time().
writer.add_scalar('data/scalar_customtime', s1[0], n_iter, walltime=n_iter)
# Several related curves under one tag group.
writer.add_scalars('data/scalar_group', {"xsinx": n_iter * np.sin(n_iter),
                                         "xcosx": n_iter * np.cos(n_iter),
                                         "arctanx": np.arctan(n_iter)}, n_iter)
x = torch.rand(32, 3, 64, 64)  # output from network
|
|
56 | 57 | precision,
|
57 | 58 | recall, n_iter)
|
# Dump every scalar logged so far to JSON for external processing.
# (This one IS a real filesystem path, so os.path.join is the right call.)
writer.export_scalars_to_json(os.path.join("..", "..", "Result", "all_scalars.json"))
60 | 61 |
|
61 |
# Project the first 100 MNIST *test* images into the embedding visualizer:
# each 28x28 image is flattened into a 784-dim feature vector, with the raw
# image attached as the thumbnail (label_img needs an explicit channel dim).
dataset = datasets.MNIST(os.path.join("..", "..", "Data", "mnist"), train=False, download=True)
images = dataset.test_data[:100].float()
label = dataset.test_labels[:100]
features = images.view(100, 784)
writer.add_embedding(features, metadata=label, label_img=images.unsqueeze(1))
writer.add_embedding(features, global_step=1, tag='noMetadata')

# Same projection for the *training* split.
# NOTE(review): test_data/test_labels and train_data/train_labels are the
# legacy torchvision MNIST attribute names (newer releases expose
# .data/.targets and warn on these) — kept as-is to match the pinned
# torchvision version; confirm before upgrading.
dataset = datasets.MNIST(os.path.join("..", "..", "Data", "mnist"), train=True, download=True)
images_train = dataset.train_data[:100].float()
labels_train = dataset.train_labels[:100]
features_train = images_train.view(100, 784)
|
|
0 commit comments