diff --git a/.gitignore b/.gitignore index 234e54d..854eb76 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,14 @@ .idea *.key -*.pdf +.nsml* +*.pt X* -data +mnist_data ppts -client_secret.json \ No newline at end of file +.ipynb_checkpoints +client_secret.json +__pycache__/ +.py* +tmp +template.pdf +*.ipynb diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..a9e0d08 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,13 @@ +# code below is taken from https://github.com/fchollet/keras/blob/master/.travis.yml +sudo: required +dist: trusty +language: python +python: # Only two versions for now + - "2.7" + - "3.6" +# command to install dependencies +install: "pip install -r requirements.txt" + +script: + - python -m compileall . + - ls ??_*.py|xargs -n 1 -P 3 python diff --git a/01_basics.py b/01_basics.py index 493e0fa..a96bbc2 100644 --- a/01_basics.py +++ b/01_basics.py @@ -5,7 +5,7 @@ y_data = [2.0, 4.0, 6.0] -# our model forward pass +# our model for the forward pass def forward(x): return x * w @@ -15,22 +15,29 @@ def loss(x, y): y_pred = forward(x) return (y_pred - y) * (y_pred - y) - +# List of weights/Mean square Error (Mse) for each input w_list = [] mse_list = [] for w in np.arange(0.0, 4.1, 0.1): + # Print the weights and initialize the lost print("w=", w) l_sum = 0 + for x_val, y_val in zip(x_data, y_data): + # For each input and output, calculate y_hat + # Compute the total loss and add to the total error y_pred_val = forward(x_val) l = loss(x_val, y_val) l_sum += l print("\t", x_val, y_val, y_pred_val, l) - print("NSE=", l_sum / 3) + # Now compute the Mean squared error (mse) of each + # Aggregate the weight/mse from this run + print("MSE=", l_sum / len(x_data)) w_list.append(w) - mse_list.append(l_sum / 3) + mse_list.append(l_sum / len(x_data)) +# Plot it all plt.plot(w_list, mse_list) plt.ylabel('Loss') plt.xlabel('w') diff --git a/02_manual_gradient.py b/02_manual_gradient.py index 37f60b6..a84e789 100644 --- a/02_manual_gradient.py +++ b/02_manual_gradient.py @@ -1,7 +1,8 @@ +# Training Data x_data = [1.0, 2.0, 3.0] y_data = [2.0, 4.0, 6.0] -w = 1.0 # any random value +w = 1.0 # a random guess: random value # our model forward pass @@ -19,18 +20,21 @@ def loss(x, y): def gradient(x, y): # d_loss/d_w return 2 * x * (x * w - y) + # Before training -print("predict (before training)", 4, forward(4)) +print("Prediction (before training)", 4, forward(4)) # Training loop for epoch in range(10): for x_val, y_val in zip(x_data, y_data): + # Compute derivative w.r.t to the learned weights + # Update the weights + # Compute the loss and print progress grad = gradient(x_val, y_val) w = w - 0.01 * grad - print("\tgrad: ", x_val, y_val, grad) + print("\tgrad: ", x_val, y_val, round(grad, 2)) l = loss(x_val, y_val) - - print("progress:", epoch, l) + print("progress:", epoch, "w=", round(w, 2), "loss=", round(l, 2)) # After training -print("predict (after training)", 4, forward(4)) +print("Predicted score (after training)", "4 hours of studying: ", forward(4)) diff --git a/03_auto_gradient.py b/03_auto_gradient.py index 12d4cc8..9689703 100644 --- a/03_auto_gradient.py +++ b/03_auto_gradient.py @@ -1,41 +1,34 @@ import torch -from torch import nn -from torch.autograd import Variable - +import pdb x_data = [1.0, 2.0, 3.0] y_data = [2.0, 4.0, 6.0] - -w = Variable(torch.Tensor([1.0]), requires_grad=True) # Any random value +w = torch.tensor([1.0], requires_grad=True) # our model forward pass - - def forward(x): return x * w # Loss function - - -def loss(x, y): - y_pred = 
forward(x) - return (y_pred - y) * (y_pred - y) +def loss(y_pred, y_val): + return (y_pred - y_val) ** 2 # Before training -print("predict (before training)", 4, forward(4).data[0]) +print("Prediction (before training)", 4, forward(4).item()) # Training loop for epoch in range(10): for x_val, y_val in zip(x_data, y_data): - l = loss(x_val, y_val) - l.backward() - print("\tgrad: ", x_val, y_val, w.grad.data[0]) - w.data = w.data - 0.01 * w.grad.data + y_pred = forward(x_val) # 1) Forward pass + l = loss(y_pred, y_val) # 2) Compute loss + l.backward() # 3) Back propagation to update weights + print("\tgrad: ", x_val, y_val, w.grad.item()) + w.data = w.data - 0.01 * w.grad.item() # Manually zero the gradients after updating weights w.grad.data.zero_() - print("progress:", epoch, l.data[0]) + print(f"Epoch: {epoch} | Loss: {l.item()}") # After training -print("predict (after training)", 4, forward(4).data[0]) +print("Prediction (after training)", 4, forward(4).item()) diff --git a/05_linear_regression.py b/05_linear_regression.py index 936ecc4..84238a7 100644 --- a/05_linear_regression.py +++ b/05_linear_regression.py @@ -1,13 +1,12 @@ - +from torch import nn import torch -from torch.autograd import Variable - -x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0]])) -y_data = Variable(torch.Tensor([[2.0], [4.0], [6.0]])) +from torch import tensor +x_data = tensor([[1.0], [2.0], [3.0]]) +y_data = tensor([[2.0], [4.0], [6.0]]) -class Model(torch.nn.Module): +class Model(nn.Module): def __init__(self): """ In the constructor we instantiate two nn.Linear module @@ -24,24 +23,24 @@ def forward(self, x): y_pred = self.linear(x) return y_pred + # our model model = Model() - # Construct our loss function and an Optimizer. The call to model.parameters() # in the SGD constructor will contain the learnable parameters of the two # nn.Linear modules which are members of the model. -criterion = torch.nn.MSELoss(size_average=False) +criterion = torch.nn.MSELoss(reduction='sum') optimizer = torch.optim.SGD(model.parameters(), lr=0.01) # Training loop for epoch in range(500): - # Forward pass: Compute predicted y by passing x to the model + # 1) Forward pass: Compute predicted y by passing x to the model y_pred = model(x_data) - # Compute and print loss + # 2) Compute and print loss loss = criterion(y_pred, y_data) - print(epoch, loss.data[0]) + print(f'Epoch: {epoch} | Loss: {loss.item()} ') # Zero gradients, perform a backward pass, and update the weights. 
optimizer.zero_grad() @@ -50,5 +49,6 @@ def forward(self, x): # After training -hour_var = Variable(torch.Tensor([[4.0]])) -print("predict (after training)", 4, model.forward(hour_var).data[0][0]) +hour_var = tensor([[4.0]]) +y_pred = model(hour_var) +print("Prediction (after training)", 4, model(hour_var).data[0][0].item()) diff --git a/06_logistic_regression.py b/06_logistic_regression.py index d377be8..db903c6 100644 --- a/06_logistic_regression.py +++ b/06_logistic_regression.py @@ -1,57 +1,57 @@ - -import torch -from torch.autograd import Variable +from torch import tensor +from torch import nn +from torch import sigmoid import torch.nn.functional as F +import torch.optim as optim -x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0], [4.0]])) -y_data = Variable(torch.Tensor([[0.], [0.], [1.], [1.]])) - +# Training data and ground truth +x_data = tensor([[1.0], [2.0], [3.0], [4.0]]) +y_data = tensor([[0.], [0.], [1.], [1.]]) -class Model(torch.nn.Module): +class Model(nn.Module): def __init__(self): """ - In the constructor we instantiate two nn.Linear module + In the constructor we instantiate nn.Linear module """ super(Model, self).__init__() - self.linear = torch.nn.Linear(1, 1) # One in and one out + self.linear = nn.Linear(1, 1) # One in and one out def forward(self, x): """ In the forward function we accept a Variable of input data and we must return - a Variable of output data. We can use Modules defined in the constructor as - well as arbitrary operators on Variables. + a Variable of output data. """ - y_pred = F.sigmoid(self.linear(x)) + y_pred = sigmoid(self.linear(x)) return y_pred + # our model model = Model() - # Construct our loss function and an Optimizer. The call to model.parameters() # in the SGD constructor will contain the learnable parameters of the two # nn.Linear modules which are members of the model. -criterion = torch.nn.BCELoss(size_average=True) -optimizer = torch.optim.SGD(model.parameters(), lr=0.01) +criterion = nn.BCELoss(reduction='mean') +optimizer = optim.SGD(model.parameters(), lr=0.01) # Training loop -for epoch in range(500): - # Forward pass: Compute predicted y by passing x to the model +for epoch in range(1000): + # Forward pass: Compute predicted y by passing x to the model y_pred = model(x_data) # Compute and print loss loss = criterion(y_pred, y_data) - print(epoch, loss.data[0]) + print(f'Epoch {epoch + 1}/1000 | Loss: {loss.item():.4f}') # Zero gradients, perform a backward pass, and update the weights. 
optimizer.zero_grad() loss.backward() optimizer.step() - # After training -hour_var = Variable(torch.Tensor([[0.5]])) -print("predict (after training)", 0.5, model.forward(hour_var).data[0][0]) -hour_var = Variable(torch.Tensor([[7.0]])) -print("predict (after training)", 7.0, model.forward(hour_var).data[0][0]) +print(f'\nLet\'s predict the hours need to score above 50%\n{"=" * 50}') +hour_var = model(tensor([[1.0]])) +print(f'Prediction after 1 hour of training: {hour_var.item():.4f} | Above 50%: {hour_var.item() > 0.5}') +hour_var = model(tensor([[7.0]])) +print(f'Prediction after 7 hours of training: {hour_var.item():.4f} | Above 50%: { hour_var.item() > 0.5}') diff --git a/07_diabets_logistic.py b/07_diabets_logistic.py index 4d08540..0a510ac 100644 --- a/07_diabets_logistic.py +++ b/07_diabets_logistic.py @@ -1,28 +1,23 @@ - -import torch -from torch.autograd import Variable +from torch import nn, optim, from_numpy import numpy as np -xy = np.loadtxt('data-diabetes.csv', delimiter=',', dtype=np.float32) -x_data = Variable(torch.from_numpy(xy[:, 0:-1])) -y_data = Variable(torch.from_numpy(xy[:, [-1]])) - -print(x_data.data.shape) -print(y_data.data.shape) +xy = np.loadtxt('./data/diabetes.csv.gz', delimiter=',', dtype=np.float32) +x_data = from_numpy(xy[:, 0:-1]) +y_data = from_numpy(xy[:, [-1]]) +print(f'X\'s shape: {x_data.shape} | Y\'s shape: {y_data.shape}') -class Model(torch.nn.Module): - +class Model(nn.Module): def __init__(self): """ In the constructor we instantiate two nn.Linear module """ super(Model, self).__init__() - self.l1 = torch.nn.Linear(8, 6) - self.l2 = torch.nn.Linear(6, 4) - self.l3 = torch.nn.Linear(4, 1) + self.l1 = nn.Linear(8, 6) + self.l2 = nn.Linear(6, 4) + self.l3 = nn.Linear(4, 1) - self.sigmoid = torch.nn.Sigmoid() + self.sigmoid = nn.Sigmoid() def forward(self, x): """ @@ -35,6 +30,7 @@ def forward(self, x): y_pred = self.sigmoid(self.l3(out2)) return y_pred + # our model model = Model() @@ -42,17 +38,17 @@ def forward(self, x): # Construct our loss function and an Optimizer. The call to model.parameters() # in the SGD constructor will contain the learnable parameters of the two # nn.Linear modules which are members of the model. -criterion = torch.nn.BCELoss(size_average=True) -optimizer = torch.optim.SGD(model.parameters(), lr=0.1) +criterion = nn.BCELoss(reduction='mean') +optimizer = optim.SGD(model.parameters(), lr=0.1) # Training loop for epoch in range(100): - # Forward pass: Compute predicted y by passing x to the model + # Forward pass: Compute predicted y by passing x to the model y_pred = model(x_data) # Compute and print loss loss = criterion(y_pred, y_data) - print(epoch, loss.data[0]) + print(f'Epoch: {epoch + 1}/100 | Loss: {loss.item():.4f}') # Zero gradients, perform a backward pass, and update the weights. optimizer.zero_grad() diff --git a/08_1_dataset_loader.py b/08_1_dataset_loader.py index e329e3d..2921894 100644 --- a/08_1_dataset_loader.py +++ b/08_1_dataset_loader.py @@ -1,21 +1,20 @@ # References # https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/01-basics/pytorch_basics/main.py # http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#dataset-class -import torch -import numpy as np -from torch.autograd import Variable from torch.utils.data import Dataset, DataLoader - +from torch import from_numpy, tensor +import numpy as np class DiabetesDataset(Dataset): """ Diabetes dataset.""" # Initialize your data, download, etc. 
def __init__(self): - xy = np.loadtxt('data-diabetes.csv', delimiter=',', dtype=np.float32) + xy = np.loadtxt('./data/diabetes.csv.gz', + delimiter=',', dtype=np.float32) self.len = xy.shape[0] - self.x_data = torch.from_numpy(xy[:, 0:-1]) - self.y_data = torch.from_numpy(xy[:, [-1]]) + self.x_data = from_numpy(xy[:, 0:-1]) + self.y_data = from_numpy(xy[:, [-1]]) def __getitem__(self, index): return self.x_data[index], self.y_data[index] @@ -36,7 +35,7 @@ def __len__(self): inputs, labels = data # wrap them in Variable - inputs, labels = Variable(inputs), Variable(labels) + inputs, labels = tensor(inputs), tensor(labels) # Run your training process - print(epoch, i, "inputs", inputs.data, "labels", labels.data) + print(f'Epoch: {i} | Inputs {inputs.data} | Labels {labels.data}') diff --git a/08_2_dataset_loade_logisticr.py b/08_2_dataset_loade_logistic.py similarity index 75% rename from 08_2_dataset_loade_logisticr.py rename to 08_2_dataset_loade_logistic.py index ba7105a..ec43dbf 100644 --- a/08_2_dataset_loade_logisticr.py +++ b/08_2_dataset_loade_logistic.py @@ -1,21 +1,20 @@ # References # https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/01-basics/pytorch_basics/main.py # http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#dataset-class -import torch -import numpy as np -from torch.autograd import Variable from torch.utils.data import Dataset, DataLoader +from torch import nn, from_numpy, optim +import numpy as np class DiabetesDataset(Dataset): """ Diabetes dataset.""" - # Initialize your data, download, etc. def __init__(self): - xy = np.loadtxt('data-diabetes.csv', delimiter=',', dtype=np.float32) + xy = np.loadtxt('./data/diabetes.csv.gz', + delimiter=',', dtype=np.float32) self.len = xy.shape[0] - self.x_data = torch.from_numpy(xy[:, 0:-1]) - self.y_data = torch.from_numpy(xy[:, [-1]]) + self.x_data = from_numpy(xy[:, 0:-1]) + self.y_data = from_numpy(xy[:, [-1]]) def __getitem__(self, index): return self.x_data[index], self.y_data[index] @@ -31,18 +30,18 @@ def __len__(self): num_workers=2) -class Model(torch.nn.Module): +class Model(nn.Module): def __init__(self): """ In the constructor we instantiate two nn.Linear module """ super(Model, self).__init__() - self.l1 = torch.nn.Linear(8, 6) - self.l2 = torch.nn.Linear(6, 4) - self.l3 = torch.nn.Linear(4, 1) + self.l1 = nn.Linear(8, 6) + self.l2 = nn.Linear(6, 4) + self.l3 = nn.Linear(4, 1) - self.sigmoid = torch.nn.Sigmoid() + self.sigmoid = nn.Sigmoid() def forward(self, x): """ @@ -55,15 +54,15 @@ def forward(self, x): y_pred = self.sigmoid(self.l3(out2)) return y_pred + # our model model = Model() - # Construct our loss function and an Optimizer. The call to model.parameters() # in the SGD constructor will contain the learnable parameters of the two # nn.Linear modules which are members of the model. 
-criterion = torch.nn.BCELoss(size_average=True) -optimizer = torch.optim.SGD(model.parameters(), lr=0.1) +criterion = nn.BCELoss(reduction='sum') +optimizer = optim.SGD(model.parameters(), lr=0.1) # Training loop for epoch in range(2): @@ -71,15 +70,12 @@ def forward(self, x): # get the inputs inputs, labels = data - # wrap them in Variable - inputs, labels = Variable(inputs), Variable(labels) - # Forward pass: Compute predicted y by passing x to the model y_pred = model(inputs) # Compute and print loss loss = criterion(y_pred, labels) - print(epoch, i, loss.data[0]) + print(f'Epoch {epoch + 1} | Batch: {i+1} | Loss: {loss.item():.4f}') # Zero gradients, perform a backward pass, and update the weights. optimizer.zero_grad() loss.backward() optimizer.step() diff --git a/09_01_softmax_loss.py b/09_01_softmax_loss.py index 2fd8f42..ffea7a9 100644 --- a/09_01_softmax_loss.py +++ b/09_01_softmax_loss.py @@ -1,24 +1,52 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim -from torchvision import datasets, transforms -from torch.autograd import Variable +from torch import nn, tensor, max +import numpy as np +# Cross entropy example +# One hot +# 0: 1 0 0 +# 1: 0 1 0 +# 2: 0 0 1 +Y = np.array([1, 0, 0]) +Y_pred1 = np.array([0.7, 0.2, 0.1]) +Y_pred2 = np.array([0.1, 0.3, 0.6]) +print(f'Loss1: {np.sum(-Y * np.log(Y_pred1)):.4f}') +print(f'Loss2: {np.sum(-Y * np.log(Y_pred2)):.4f}') -# http://pytorch.org/docs/master/nn.html#nllloss -logsm = nn.LogSoftmax() -loss = nn.NLLLoss() +# Softmax + CrossEntropy (logSoftmax + NLLLoss) +loss = nn.CrossEntropyLoss() -# input is of size nBatch x nClasses = 3 x 5 -input = Variable(torch.randn(3, 5), requires_grad=True) -logsm_out = logsm(input) +# target is of size nBatch +# each element in target has to have 0 <= value < nClasses (0-2) +# Input is class, not one-hot +Y = tensor([0], requires_grad=False) + +# input is of size nBatch x nClasses = 1 x 3 +# Y_pred are logits (not softmax) +Y_pred1 = tensor([[2.0, 1.0, 0.1]]) +Y_pred2 = tensor([[0.5, 2.0, 0.3]]) + +l1 = loss(Y_pred1, Y) +l2 = loss(Y_pred2, Y) + +print(f'PyTorch Loss1: {l1.item():.4f} \nPyTorch Loss2: {l2.item():.4f}') +print(f'Y_pred1: {max(Y_pred1.data, 1)[1].item()}') +print(f'Y_pred2: {max(Y_pred2.data, 1)[1].item()}') # target is of size nBatch -# each element in target has to have 0 <= value < nclasses -target = Variable(torch.LongTensor([1, 0, 4])) +# each element in target has to have 0 <= value < nClasses (0-2) +# Input is class, not one-hot +Y = tensor([2, 0, 1], requires_grad=False) + +# input is of size nBatch x nClasses = 3 x 3 +# Y_pred are logits (not softmax) +Y_pred1 = tensor([[0.1, 0.2, 0.9], + [1.1, 0.1, 0.2], + [0.2, 2.1, 0.1]]) -l = loss((logsm_out), target) -l.backward() +Y_pred2 = tensor([[0.8, 0.2, 0.3], + [0.2, 0.3, 0.5], + [0.2, 0.2, 0.5]]) -print(input.size(), target.size(), l.size()) +l1 = loss(Y_pred1, Y) +l2 = loss(Y_pred2, Y) +print(f'Batch Loss1: {l1.item():.4f} \nBatch Loss2: {l2.item():.4f}') diff --git a/09_2_softmax_mnist.py b/09_2_softmax_mnist.py index 2be2c32..9e035a3 100644 --- a/09_2_softmax_mnist.py +++ b/09_2_softmax_mnist.py @@ -1,31 +1,32 @@ # https://github.com/pytorch/examples/blob/master/mnist/main.py from __future__ import print_function -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.optim as optim +from torch import nn, optim, cuda +from torch.utils import data +from torchvision import datasets, transforms +import torch.nn.functional as F +import time # Training
settings batch_size = 64 +device = 'cuda' if cuda.is_available() else 'cpu' +print(f'Training MNIST Model on {device}\n{"=" * 44}') # MNIST Dataset -train_dataset = datasets.MNIST(root='./data/', +train_dataset = datasets.MNIST(root='./mnist_data/', train=True, transform=transforms.ToTensor(), download=True) -test_dataset = datasets.MNIST(root='./data/', +test_dataset = datasets.MNIST(root='./mnist_data/', train=False, transform=transforms.ToTensor()) # Data Loader (Input Pipeline) -train_loader = torch.utils.data.DataLoader(dataset=train_dataset, +train_loader = data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True) -test_loader = torch.utils.data.DataLoader(dataset=test_dataset, +test_loader = data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False) @@ -46,28 +47,28 @@ def forward(self, x): x = F.relu(self.l2(x)) x = F.relu(self.l3(x)) x = F.relu(self.l4(x)) - x = F.relu(self.l5(x)) - return F.log_softmax(x) + return self.l5(x) model = Net() - +model.to(device) +criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5) def train(epoch): model.train() for batch_idx, (data, target) in enumerate(train_loader): - data, target = Variable(data), Variable(target) + data, target = data.to(device), target.to(device) optimizer.zero_grad() output = model(data) - loss = F.nll_loss(output, target) + loss = criterion(output, target) loss.backward() optimizer.step() if batch_idx % 10 == 0: - print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( + print('Train Epoch: {} | Batch Status: {}/{} ({:.0f}%) | Loss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), - 100. * batch_idx / len(train_loader), loss.data[0])) + 100. * batch_idx / len(train_loader), loss.item())) def test(): @@ -75,20 +76,30 @@ def test(): test_loss = 0 correct = 0 for data, target in test_loader: - data, target = Variable(data, volatile=True), Variable(target) + data, target = data.to(device), target.to(device) output = model(data) # sum up batch loss - test_loss += F.nll_loss(output, target, size_average=False).data[0] - # get the index of the max log-probability + test_loss += criterion(output, target).item() + # get the index of the max pred = output.data.max(1, keepdim=True)[1] correct += pred.eq(target.data.view_as(pred)).cpu().sum() test_loss /= len(test_loader.dataset) - print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format( - test_loss, correct, len(test_loader.dataset), - 100. * correct / len(test_loader.dataset))) + print(f'===========================\nTest set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} ' + f'({100. * correct / len(test_loader.dataset):.0f}%)') + + +if __name__ == '__main__': + since = time.time() + for epoch in range(1, 10): + epoch_start = time.time() + train(epoch) + m, s = divmod(time.time() - epoch_start, 60) + print(f'Training time: {m:.0f}m {s:.0f}s') + test() + m, s = divmod(time.time() - epoch_start, 60) + print(f'Testing time: {m:.0f}m {s:.0f}s') + m, s = divmod(time.time() - since, 60) + print(f'Total Time: {m:.0f}m {s:.0f}s\nModel was trained on {device}!') -for epoch in range(1, 10): - train(epoch) - test() diff --git a/10_1_cnn_mnist.py b/10_1_cnn_mnist.py index 547c477..3f851f9 100644 --- a/10_1_cnn_mnist.py +++ b/10_1_cnn_mnist.py @@ -66,7 +66,7 @@ def train(epoch): if batch_idx % 10 == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format( epoch, batch_idx * len(data), len(train_loader.dataset), - 100. 
* batch_idx / len(train_loader), loss.data[0])) + 100. * batch_idx / len(train_loader), loss.item())) def test(): @@ -77,7 +77,7 @@ def test(): data, target = Variable(data, volatile=True), Variable(target) output = model(data) # sum up batch loss - test_loss += F.nll_loss(output, target, size_average=False).data[0] + test_loss += F.nll_loss(output, target, size_average=False).data # get the index of the max log-probability pred = output.data.max(1, keepdim=True)[1] correct += pred.eq(target.data.view_as(pred)).cpu().sum() diff --git a/10_2_toy_inception_mnist.py b/11_1_toy_inception_mnist.py similarity index 100% rename from 10_2_toy_inception_mnist.py rename to 11_1_toy_inception_mnist.py diff --git a/11_1_rnn_basics.py b/12_1_rnn_basics.py similarity index 52% rename from 11_1_rnn_basics.py rename to 12_1_rnn_basics.py index e891e85..d26ceb0 100644 --- a/11_1_rnn_basics.py +++ b/12_1_rnn_basics.py @@ -11,26 +11,28 @@ # One cell RNN input_dim (4) -> output_dim (2). sequence: 5 cell = nn.RNN(input_size=4, hidden_size=2, batch_first=True) -# (num_layers * num_directions, batch, hidden_size) -hidden = (Variable(torch.randn(1, 1, 2))) +# (num_layers * num_directions, batch, hidden_size) whether batch_first=True or False +hidden = Variable(torch.randn(1, 1, 2)) # Propagate input through RNN # Input: (batch, seq_len, input_size) when batch_first=True -inputs = Variable(torch.Tensor([[h, e, l, l, o]])) -print("input size", inputs.size()) - -for one in inputs[0]: +inputs = Variable(torch.Tensor([h, e, l, l, o])) +for one in inputs: one = one.view(1, 1, -1) # Input: (batch, seq_len, input_size) when batch_first=True out, hidden = cell(one, hidden) - print(out.size()) + print("one input size", one.size(), "out size", out.size()) # We can do the whole at once # Propagate input through RNN # Input: (batch, seq_len, input_size) when batch_first=True +inputs = inputs.view(1, 5, -1) out, hidden = cell(inputs, hidden) -print("out size", out.size()) +print("sequence input size", inputs.size(), "out size", out.size()) + +# hidden : (num_layers * num_directions, batch, hidden_size) whether batch_first=True or False +hidden = Variable(torch.randn(1, 3, 2)) # One cell RNN input_dim (4) -> output_dim (2). sequence: 5, batch 3 # 3 batches 'hello', 'eolll', 'lleel' @@ -38,9 +40,21 @@ inputs = Variable(torch.Tensor([[h, e, l, l, o], [e, o, l, l, l], [l, l, e, e, l]])) -print("input size", inputs.size()) # input size torch.Size([3, 5, 4]) # Propagate input through RNN # Input: (batch, seq_len, input_size) when batch_first=True +# B x S x I +out, hidden = cell(inputs, hidden) +print("batch input size", inputs.size(), "out size", out.size()) + + +# One cell RNN input_dim (4) -> output_dim (2) +cell = nn.RNN(input_size=4, hidden_size=2) + +# The given dimensions dim0 and dim1 are swapped. 
+inputs = inputs.transpose(dim0=0, dim1=1) +# Propagate input through RNN +# Input: (seq_len, batch_size, input_size) when batch_first=False (default) +# S x B x I out, hidden = cell(inputs, hidden) -print("out size", out.size()) # out size torch.Size([3, 5, 2]) +print("batch input size", inputs.size(), "out size", out.size()) diff --git a/12_2_hello_rnn.py b/12_2_hello_rnn.py new file mode 100644 index 0000000..d196472 --- /dev/null +++ b/12_2_hello_rnn.py @@ -0,0 +1,85 @@ +# Lab 12 RNN +import sys +import torch +import torch.nn as nn +from torch.autograd import Variable + +torch.manual_seed(777) # reproducibility +# 0 1 2 3 4 +idx2char = ['h', 'i', 'e', 'l', 'o'] + +# Teach hihell -> ihello +x_data = [0, 1, 0, 2, 3, 3] # hihell +one_hot_lookup = [[1, 0, 0, 0, 0], # 0 + [0, 1, 0, 0, 0], # 1 + [0, 0, 1, 0, 0], # 2 + [0, 0, 0, 1, 0], # 3 + [0, 0, 0, 0, 1]] # 4 + +y_data = [1, 0, 2, 3, 3, 4] # ihello +x_one_hot = [one_hot_lookup[x] for x in x_data] + +# As we have one batch of samples, we will change them to variables only once +inputs = Variable(torch.Tensor(x_one_hot)) +labels = Variable(torch.LongTensor(y_data)) + +num_classes = 5 +input_size = 5 # one-hot size +hidden_size = 5 # output from the RNN. 5 to directly predict one-hot +batch_size = 1 # one sentence +sequence_length = 1 # One by one +num_layers = 1 # one-layer rnn + + +class Model(nn.Module): + + def __init__(self): + super(Model, self).__init__() + self.rnn = nn.RNN(input_size=input_size, + hidden_size=hidden_size, batch_first=True) + + def forward(self, hidden, x): + # Reshape input (batch first) + x = x.view(batch_size, sequence_length, input_size) + + # Propagate input through RNN + # Input: (batch, seq_len, input_size) + # hidden: (num_layers * num_directions, batch, hidden_size) + out, hidden = self.rnn(x, hidden) + return hidden, out.view(-1, num_classes) + + def init_hidden(self): + # Initialize hidden and cell states + # (num_layers * num_directions, batch, hidden_size) + return Variable(torch.zeros(num_layers, batch_size, hidden_size)) + + +# Instantiate RNN model +model = Model() +print(model) + +# Set loss and optimizer function +# CrossEntropyLoss = LogSoftmax + NLLLoss +criterion = nn.CrossEntropyLoss() +optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + +# Train the model +for epoch in range(100): + optimizer.zero_grad() + loss = 0 + hidden = model.init_hidden() + + sys.stdout.write("predicted string: ") + for input, label in zip(inputs, labels): + # print(input.size(), label.size()) + hidden, output = model(hidden, input) + val, idx = output.max(1) + sys.stdout.write(idx2char[idx.data[0]]) + loss += criterion(output, torch.LongTensor([label])) + + print(", epoch: %d, loss: %1.3f" % (epoch + 1, loss)) + + loss.backward() + optimizer.step() + +print("Learning finished!") diff --git a/11_2_hello_rnn.py b/12_3_hello_rnn_seq.py similarity index 92% rename from 11_2_hello_rnn.py rename to 12_3_hello_rnn_seq.py index 52def9c..5a0c2f2 100644 --- a/11_2_hello_rnn.py +++ b/12_3_hello_rnn_seq.py @@ -23,7 +23,6 @@ inputs = Variable(torch.Tensor(x_one_hot)) labels = Variable(torch.LongTensor(y_data)) - num_classes = 5 input_size = 5 # one-hot size hidden_size = 5 # output from the LSTM. 
5 to directly predict one-hot @@ -47,15 +46,16 @@ def __init__(self, num_classes, input_size, hidden_size, num_layers): def forward(self, x): # Initialize hidden and cell states + # (num_layers * num_directions, batch, hidden_size) for batch_first=True h_0 = Variable(torch.zeros( - x.size(0), self.num_layers, self.hidden_size)) + self.num_layers, x.size(0), self.hidden_size)) # Reshape input x.view(x.size(0), self.sequence_length, self.input_size) # Propagate input through RNN # Input: (batch, seq_len, input_size) - # h_0: (batch, num_layers * num_directions, hidden_size) + # h_0: (num_layers * num_directions, batch, hidden_size) out, _ = self.rnn(x, h_0) return out.view(-1, num_classes) diff --git a/12_4_hello_rnn_emb.py b/12_4_hello_rnn_emb.py new file mode 100644 index 0000000..aa783a1 --- /dev/null +++ b/12_4_hello_rnn_emb.py @@ -0,0 +1,77 @@ +# Lab 12 RNN +import torch +import torch.nn as nn +from torch.autograd import Variable + +torch.manual_seed(777) # reproducibility + + +idx2char = ['h', 'i', 'e', 'l', 'o'] + +# Teach hihell -> ihello +x_data = [[0, 1, 0, 2, 3, 3]] # hihell +y_data = [1, 0, 2, 3, 3, 4] # ihello + +# As we have one batch of samples, we will change them to variables only once +inputs = Variable(torch.LongTensor(x_data)) +labels = Variable(torch.LongTensor(y_data)) + +num_classes = 5 +input_size = 5 +embedding_size = 10 # embedding size +hidden_size = 5 # output from the LSTM. 5 to directly predict one-hot +batch_size = 1 # one sentence +sequence_length = 6 # |ihello| == 6 +num_layers = 1 # one-layer rnn + + +class Model(nn.Module): + + def __init__(self, num_layers, hidden_size): + super(Model, self).__init__() + self.num_layers = num_layers + self.hidden_size = hidden_size + self.embedding = nn.Embedding(input_size, embedding_size) + self.rnn = nn.RNN(input_size=embedding_size, + hidden_size=5, batch_first=True) + self.fc = nn.Linear(hidden_size, num_classes) + + def forward(self, x): + # Initialize hidden and cell states + # (num_layers * num_directions, batch, hidden_size) + h_0 = Variable(torch.zeros( + self.num_layers, x.size(0), self.hidden_size)) + + emb = self.embedding(x) + emb = emb.view(batch_size, sequence_length, -1) + + # Propagate embedding through RNN + # Input: (batch, seq_len, embedding_size) + # h_0: (num_layers * num_directions, batch, hidden_size) + out, _ = self.rnn(emb, h_0) + return self.fc(out.view(-1, num_classes)) + + +# Instantiate RNN model +model = Model(num_layers, hidden_size) +print(model) + +# Set loss and optimizer function +# CrossEntropyLoss = LogSoftmax + NLLLoss +criterion = torch.nn.CrossEntropyLoss() +optimizer = torch.optim.Adam(model.parameters(), lr=0.1) + +# Train the model +for epoch in range(100): + outputs = model(inputs) + optimizer.zero_grad() + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + _, idx = outputs.max(1) + idx = idx.data.numpy() + result_str = [idx2char[c] for c in idx.squeeze()] + print("epoch: %d, loss: %1.3f" % (epoch + 1, loss.item())) + print("Predicted string: ", ''.join(result_str)) + +print("Learning finished!") diff --git a/13_1_rnn_classification_basics.py b/13_1_rnn_classification_basics.py new file mode 100644 index 0000000..65b0033 --- /dev/null +++ b/13_1_rnn_classification_basics.py @@ -0,0 +1,95 @@ +# Original code is from https://github.com/spro/practical-pytorch +import time +import math +import torch +import torch.nn as nn +from torch.autograd import Variable +from torch.utils.data import DataLoader + +from name_dataset import NameDataset +from 
torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence + +# Parameters and DataLoaders +HIDDEN_SIZE = 100 +N_CHARS = 128 # ASCII +N_CLASSES = 18 + + +class RNNClassifier(nn.Module): + + def __init__(self, input_size, hidden_size, output_size, n_layers=1): + super(RNNClassifier, self).__init__() + self.hidden_size = hidden_size + self.n_layers = n_layers + + self.embedding = nn.Embedding(input_size, hidden_size) + self.gru = nn.GRU(hidden_size, hidden_size, n_layers) + self.fc = nn.Linear(hidden_size, output_size) + + def forward(self, input): + # Note: we run this all at once (over the whole input sequence) + + # input = B x S . size(0) = B + batch_size = input.size(0) + + # input: B x S -- (transpose) --> S x B + input = input.t() + + # Embedding S x B -> S x B x I (embedding size) + print(" input", input.size()) + embedded = self.embedding(input) + print(" embedding", embedded.size()) + + # Make a hidden + hidden = self._init_hidden(batch_size) + + output, hidden = self.gru(embedded, hidden) + print(" gru hidden output", hidden.size()) + # Use the last layer output as FC's input + # No need to unpack, since we are going to use hidden + fc_output = self.fc(hidden) + print(" fc output", fc_output.size()) + return fc_output + + def _init_hidden(self, batch_size): + hidden = torch.zeros(self.n_layers, batch_size, self.hidden_size) + return Variable(hidden) + +# Help functions + + +def str2ascii_arr(msg): + arr = [ord(c) for c in msg] + return arr, len(arr) + +# pad sequences and sort the tensor +def pad_sequences(vectorized_seqs, seq_lengths): + seq_tensor = torch.zeros((len(vectorized_seqs), seq_lengths.max())).long() + for idx, (seq, seq_len) in enumerate(zip(vectorized_seqs, seq_lengths)): + seq_tensor[idx, :seq_len] = torch.LongTensor(seq) + return seq_tensor + +# Create necessary variables, lengths, and target +def make_variables(names): + sequence_and_length = [str2ascii_arr(name) for name in names] + vectorized_seqs = [sl[0] for sl in sequence_and_length] + seq_lengths = torch.LongTensor([sl[1] for sl in sequence_and_length]) + return pad_sequences(vectorized_seqs, seq_lengths) + + +if __name__ == '__main__': + names = ['adylov', 'solan', 'hard', 'san'] + classifier = RNNClassifier(N_CHARS, HIDDEN_SIZE, N_CLASSES) + + for name in names: + arr, _ = str2ascii_arr(name) + inp = Variable(torch.LongTensor([arr])) + out = classifier(inp) + print("in", inp.size(), "out", out.size()) + + + inputs = make_variables(names) + out = classifier(inputs) + print("batch in", inputs.size(), "batch out", out.size()) + + diff --git a/13_2_rnn_classification.py b/13_2_rnn_classification.py new file mode 100644 index 0000000..7bfe286 --- /dev/null +++ b/13_2_rnn_classification.py @@ -0,0 +1,212 @@ +# Original code is from https://github.com/spro/practical-pytorch +import time +import math +import torch +import torch.nn as nn +from torch.autograd import Variable +from torch.utils.data import DataLoader + +from name_dataset import NameDataset +from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence + +# Parameters and DataLoaders +HIDDEN_SIZE = 100 +N_LAYERS = 2 +BATCH_SIZE = 256 +N_EPOCHS = 100 + +test_dataset = NameDataset(is_train_set=False) +test_loader = DataLoader(dataset=test_dataset, + batch_size=BATCH_SIZE, shuffle=True) + + +train_dataset = NameDataset(is_train_set=True) +train_loader = DataLoader(dataset=train_dataset, + batch_size=BATCH_SIZE, shuffle=True) + +N_COUNTRIES = len(train_dataset.get_countries()) +print(N_COUNTRIES, "countries") +N_CHARS = 128 # ASCII + + 
+# Some utility functions +def time_since(since): + s = time.time() - since + m = math.floor(s / 60) + s -= m * 60 + return '%dm %ds' % (m, s) + + +def create_variable(tensor): + # Do cuda() before wrapping with variable + if torch.cuda.is_available(): + return Variable(tensor.cuda()) + else: + return Variable(tensor) + + +# pad sequences and sort the tensor +def pad_sequences(vectorized_seqs, seq_lengths, countries): + seq_tensor = torch.zeros((len(vectorized_seqs), seq_lengths.max())).long() + for idx, (seq, seq_len) in enumerate(zip(vectorized_seqs, seq_lengths)): + seq_tensor[idx, :seq_len] = torch.LongTensor(seq) + + # Sort tensors by their length + seq_lengths, perm_idx = seq_lengths.sort(0, descending=True) + seq_tensor = seq_tensor[perm_idx] + + # Also sort the target (countries) in the same order + target = countries2tensor(countries) + if len(countries): + target = target[perm_idx] + + # Return variables + # DataParallel requires everything to be a Variable + return create_variable(seq_tensor), \ + create_variable(seq_lengths), \ + create_variable(target) + + +# Create necessary variables, lengths, and target +def make_variables(names, countries): + sequence_and_length = [str2ascii_arr(name) for name in names] + vectorized_seqs = [sl[0] for sl in sequence_and_length] + seq_lengths = torch.LongTensor([sl[1] for sl in sequence_and_length]) + return pad_sequences(vectorized_seqs, seq_lengths, countries) + + +def str2ascii_arr(msg): + arr = [ord(c) for c in msg] + return arr, len(arr) + + +def countries2tensor(countries): + country_ids = [train_dataset.get_country_id( + country) for country in countries] + return torch.LongTensor(country_ids) + + +class RNNClassifier(nn.Module): + # Our model + + def __init__(self, input_size, hidden_size, output_size, n_layers=1, bidirectional=True): + super(RNNClassifier, self).__init__() + self.hidden_size = hidden_size + self.n_layers = n_layers + self.n_directions = int(bidirectional) + 1 + + self.embedding = nn.Embedding(input_size, hidden_size) + self.gru = nn.GRU(hidden_size, hidden_size, n_layers, + bidirectional=bidirectional) + self.fc = nn.Linear(hidden_size, output_size) + + def forward(self, input, seq_lengths): + # Note: we run this all at once (over the whole input sequence) + # input shape: B x S (input size) + # transpose to make S(sequence) x B (batch) + input = input.t() + batch_size = input.size(1) + + # Make a hidden + hidden = self._init_hidden(batch_size) + + # Embedding S x B -> S x B x I (embedding size) + embedded = self.embedding(input) + + # Pack them up nicely + gru_input = pack_padded_sequence( + embedded, seq_lengths.data.cpu().numpy()) + + # To compact weights again call flatten_parameters(). 
+ self.gru.flatten_parameters() + output, hidden = self.gru(gru_input, hidden) + + # Use the last layer output as FC's input + # No need to unpack, since we are going to use hidden + fc_output = self.fc(hidden[-1]) + return fc_output + + def _init_hidden(self, batch_size): + hidden = torch.zeros(self.n_layers * self.n_directions, + batch_size, self.hidden_size) + return create_variable(hidden) + + +# Train cycle +def train(): + total_loss = 0 + + for i, (names, countries) in enumerate(train_loader, 1): + input, seq_lengths, target = make_variables(names, countries) + output = classifier(input, seq_lengths) + + loss = criterion(output, target) + total_loss += loss.data[0] + + classifier.zero_grad() + loss.backward() + optimizer.step() + + if i % 10 == 0: + print('[{}] Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.2f}'.format( + time_since(start), epoch, i * + len(names), len(train_loader.dataset), + 100. * i * len(names) / len(train_loader.dataset), + total_loss / i * len(names))) + + return total_loss + + +# Testing cycle +def test(name=None): + # Predict for a given name + if name: + input, seq_lengths, target = make_variables([name], []) + output = classifier(input, seq_lengths) + pred = output.data.max(1, keepdim=True)[1] + country_id = pred.cpu().numpy()[0][0] + print(name, "is", train_dataset.get_country(country_id)) + return + + print("evaluating trained model ...") + correct = 0 + train_data_size = len(test_loader.dataset) + + for names, countries in test_loader: + input, seq_lengths, target = make_variables(names, countries) + output = classifier(input, seq_lengths) + pred = output.data.max(1, keepdim=True)[1] + correct += pred.eq(target.data.view_as(pred)).cpu().sum() + + print('\nTest set: Accuracy: {}/{} ({:.0f}%)\n'.format( + correct, train_data_size, 100. * correct / train_data_size)) + + +if __name__ == '__main__': + + classifier = RNNClassifier(N_CHARS, HIDDEN_SIZE, N_COUNTRIES, N_LAYERS) + if torch.cuda.device_count() > 1: + print("Let's use", torch.cuda.device_count(), "GPUs!") + # dim = 0 [33, xxx] -> [11, ...], [11, ...], [11, ...] on 3 GPUs + classifier = nn.DataParallel(classifier) + + if torch.cuda.is_available(): + classifier.cuda() + + optimizer = torch.optim.Adam(classifier.parameters(), lr=0.001) + criterion = nn.CrossEntropyLoss() + + start = time.time() + print("Training for %d epochs..." % N_EPOCHS) + for epoch in range(1, N_EPOCHS + 1): + # Train cycle + train() + + # Testing + test() + + # Testing several samples + test("Sung") + test("Jungwoo") + test("Soojin") + test("Nako") diff --git a/13_3_char_rnn.py b/13_3_char_rnn.py new file mode 100644 index 0000000..cf3bb70 --- /dev/null +++ b/13_3_char_rnn.py @@ -0,0 +1,147 @@ +# https://github.com/spro/practical-pytorch +import torch +import torch.nn as nn +from torch.autograd import Variable +from torch.utils.data import DataLoader + +from text_loader import TextDataset + +hidden_size = 100 +n_layers = 3 +batch_size = 1 +n_epochs = 100 +n_characters = 128 # ASCII + + +class RNN(nn.Module): + + def __init__(self, input_size, hidden_size, output_size, n_layers=1): + super(RNN, self).__init__() + self.input_size = input_size + self.hidden_size = hidden_size + self.output_size = output_size + self.n_layers = n_layers + + self.embedding = nn.Embedding(input_size, hidden_size) + self.gru = nn.GRU(hidden_size, hidden_size, n_layers) + self.linear = nn.Linear(hidden_size, output_size) + + # This runs this one step at a time + # It's extremely slow, and please do not use in practice. 
+ # We need to use (1) batch and (2) data parallelism + def forward(self, input, hidden): + embed = self.embedding(input.view(1, -1)) # S(=1) x I + embed = embed.view(1, 1, -1) # S(=1) x B(=1) x I (embedding size) + output, hidden = self.gru(embed, hidden) + output = self.linear(output.view(1, -1)) # S(=1) x I + return output, hidden + + def init_hidden(self): + if torch.cuda.is_available(): + hidden = torch.zeros(self.n_layers, 1, self.hidden_size).cuda() + else: + hidden = torch.zeros(self.n_layers, 1, self.hidden_size) + + return Variable(hidden) + + +def str2tensor(string): + tensor = [ord(c) for c in string] + tensor = torch.LongTensor(tensor) + + if torch.cuda.is_available(): + tensor = tensor.cuda() + + return Variable(tensor) + + +def generate(decoder, prime_str='A', predict_len=100, temperature=0.8): + hidden = decoder.init_hidden() + prime_input = str2tensor(prime_str) + predicted = prime_str + + # Use priming string to "build up" hidden state + for p in range(len(prime_str) - 1): + _, hidden = decoder(prime_input[p], hidden) + + inp = prime_input[-1] + + for p in range(predict_len): + output, hidden = decoder(inp, hidden) + + # Sample from the network as a multinomial distribution + output_dist = output.data.view(-1).div(temperature).exp() + top_i = torch.multinomial(output_dist, 1)[0] + + # Add predicted character to string and use as next input + predicted_char = chr(top_i) + predicted += predicted_char + inp = str2tensor(predicted_char) + + return predicted + +# Train for a given src and target +# It feeds single string to demonstrate seq2seq +# It's extremely slow, and we need to use (1) batch and (2) data parallelism +# http://pytorch.org/tutorials/beginner/former_torchies/parallelism_tutorial.html. + + +def train_teacher_forching(line): + input = str2tensor(line[:-1]) + target = str2tensor(line[1:]) + + hidden = decoder.init_hidden() + loss = 0 + + for c in range(len(input)): + output, hidden = decoder(input[c], hidden) + loss += criterion(output, target[c]) + + decoder.zero_grad() + loss.backward() + decoder_optimizer.step() + + return loss.data[0] / len(input) + + +def train(line): + input = str2tensor(line[:-1]) + target = str2tensor(line[1:]) + + hidden = decoder.init_hidden() + decoder_in = input[0] + loss = 0 + + for c in range(len(input)): + output, hidden = decoder(decoder_in, hidden) + loss += criterion(output, target[c]) + decoder_in = output.max(1)[1] + + decoder.zero_grad() + loss.backward() + decoder_optimizer.step() + + return loss.data[0] / len(input) + +if __name__ == '__main__': + + decoder = RNN(n_characters, hidden_size, n_characters, n_layers) + if torch.cuda.is_available(): + decoder.cuda() + + decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=0.001) + criterion = nn.CrossEntropyLoss() + + train_loader = DataLoader(dataset=TextDataset(), + batch_size=batch_size, + shuffle=True) + + print("Training for %d epochs..." 
% n_epochs) + for epoch in range(1, n_epochs + 1): + for i, (lines, _) in enumerate(train_loader): + loss = train(lines[0]) # Batch size is 1 + + if i % 100 == 0: + print('[(%d %d%%) loss: %.4f]' % + (epoch, epoch / n_epochs * 100, loss)) + print(generate(decoder, 'Wh', 100), '\n') diff --git a/13_4_pack_pad.py b/13_4_pack_pad.py new file mode 100644 index 0000000..2ac6454 --- /dev/null +++ b/13_4_pack_pad.py @@ -0,0 +1,70 @@ +# Original source from +# https://gist.github.com/Tushar-N/dfca335e370a2bc3bc79876e6270099e +# torch +import torch +import torch.nn as nn +from torch.autograd import Variable +from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence +import torch.nn.functional as F +import numpy as np +import itertools + + +def flatten(l): + return list(itertools.chain.from_iterable(l)) + +seqs = ['ghatmasala', 'nicela', 'chutpakodas'] + +# make idx 0 +vocab = [''] + sorted(list(set(flatten(seqs)))) + +# make model +embedding_size = 3 +embed = nn.Embedding(len(vocab), embedding_size) +lstm = nn.LSTM(embedding_size, 5) + +vectorized_seqs = [[vocab.index(tok) for tok in seq]for seq in seqs] +print("vectorized_seqs", vectorized_seqs) + +print([x for x in map(len, vectorized_seqs)]) +# get the length of each seq in your batch +seq_lengths = torch.LongTensor([x for x in map(len, vectorized_seqs)]) + +# dump padding everywhere, and place seqs on the left. +# NOTE: you only need a tensor as big as your longest sequence +seq_tensor = Variable(torch.zeros( + (len(vectorized_seqs), seq_lengths.max()))).long() +for idx, (seq, seqlen) in enumerate(zip(vectorized_seqs, seq_lengths)): + seq_tensor[idx, :seqlen] = torch.LongTensor(seq) + +print("seq_tensor", seq_tensor) + +# SORT YOUR TENSORS BY LENGTH! +seq_lengths, perm_idx = seq_lengths.sort(0, descending=True) +seq_tensor = seq_tensor[perm_idx] + +print("seq_tensor after sorting", seq_tensor) + +# utils.rnn lets you give (B,L,D) tensors where B is the batch size, L is the maxlength, if you use batch_first=True +# Otherwise, give (L,B,D) tensors +seq_tensor = seq_tensor.transpose(0, 1) # (B,L,D) -> (L,B,D) +print("seq_tensor after transposing", seq_tensor.size(), seq_tensor.data) + +# embed your sequences +embeded_seq_tensor = embed(seq_tensor) +print("seq_tensor after embeding", embeded_seq_tensor.size(), seq_tensor.data) + +# pack them up nicely +packed_input = pack_padded_sequence( + embeded_seq_tensor, seq_lengths.cpu().numpy()) + +# throw them through your LSTM (remember to give batch_first=True here if +# you packed with it) +packed_output, (ht, ct) = lstm(packed_input) + +# unpack your output if required +output, _ = pad_packed_sequence(packed_output) +print("Lstm output", output.size(), output.data) + +# Or if you just want the final hidden state? 
+print("Last output", ht[-1].size(), ht[-1].data) diff --git a/14_1_seq2seq.py b/14_1_seq2seq.py new file mode 100644 index 0000000..d92aebc --- /dev/null +++ b/14_1_seq2seq.py @@ -0,0 +1,121 @@ +# https://github.com/spro/practical-pytorch/blob/master/seq2seq-translation/seq2seq-translation.ipynb +import torch +import torch.nn as nn +from torch.utils.data import DataLoader +from text_loader import TextDataset +import seq2seq_models as sm +from seq2seq_models import str2tensor, EOS_token, SOS_token + +HIDDEN_SIZE = 100 +N_LAYERS = 1 +BATCH_SIZE = 1 +N_EPOCH = 100 +N_CHARS = 128 # ASCII + + +# Simple test to show how our network works +def test(): + encoder_hidden = encoder.init_hidden() + word_input = str2tensor('hello') + encoder_outputs, encoder_hidden = encoder(word_input, encoder_hidden) + print(encoder_outputs) + + decoder_hidden = encoder_hidden + + word_target = str2tensor('pytorch') + for c in range(len(word_target)): + decoder_output, decoder_hidden = decoder( + word_target[c], decoder_hidden) + print(decoder_output.size(), decoder_hidden.size()) + + +# Train for a given src and target +# To demonstrate seq2seq, We don't handle batch in the code, +# and our encoder runs this one step at a time +# It's extremely slow, and please do not use in practice. +# We need to use (1) batch and (2) data parallelism +# http://pytorch.org/tutorials/beginner/former_torchies/parallelism_tutorial.html. +def train(src, target): + src_var = str2tensor(src) + target_var = str2tensor(target, eos=True) # Add the EOS token + + encoder_hidden = encoder.init_hidden() + encoder_outputs, encoder_hidden = encoder(src_var, encoder_hidden) + + hidden = encoder_hidden + loss = 0 + + for c in range(len(target_var)): + # First, we feed SOS + # Others, we use teacher forcing + token = target_var[c - 1] if c else str2tensor(SOS_token) + output, hidden = decoder(token, hidden) + loss += criterion(output, target_var[c]) + + encoder.zero_grad() + decoder.zero_grad() + loss.backward() + optimizer.step() + + return loss.data[0] / len(target_var) + + +# Translate the given input +def translate(enc_input='thisissungkim.iloveyou.', predict_len=100, temperature=0.9): + input_var = str2tensor(enc_input) + encoder_hidden = encoder.init_hidden() + encoder_outputs, encoder_hidden = encoder(input_var, encoder_hidden) + + hidden = encoder_hidden + + predicted = '' + dec_input = str2tensor(SOS_token) + for c in range(predict_len): + output, hidden = decoder(dec_input, hidden) + + # Sample from the network as a multi nominal distribution + output_dist = output.data.view(-1).div(temperature).exp() + top_i = torch.multinomial(output_dist, 1)[0] + + # Stop at the EOS + if top_i is EOS_token: + break + + predicted_char = chr(top_i) + predicted += predicted_char + + dec_input = str2tensor(predicted_char) + + return enc_input, predicted + + +encoder = sm.EncoderRNN(N_CHARS, HIDDEN_SIZE, N_LAYERS) +decoder = sm.DecoderRNN(HIDDEN_SIZE, N_CHARS, N_LAYERS) + +if torch.cuda.is_available(): + decoder.cuda() + encoder.cuda() +print(encoder, decoder) +test() + +params = list(encoder.parameters()) + list(decoder.parameters()) +optimizer = torch.optim.Adam(params, lr=0.001) +criterion = nn.CrossEntropyLoss() + + +train_loader = DataLoader(dataset=TextDataset(), + batch_size=BATCH_SIZE, + shuffle=True, + num_workers=2) + +print("Training for %d epochs..." 
% N_EPOCH) +for epoch in range(1, N_EPOCH + 1): + # Get srcs and targets from data loader + for i, (srcs, targets) in enumerate(train_loader): + train_loss = train(srcs[0], targets[0]) # Batch is 1 + + if i % 100 is 0: + print('[(%d %d%%) %.4f]' % + (epoch, epoch / N_EPOCH * 100, train_loss)) + print(translate(srcs[0]), '\n') + print(translate(), '\n') diff --git a/14_2_seq2seq_att.py b/14_2_seq2seq_att.py new file mode 100644 index 0000000..45156d8 --- /dev/null +++ b/14_2_seq2seq_att.py @@ -0,0 +1,145 @@ +# Original code from +# https://github.com/spro/practical-pytorch/blob/master/seq2seq-translation/seq2seq-translation.ipynb + +#import matplotlib.pyplot as plt + +import torch +import torch.nn as nn + +from torch.utils.data import DataLoader +from text_loader import TextDataset +import seq2seq_models as sm +from seq2seq_models import cuda_variable, str2tensor, EOS_token, SOS_token + + +N_LAYERS = 1 +BATCH_SIZE = 1 +N_EPOCH = 100 +N_CHARS = 128 # ASCII +HIDDEN_SIZE = N_CHARS + + +# Simple test to show how our train works +def test(): + encoder_test = sm.EncoderRNN(10, 10, 2) + decoder_test = sm.AttnDecoderRNN(10, 10, 2) + + if torch.cuda.is_available(): + encoder_test.cuda() + decoder_test.cuda() + + encoder_hidden = encoder_test.init_hidden() + word_input = cuda_variable(torch.LongTensor([1, 2, 3])) + encoder_outputs, encoder_hidden = encoder_test(word_input, encoder_hidden) + print(encoder_outputs.size()) + + word_target = cuda_variable(torch.LongTensor([1, 2, 3])) + decoder_attns = torch.zeros(1, 3, 3) + decoder_hidden = encoder_hidden + + for c in range(len(word_target)): + decoder_output, decoder_hidden, decoder_attn = \ + decoder_test(word_target[c], + decoder_hidden, encoder_outputs) + print(decoder_output.size(), decoder_hidden.size(), decoder_attn.size()) + decoder_attns[0, c] = decoder_attn.squeeze(0).cpu().data + + +# Train for a given src and target +# To demonstrate seq2seq, We don't handle batch in the code, +# and our encoder runs this one step at a time +# It's extremely slow, and please do not use in practice. +# We need to use (1) batch and (2) data parallelism +# http://pytorch.org/tutorials/beginner/former_torchies/parallelism_tutorial.html. +def train(src, target): + loss = 0 + + src_var = str2tensor(src) + target_var = str2tensor(target, eos=True) # Add the EOS token + + encoder_hidden = encoder.init_hidden() + encoder_outputs, encoder_hidden = encoder(src_var, encoder_hidden) + + hidden = encoder_hidden + + for c in range(len(target_var)): + # First, we feed SOS. Others, we use teacher forcing. 
+ token = target_var[c - 1] if c else str2tensor(SOS_token) + output, hidden, attention = decoder(token, hidden, encoder_outputs) + loss += criterion(output, target_var[c]) + + encoder.zero_grad() + decoder.zero_grad() + loss.backward() + optimizer.step() + + return loss.data[0] / len(target_var) + + +# Translate the given input +def translate(enc_input='thisissungkim.iloveyou.', predict_len=100, temperature=0.9): + input_var = str2tensor(enc_input) + encoder_hidden = encoder.init_hidden() + encoder_outputs, encoder_hidden = encoder(input_var, encoder_hidden) + + hidden = encoder_hidden + + predicted = '' + dec_input = str2tensor(SOS_token) + attentions = [] + for c in range(predict_len): + output, hidden, attention = decoder(dec_input, hidden, encoder_outputs) + # Sample from the network as a multinomial distribution + output_dist = output.data.view(-1).div(temperature).exp() + top_i = torch.multinomial(output_dist, 1)[0] + attentions.append(attention.view(-1).data.cpu().numpy().tolist()) + + # Stop at the EOS + if top_i == EOS_token: + break + + predicted_char = chr(top_i) + predicted += predicted_char + + dec_input = str2tensor(predicted_char) + + return predicted, attentions + + +if __name__ == '__main__': + encoder = sm.EncoderRNN(N_CHARS, HIDDEN_SIZE, N_LAYERS) + decoder = sm.AttnDecoderRNN(HIDDEN_SIZE, N_CHARS, N_LAYERS) + + if torch.cuda.is_available(): + decoder.cuda() + encoder.cuda() + print(encoder, decoder) + # test() + + params = list(encoder.parameters()) + list(decoder.parameters()) + optimizer = torch.optim.Adam(params, lr=0.001) + criterion = nn.CrossEntropyLoss() + + train_loader = DataLoader(dataset=TextDataset(), + batch_size=BATCH_SIZE, + shuffle=True, + num_workers=2) + + print("Training for %d epochs..." % N_EPOCH) + for epoch in range(1, N_EPOCH + 1): + # Get srcs and targets from data loader + for i, (srcs, targets) in enumerate(train_loader): + train_loss = train(srcs[0], targets[0]) + + if i % 1000 == 0: + print('[(%d/%d %d%%) %.4f]' % + (epoch, N_EPOCH, i * len(srcs) * 100 / len(train_loader), train_loss)) + output, _ = translate(srcs[0]) + print(srcs[0], output, '\n') + + output, attentions = translate() + print('thisissungkim.iloveyou.', output, '\n') + + # plt.matshow(attentions) + # plt.show() + # print(attentions) diff --git a/README.md b/README.md index b1b2ad5..b63d22f 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,16 @@ +[![Build Status](https://travis-ci.org/hunkim/PythonZeroToAll.svg?branch=master)](https://travis-ci.org/hunkim/PythonZeroToAll) + # PyTorchZeroToAll -Quick 3~4 day lecture meterials for HKUST students. +Quick 3~4 day lecture materials for HKUST students. + +## Video Lectures: (RNN TBA) +* [Youtube](http://bit.ly/PyTorchVideo) +* [Bilibili](https://www.bilibili.com/video/av15823922/) + +## Slides +* [Lecture Slides @GoogleDrive](http://bit.ly/PyTorchZeroAll) -* [Lecture Slides @Googledoc](http://bit.ly/PyTorchZeroAll). -If you cannot access the GoogleDoc for somehow, please check out pdf files in slides. However, slides in GoogleDoc is always latest. +If you cannot access Google Drive for some reason, please check out the PDF files in slides. However, the slides on Google Drive are always the latest. We really appreciate your comments.
## Previous Lectures -* cf., http://bit.ly/TF_HKUST (previous 3 day lectures using TensorFlow) +* cf., http://bit.ly/TF_HKUST (3 day crash course using TensorFlow) diff --git a/data-diabetes.csv b/data-diabetes.csv deleted file mode 100644 index 3eac001..0000000 --- a/data-diabetes.csv +++ /dev/null @@ -1,759 +0,0 @@
[759 rows of normalized diabetes CSV data removed with the file; the scripts above now load ./data/diabetes.csv.gz instead]
--0.764706,0.0653266,0.0491803,-0.292929,-0.718676,-0.0909091,0.12895,-0.566667,1 --0.411765,0.477387,0.278689,0,0,0.004471,-0.880444,0.466667,1 --0.764706,-0.0954774,0.147541,-0.656566,0,-0.186289,-0.994022,-0.966667,1 --0.882353,0.366834,0.213115,0.010101,-0.51773,0.114754,-0.725875,-0.9,1 --0.529412,0.145729,0.0655738,0,0,-0.347243,-0.697694,-0.466667,1 -0.0588235,0.567839,0.409836,-0.434343,-0.63357,0.0223547,-0.0512383,-0.3,0 --0.882353,0.537688,0.344262,-0.151515,0.146572,0.210134,-0.479932,-0.933333,1 --0.0588235,0.889447,0.278689,0,0,0.42772,-0.949616,-0.266667,0 --0.176471,0.527638,0.442623,-0.111111,0,0.490313,-0.778822,-0.5,0 --0.764706,-0.00502513,-0.147541,-0.69697,-0.777778,-0.266766,-0.52263,0,1 --0.882353,0.0954774,-0.0819672,-0.575758,-0.680851,-0.248882,-0.355252,-0.933333,1 --0.764706,-0.115578,0.213115,-0.616162,-0.874704,-0.135618,-0.87105,-0.966667,1 -1,0.638191,0.180328,-0.171717,-0.730496,0.219076,-0.368915,-0.133333,0 --0.529412,0.517588,0.47541,-0.232323,0,-0.114754,-0.815542,-0.5,1 --0.176471,0.0251256,0.213115,-0.191919,-0.751773,0.108793,-0.8924,-0.2,1 -0,0.145729,0.311475,-0.313131,-0.326241,0.317437,-0.923997,-0.8,1 --0.764706,0.00502513,0.0491803,-0.535354,0,-0.114754,-0.752348,0,1 -0,0.316583,0.442623,0,0,-0.0581222,-0.432109,-0.633333,0 --0.294118,0.0452261,0.213115,-0.636364,-0.631206,-0.108793,-0.450043,-0.333333,0 --0.647059,0.487437,0.0819672,-0.494949,0,-0.0312965,-0.847993,-0.966667,1 --0.529412,0.20603,0.114754,0,0,-0.117735,-0.461144,-0.566667,1 --0.529412,0.105528,0.0819672,0,0,-0.0491803,-0.664389,-0.733333,1 --0.647059,0.115578,0.47541,-0.757576,-0.815603,-0.153502,-0.643894,-0.733333,1 --0.294118,0.0251256,0.344262,0,0,-0.0819672,-0.912895,-0.5,0 --0.294118,0.346734,0.147541,-0.535354,-0.692671,0.0551417,-0.603757,-0.733333,0 --0.764706,-0.125628,0,-0.535354,0,-0.138599,-0.40649,-0.866667,1 --0.882353,-0.20603,-0.0163934,-0.151515,-0.886525,0.296572,-0.487617,-0.933333,1 --0.764706,-0.246231,0.0491803,-0.515152,-0.869976,-0.114754,-0.75064,-0.6,1 --0.0588235,0.798995,0.180328,-0.151515,-0.692671,-0.0253353,-0.452605,-0.5,0 --0.294118,-0.145729,0.278689,0,0,-0.0700447,-0.740393,-0.3,1 -0,0.296482,0.803279,-0.0707071,-0.692671,1,-0.794193,-0.833333,0 --0.411765,0.437186,0.278689,0,0,0.341282,-0.904355,-0.133333,1 --0.411765,0.306533,0.344262,0,0,0.165425,-0.250213,-0.466667,0 --0.294118,-0.125628,0.311475,0,0,-0.308495,-0.994876,-0.633333,1 -0,0.19598,0.0491803,-0.636364,-0.782506,0.0402385,-0.447481,-0.933333,1 --0.882353,0,0.213115,-0.59596,-0.945626,-0.174367,-0.811272,0,1 --0.411765,-0.266332,-0.0163934,0,0,-0.201192,-0.837746,-0.8,1 --0.529412,0.417085,0.213115,0,0,-0.177347,-0.858241,-0.366667,1 --0.176471,0.949749,0.114754,-0.434343,0,0.0700448,-0.430401,-0.333333,0 --0.0588235,0.819095,0.114754,-0.272727,0.170213,-0.102832,-0.541418,0.3,0 --0.882353,0.286432,0.606557,-0.171717,-0.862884,-0.0461997,0.0614859,-0.6,0 --0.0588235,0.0954774,0.245902,-0.212121,-0.730496,-0.168405,-0.520068,-0.666667,0 --0.411765,0.396985,0.311475,-0.292929,-0.621749,-0.0581222,-0.758326,-0.866667,0 --0.647059,0.115578,0.0163934,0,0,-0.326379,-0.945346,0,1 -0.0588235,0.236181,0.147541,-0.111111,-0.777778,-0.0134128,-0.747225,-0.366667,1 --0.176471,0.59799,0.0819672,0,0,-0.0938897,-0.739539,-0.5,0 -0.294118,0.356784,0,0,0,0.558867,-0.573015,-0.366667,0 --0.0588235,-0.145729,-0.0983607,-0.59596,0,-0.272727,-0.95047,-0.3,1 --0.411765,0.58794,0.377049,-0.171717,-0.503546,0.174367,-0.729291,-0.733333,0 --0.882353,0.0552764,-0.0491803,0,0,-0.275708,-0.906917,0,1 
--0.647059,0.0753769,0.0163934,-0.737374,-0.886525,-0.317437,-0.487617,-0.933333,0 --0.529412,0.0954774,0.0491803,-0.111111,-0.765957,0.0372578,-0.293766,-0.833333,0 --0.529412,0.487437,-0.0163934,-0.454545,-0.248227,-0.0789866,-0.938514,-0.733333,0 -0,0.135678,0.311475,-0.676768,0,-0.0760059,-0.320239,0,1 --0.882353,0.386935,0.344262,0,0,0.195231,-0.865073,-0.766667,1 -0,0.0854271,0.114754,-0.59596,0,-0.186289,-0.394535,-0.633333,1 --0.764706,-0.00502513,0.147541,-0.676768,-0.895981,-0.391952,-0.865927,-0.8,1 --0.294118,0.0351759,0.180328,-0.353535,-0.550827,0.123696,-0.789923,0.133333,1 --0.411765,0.115578,0.180328,-0.434343,0,-0.28763,-0.719044,-0.8,1 --0.0588235,0.969849,0.245902,-0.414141,-0.338061,0.117735,-0.549957,0.2,0 --0.411765,0.628141,0.704918,0,0,0.123696,-0.93766,0.0333333,0 --0.882353,-0.0351759,0.0491803,-0.454545,-0.794326,-0.0104321,-0.819812,0,1 --0.176471,0.849246,0.377049,-0.333333,0,0.0581222,-0.76345,-0.333333,0 --0.764706,-0.18593,-0.0163934,-0.555556,0,-0.174367,-0.818958,-0.866667,1 -0,0.477387,0.393443,0.0909091,0,0.275708,-0.746371,-0.9,1 --0.176471,0.798995,0.557377,-0.373737,0,0.0193741,-0.926558,0.3,1 -0,0.407035,0.0655738,-0.474747,-0.692671,0.269747,-0.698548,-0.9,0 -0.0588235,0.125628,0.344262,-0.353535,-0.586288,0.0193741,-0.844577,-0.5,0 -0.411765,0.517588,0.147541,-0.191919,-0.359338,0.245902,-0.432963,-0.433333,0 --0.411765,0.0954774,0.0163934,-0.171717,-0.695035,0.0670641,-0.627669,-0.866667,0 --0.294118,0.256281,0.114754,-0.393939,-0.716312,-0.105812,-0.670367,-0.633333,1 --0.411765,-0.145729,0.213115,-0.555556,0,-0.135618,-0.0213493,-0.633333,0 --0.411765,0.125628,0.0819672,0,0,0.126677,-0.843723,-0.333333,0 -0,0.778894,-0.0163934,-0.414141,0.130024,0.0312965,-0.151153,0,0 --0.764706,0.58794,0.47541,0,0,-0.0581222,-0.379163,0.5,0 --0.176471,0.19598,0,0,0,-0.248882,-0.88813,-0.466667,1 --0.176471,0.427136,-0.0163934,-0.333333,-0.550827,-0.14158,-0.479932,0.333333,1 --0.882353,0.00502513,0.0819672,-0.69697,-0.867612,-0.296572,-0.497865,-0.833333,1 --0.882353,-0.125628,0.278689,-0.454545,-0.92435,0.0312965,-0.980359,-0.966667,1 -0,0.0150754,0.245902,0,0,0.0640835,-0.897523,-0.833333,1 --0.647059,0.628141,-0.147541,-0.232323,0,0.108793,-0.509821,-0.9,0 --0.529412,0.979899,0.147541,-0.212121,0.758865,0.0938898,0.922289,-0.666667,1 -0,0.175879,0.311475,-0.373737,-0.874704,0.347243,-0.990606,-0.9,1 --0.529412,0.427136,0.409836,0,0,0.311475,-0.515798,-0.966667,0 --0.294118,0.346734,0.311475,-0.252525,-0.125296,0.377049,-0.863365,-0.166667,0 --0.882353,-0.20603,0.311475,-0.494949,-0.91253,-0.242921,-0.568745,-0.966667,1 --0.529412,0.226131,0.114754,0,0,0.0432191,-0.730145,-0.733333,1 --0.647059,-0.256281,0.114754,-0.434343,-0.893617,-0.114754,-0.816396,-0.933333,1 --0.529412,0.718593,0.180328,0,0,0.299553,-0.657558,-0.833333,0 -0,0.798995,0.47541,-0.454545,0,0.314456,-0.480786,-0.933333,0 -0.0588235,0.648241,0.377049,-0.575758,0,-0.0819672,-0.35696,-0.633333,0 -0,0.0452261,0.245902,0,0,-0.451565,-0.569599,-0.8,1 --0.882353,-0.0854271,0.0491803,-0.515152,0,-0.129657,-0.902647,0,1 --0.529412,-0.0854271,0.147541,-0.353535,-0.791962,-0.0134128,-0.685739,-0.966667,1 --0.647059,0.396985,-0.114754,0,0,-0.23696,-0.723313,-0.966667,0 --0.294118,0.19598,-0.180328,-0.555556,-0.583924,-0.19225,0.058924,-0.6,0 --0.764706,0.467337,0.245902,-0.292929,-0.541371,0.138599,-0.785653,-0.733333,1 -0.0588235,0.849246,0.393443,-0.69697,0,-0.105812,-0.030743,-0.0666667,0 -0.176471,0.226131,0.114754,0,0,-0.0700447,-0.846285,-0.333333,1 
-0,0.658291,0.47541,-0.333333,0.607565,0.558867,-0.701964,-0.933333,1 -0.0588235,0.246231,0.147541,-0.333333,-0.0496454,0.0551417,-0.82579,-0.566667,1 --0.882353,0.115578,0.409836,-0.616162,0,-0.102832,-0.944492,-0.933333,1 -0.0588235,0.0653266,-0.147541,0,0,-0.0700447,-0.742101,-0.3,1 --0.764706,0.296482,0.377049,0,0,-0.165425,-0.824082,-0.8,1 --0.764706,-0.0954774,0.311475,-0.717172,-0.869976,-0.272727,-0.853971,-0.9,1 -0,-0.135678,0.114754,-0.353535,0,0.0670641,-0.863365,-0.866667,1 -0.411765,-0.0753769,0.0163934,-0.858586,-0.390071,-0.177347,-0.275833,-0.233333,0 --0.882353,0.135678,0.0491803,-0.292929,0,0.00149028,-0.602904,0,0 --0.647059,0.115578,-0.0819672,-0.212121,0,-0.102832,-0.590948,-0.7,1 --0.764706,0.145729,0.114754,-0.555556,0,-0.14456,-0.988044,-0.866667,1 --0.882353,0.939698,-0.180328,-0.676768,-0.113475,-0.228018,-0.507259,-0.9,1 --0.647059,0.919598,0.114754,-0.69697,-0.692671,-0.0789866,-0.811272,-0.566667,1 --0.647059,0.417085,0,0,0,-0.105812,-0.416738,-0.8,0 --0.529412,-0.0452261,0.147541,-0.353535,0,-0.0432191,-0.54398,-0.9,1 --0.647059,0.427136,0.311475,-0.69697,0,-0.0342771,-0.895816,0.4,1 --0.529412,0.236181,0.0163934,0,0,-0.0461997,-0.873612,-0.533333,0 --0.411765,-0.0351759,0.213115,-0.636364,-0.841608,0.00149028,-0.215201,-0.266667,1 -0,0.386935,0,0,0,0.0819672,-0.269855,-0.866667,0 --0.764706,0.286432,0.0491803,-0.151515,0,0.19225,-0.126388,-0.9,1 -0,0.0251256,-0.147541,0,0,-0.251863,0,0,1 --0.764706,0.467337,0,0,0,-0.180328,-0.861657,-0.766667,0 -0.176471,0.0150754,0.409836,-0.252525,0,0.359165,-0.0964987,-0.433333,0 --0.764706,0.0854271,0.0163934,-0.353535,-0.867612,-0.248882,-0.957301,0,1 --0.647059,0.226131,0.278689,0,0,-0.314456,-0.849701,-0.366667,1 --0.882353,-0.286432,0.278689,0.010101,-0.893617,-0.0104321,-0.706234,0,1 -0.529412,0.0653266,0.147541,0,0,0.0193741,-0.852263,0.0333333,1 --0.764706,0.00502513,0.147541,0.0505051,-0.865248,0.207154,-0.488471,-0.866667,1 --0.176471,0.0653266,-0.0163934,-0.515152,0,-0.210134,-0.813834,-0.733333,0 -0,0.0452261,0.0491803,-0.535354,-0.725768,-0.171386,-0.678907,-0.933333,1 --0.411765,0.145729,0.213115,0,0,-0.257824,-0.431255,0.2,1 --0.764706,0.0854271,0.0163934,-0.79798,-0.34279,-0.245902,-0.314261,-0.966667,1 -0,0.467337,0.147541,0,0,0.129657,-0.781383,-0.766667,0 -0.176471,0.296482,0.245902,-0.434343,-0.711584,0.0700448,-0.827498,-0.4,1 --0.176471,0.336683,0.442623,-0.69697,-0.63357,-0.0342771,-0.842869,-0.466667,1 --0.176471,0.61809,0.409836,0,0,-0.0938897,-0.925705,-0.133333,0 --0.764706,0.0854271,0.311475,0,0,-0.195231,-0.845431,0.0333333,0 --0.411765,0.557789,0.377049,-0.111111,0.288416,0.153502,-0.538002,-0.566667,1 --0.882353,0.19598,0.409836,-0.212121,-0.479905,0.359165,-0.376601,-0.733333,0 --0.529412,-0.0351759,-0.0819672,-0.656566,-0.884161,-0.38003,-0.77626,-0.833333,1 --0.411765,0.0854271,0.180328,-0.131313,-0.822695,0.0760059,-0.842015,-0.6,1 -0,-0.21608,0.442623,-0.414141,-0.905437,0.0998511,-0.695986,0,1 -0,0.0753769,0.0163934,-0.393939,-0.825059,0.0909091,-0.420154,-0.866667,0 --0.764706,0.286432,0.278689,-0.252525,-0.56974,0.290611,-0.0213493,-0.666667,0 --0.882353,0.286432,-0.213115,-0.0909091,-0.541371,0.207154,-0.543126,-0.9,0 -0,0.61809,-0.180328,0,0,-0.347243,-0.849701,0.466667,1 --0.294118,0.517588,0.0163934,-0.373737,-0.716312,0.0581222,-0.475662,-0.766667,1 --0.764706,0.467337,0.147541,-0.232323,-0.148936,-0.165425,-0.778822,-0.733333,0 -0,0.266332,0.377049,-0.414141,-0.491726,-0.0849478,-0.622545,-0.9,1 -0.647059,0.00502513,0.278689,-0.494949,-0.565012,0.0909091,-0.714774,-0.166667,0 
--0.0588235,0.125628,0.180328,0,0,-0.296572,-0.349274,0.233333,1 -0,0.678392,0,0,0,-0.0372578,-0.350128,-0.7,0 --0.764706,0.447236,-0.0491803,-0.333333,-0.680851,-0.0581222,-0.706234,-0.866667,0 --0.411765,-0.226131,0.344262,-0.171717,-0.900709,0.0670641,-0.93339,-0.533333,1 --0.411765,0.155779,0.606557,0,0,0.576751,-0.88813,-0.766667,0 --0.647059,0.507538,0.245902,0,0,-0.374069,-0.889838,-0.466667,1 --0.764706,0.20603,0.245902,-0.252525,-0.751773,0.183309,-0.883006,-0.733333,1 -0.176471,0.61809,0.114754,-0.535354,-0.687943,-0.23994,-0.788215,-0.133333,0 -0,0.376884,0.114754,-0.717172,-0.650118,-0.260805,-0.944492,0,1 -0,0.286432,0.114754,-0.616162,-0.574468,-0.0909091,0.121264,-0.866667,0 --0.764706,0.246231,0.114754,-0.434343,-0.515366,-0.019374,-0.319385,-0.7,0 --0.294118,-0.19598,0.0819672,-0.393939,0,-0.219076,-0.799317,-0.333333,1 -0,0.0653266,0.147541,-0.252525,-0.650118,0.174367,-0.549957,-0.966667,1 --0.764706,0.557789,0.213115,-0.656566,-0.77305,-0.207153,-0.69684,-0.8,0 --0.647059,0.135678,-0.180328,-0.79798,-0.799054,-0.120715,-0.532024,-0.866667,1 --0.176471,0.0954774,0.311475,-0.373737,0,0.0700448,-0.104184,-0.266667,0 --0.764706,0.125628,0.114754,-0.555556,-0.777778,0.0163934,-0.797609,-0.833333,1 --0.647059,-0.00502513,0.311475,-0.777778,-0.8487,-0.424739,-0.824082,-0.7,1 --0.647059,0.829146,0.213115,0,0,-0.0909091,-0.77199,-0.733333,0 --0.647059,0.155779,0.0819672,-0.212121,-0.669031,0.135618,-0.938514,-0.766667,1 --0.294118,0.949749,0.278689,0,0,-0.299553,-0.956447,0.266667,0 --0.529412,0.296482,-0.0163934,-0.757576,-0.453901,-0.180328,-0.616567,-0.666667,1 --0.647059,0.125628,0.213115,-0.393939,0,-0.0581222,-0.898377,-0.866667,0 -0,0.246231,0.147541,-0.59596,0,-0.183308,-0.849701,-0.5,0 -0.529412,0.527638,0.47541,-0.333333,-0.931442,-0.201192,-0.442357,-0.266667,0 --0.764706,0.125628,0.229508,-0.353535,0,0.0640835,-0.940222,0,1 --0.882353,0.577889,0.180328,-0.575758,-0.602837,-0.23696,-0.961571,-0.9,1 --0.882353,0.226131,0.0491803,-0.353535,-0.631206,0.0461997,-0.475662,-0.7,0 -0.176471,0.798995,0.147541,0,0,0.0461997,-0.895816,-0.466667,1 --0.764706,0.0251256,0.409836,-0.272727,-0.716312,0.356185,-0.958155,-0.933333,0 --0.294118,0.0552764,0.147541,-0.353535,-0.839243,-0.0819672,-0.962425,-0.466667,1 --0.0588235,0.18593,0.180328,-0.616162,0,-0.311475,0.193851,-0.166667,1 --0.764706,-0.125628,-0.0491803,-0.676768,-0.877069,-0.0253353,-0.924851,-0.866667,1 --0.882353,0.809045,0,0,0,0.290611,-0.82579,-0.333333,0 -0.411765,0.0653266,0.311475,0,0,-0.296572,-0.949616,-0.233333,1 --0.882353,-0.0452261,-0.0163934,-0.636364,-0.862884,-0.28763,-0.844577,-0.966667,1 -0,0.658291,0.245902,-0.131313,-0.397163,0.42772,-0.845431,-0.833333,1 -0,0.175879,0,0,0,0.00745157,-0.270709,-0.233333,1 --0.411765,0.155779,0.245902,0,0,-0.0700447,-0.773698,-0.233333,0 -0.0588235,0.527638,0.278689,-0.313131,-0.595745,0.0193741,-0.304014,-0.6,0 --0.176471,0.788945,0.377049,0,0,0.18927,-0.783945,-0.333333,0 --0.882353,0.306533,0.147541,-0.737374,-0.751773,-0.228018,-0.663535,-0.966667,1 --0.882353,-0.0452261,0.213115,-0.575758,-0.827423,-0.228018,-0.491887,-0.5,1 --0.882353,0,0.114754,-0.292929,0,-0.0461997,-0.734415,-0.966667,1 --0.411765,0.226131,0.409836,0,0,0.0342773,-0.818958,-0.6,1 --0.0588235,-0.0452261,0.180328,0,0,0.0968703,-0.652434,0.2,1 --0.0588235,0.266332,0.442623,-0.272727,-0.744681,0.147541,-0.768574,-0.0666667,1 --0.882353,0.396985,-0.245902,-0.616162,-0.803783,-0.14456,-0.508113,-0.966667,1 --0.647059,0.165829,0,0,0,-0.299553,-0.906917,-0.933333,1 
--0.647059,-0.00502513,0.0163934,-0.616162,-0.825059,-0.350224,-0.828352,-0.833333,1 --0.411765,0,0.311475,-0.353535,0,0.222057,-0.771136,-0.466667,0 --0.529412,-0.0753769,0.311475,0,0,0.257824,-0.864219,-0.733333,1 --0.529412,0.376884,0.377049,0,0,-0.0700447,-0.851409,-0.7,1 --0.647059,-0.386935,0.344262,-0.434343,0,0.0253354,-0.859095,-0.166667,1 --0.882353,-0.0954774,0.0163934,-0.757576,-0.898345,-0.18927,-0.571307,-0.9,1 --0.647059,-0.0954774,0.278689,0,0,0.272727,-0.58924,0,1 -0.0588235,0.658291,0.442623,0,0,-0.0938897,-0.808711,-0.0666667,0 --0.882353,0.256281,-0.180328,-0.191919,-0.605201,-0.00745157,-0.24509,-0.766667,0 -0.529412,0.296482,0,-0.393939,0,0.18927,-0.5807,-0.233333,0 -0.411765,-0.115578,0.213115,-0.191919,-0.87234,0.052161,-0.743809,-0.1,1 --0.882353,0.969849,0.245902,-0.272727,-0.411348,0.0879285,-0.319385,-0.733333,0 --0.411765,0.899497,0.0491803,-0.333333,-0.231678,-0.0700447,-0.568745,-0.733333,0 --0.411765,0.58794,0.147541,0,0,-0.111773,-0.889838,0.4,1 --0.411765,0.0351759,0.770492,-0.252525,0,0.168405,-0.806149,0.466667,1 --0.529412,0.467337,0.278689,0,0,0.147541,-0.622545,0.533333,0 --0.529412,0.477387,0.213115,-0.494949,-0.307329,0.0402385,-0.737831,-0.7,1 --0.411765,-0.00502513,-0.114754,-0.434343,-0.803783,0.0134128,-0.640478,-0.7,1 --0.294118,0.246231,0.180328,0,0,-0.177347,-0.752348,-0.733333,0 -0,0.0150754,0.0491803,-0.656566,0,-0.374069,-0.851409,0,1 --0.647059,-0.18593,0.409836,-0.676768,-0.843972,-0.180328,-0.805295,-0.966667,1 --0.882353,0.336683,0.672131,-0.434343,-0.669031,-0.0223547,-0.866781,-0.2,0 --0.647059,0.738693,0.344262,-0.030303,0.0992908,0.14456,0.758326,-0.866667,0 -0,0.18593,0.0491803,-0.535354,-0.789598,0,0.411614,0,1 -0,-0.155779,0.0491803,-0.555556,-0.843972,0.0670641,-0.601196,0,1 --0.764706,0.0552764,-0.0491803,-0.191919,-0.777778,0.0402385,-0.874466,-0.866667,1 --0.764706,0.226131,-0.147541,-0.131313,-0.626478,0.0789866,-0.369769,-0.766667,1 -0.411765,0.407035,0.344262,-0.131313,-0.231678,0.168405,-0.615713,0.233333,0 -0,-0.0150754,0.344262,-0.69697,-0.801418,-0.248882,-0.811272,-0.966667,1 --0.882353,-0.125628,-0.0163934,-0.252525,-0.822695,0.108793,-0.631939,-0.966667,1 --0.529412,0.567839,0.229508,0,0,0.439642,-0.863365,-0.633333,0 -0,-0.0653266,0.639344,-0.212121,-0.829787,0.293592,-0.194705,-0.533333,1 --0.882353,0.0753769,0.180328,-0.393939,-0.806147,-0.0819672,-0.3655,-0.9,1 -0,0.0552764,0.114754,-0.555556,0,-0.403875,-0.865073,-0.966667,1 --0.882353,0.0954774,-0.0163934,-0.838384,-0.56974,-0.242921,-0.257899,0,1 --0.882353,-0.0954774,0.0163934,-0.636364,-0.86052,-0.251863,0.0162254,-0.866667,1 --0.882353,0.256281,0.147541,-0.515152,-0.739953,-0.275708,-0.877882,-0.866667,1 --0.882353,0.19598,-0.114754,-0.737374,-0.881797,-0.33532,-0.891546,-0.9,1 --0.411765,0.165829,0.213115,-0.414141,0,-0.0372578,-0.502989,-0.533333,0 --0.0588235,0.0552764,0.639344,-0.272727,0,0.290611,-0.862511,-0.2,0 --0.411765,0.447236,0.344262,-0.474747,-0.326241,-0.0461997,-0.680615,0.233333,0 --0.647059,0.00502513,0.114754,-0.535354,-0.808511,-0.0581222,-0.256191,-0.766667,1 --0.882353,0.00502513,0.0819672,-0.414141,-0.536643,-0.0461997,-0.687447,-0.3,1 --0.411765,0.668342,0.245902,0,0,0.362146,-0.77626,-0.8,0 --0.882353,0.316583,0.0491803,-0.717172,-0.0189125,-0.293592,-0.734415,0,1 --0.529412,0.165829,0.180328,-0.757576,-0.794326,-0.341282,-0.671221,-0.466667,1 --0.529412,0.58794,0.278689,0,0,-0.019374,-0.380871,-0.666667,0 --0.764706,0.276382,-0.0491803,-0.515152,-0.349882,-0.174367,0.299744,-0.866667,1 
--0.647059,-0.0351759,-0.0819672,-0.313131,-0.728132,-0.263785,-0.260461,-0.4,1 -0,0.316583,0.0819672,-0.191919,0,0.0223547,-0.899231,-0.966667,0 --0.647059,-0.175879,0.147541,0,0,-0.371088,-0.734415,-0.866667,1 --0.647059,0.939698,0.147541,-0.373737,0,0.0402385,-0.860803,-0.866667,0 --0.529412,-0.0452261,0.0491803,0,0,-0.0461997,-0.92912,-0.666667,0 --0.411765,0.366834,0.377049,-0.171717,-0.791962,0.0432191,-0.822374,-0.533333,0 -0.0588235,-0.276382,0.278689,-0.494949,0,-0.0581222,-0.827498,-0.433333,1 --0.411765,0.688442,0.0491803,0,0,-0.019374,-0.951324,-0.333333,0 --0.764706,0.236181,-0.213115,-0.353535,-0.609929,0.254843,-0.622545,-0.833333,1 --0.529412,0.155779,0.180328,0,0,-0.138599,-0.745517,-0.166667,0 -0,0.0150754,0.0163934,0,0,-0.347243,-0.779675,-0.866667,1 --0.0588235,0.979899,0.213115,0,0,-0.228018,-0.0495303,-0.4,0 --0.882353,0.728643,0.114754,-0.010101,0.368794,0.263785,-0.467122,-0.766667,0 --0.294118,0.0251256,0.47541,-0.212121,0,0.0640835,-0.491033,-0.766667,1 --0.882353,0.125628,0.180328,-0.393939,-0.583924,0.0253354,-0.615713,-0.866667,1 --0.882353,0.437186,0.377049,-0.535354,-0.267139,0.263785,-0.147737,-0.966667,1 --0.882353,0.437186,0.213115,-0.555556,-0.855792,-0.219076,-0.847993,0,1 -0,0.386935,-0.0163934,-0.292929,-0.605201,0.0312965,-0.610589,0,0 --0.647059,0.738693,0.377049,-0.333333,0.120567,0.0640835,-0.846285,-0.966667,0 --0.882353,-0.0251256,0.114754,-0.575758,0,-0.18927,-0.131512,-0.966667,1 --0.529412,0.447236,0.344262,-0.353535,0,0.147541,-0.59351,-0.466667,0 --0.882353,-0.165829,0.114754,0,0,-0.457526,-0.533732,-0.8,1 --0.647059,0.296482,0.0491803,-0.414141,-0.728132,-0.213115,-0.87959,-0.766667,0 --0.882353,0.19598,0.442623,-0.171717,-0.598109,0.350224,-0.633646,-0.833333,1 --0.764706,-0.0552764,0.114754,-0.636364,-0.820331,-0.225037,-0.587532,0,1 -0,0.0251256,0.0491803,-0.0707071,-0.815603,0.210134,-0.64304,0,1 --0.764706,0.155779,0.0491803,-0.555556,0,-0.0819672,-0.707088,0,1 --0.0588235,0.517588,0.278689,-0.353535,-0.503546,0.278689,-0.625961,-0.5,0 --0.529412,0.849246,0.278689,-0.212121,-0.345154,0.102832,-0.841161,-0.666667,0 -0,-0.0552764,0,0,0,0,-0.847993,-0.866667,1 --0.882353,0.819095,0.0491803,-0.393939,-0.574468,0.0163934,-0.786507,-0.433333,0 -0,0.356784,0.540984,-0.0707071,-0.65721,0.210134,-0.824082,-0.833333,1 --0.882353,-0.0452261,0.344262,-0.494949,-0.574468,0.0432191,-0.867635,-0.266667,0 --0.764706,-0.00502513,0,0,0,-0.338301,-0.974381,-0.933333,1 --0.647059,-0.105528,0.213115,-0.676768,-0.799054,-0.0938897,-0.596072,-0.433333,1 --0.882353,-0.19598,0.213115,-0.777778,-0.858156,-0.105812,-0.616567,-0.966667,1 --0.764706,0.396985,0.229508,0,0,-0.23696,-0.923997,-0.733333,1 --0.882353,-0.0954774,0.114754,-0.838384,0,-0.269747,-0.0947908,-0.5,1 -0,0.417085,0,0,0,0.263785,-0.891546,-0.733333,0 -0.411765,0.407035,0.393443,-0.333333,0,0.114754,-0.858241,-0.333333,1 --0.411765,0.477387,0.229508,0,0,-0.108793,-0.695986,-0.766667,1 --0.882353,-0.0251256,0.147541,-0.69697,0,-0.457526,-0.941076,0,1 --0.294118,0.0753769,0.442623,0,0,0.0968703,-0.445773,-0.666667,1 -0,0.899497,0.704918,-0.494949,0,0.0223547,-0.695132,-0.333333,0 --0.764706,-0.165829,0.0819672,-0.535354,-0.881797,-0.0402384,-0.642186,-0.966667,1 --0.529412,0.175879,0.0491803,-0.454545,-0.716312,-0.0104321,-0.870196,-0.9,1 --0.0588235,0.0854271,0.147541,0,0,-0.0909091,-0.251067,-0.6,0 --0.529412,0.175879,0.0163934,-0.757576,0,-0.114754,-0.742101,-0.7,0 -0,0.809045,0.278689,0.272727,-0.966903,0.770492,1,-0.866667,0 
--0.882353,0.00502513,0.180328,-0.757576,-0.834515,-0.245902,-0.504697,-0.766667,1 -0,-0.0452261,0.311475,-0.0909091,-0.782506,0.0879285,-0.784799,-0.833333,1 -0,0.0452261,0.0491803,-0.252525,-0.8487,0.00149028,-0.631085,-0.966667,0 -0,0.20603,0.213115,-0.636364,-0.851064,-0.0909091,-0.823228,-0.833333,1 --0.882353,-0.175879,0.0491803,-0.737374,-0.775414,-0.368107,-0.712212,-0.933333,1 --0.764706,0.346734,0.147541,0,0,-0.138599,-0.603757,-0.933333,0 -0,-0.0854271,0.114754,-0.353535,-0.503546,0.18927,-0.741247,-0.866667,1 --0.764706,0.19598,0,0,0,-0.415797,-0.356106,0.7,1 --0.764706,0.00502513,-0.114754,-0.434343,-0.751773,0.126677,-0.641332,-0.9,1 -0.647059,0.758794,0.0163934,-0.393939,0,0.00149028,-0.885568,-0.433333,0 --0.882353,0.356784,-0.114754,0,0,-0.204173,-0.479932,0.366667,1 --0.411765,-0.135678,0.114754,-0.434343,-0.832151,-0.0998509,-0.755764,-0.9,1 -0.0588235,0.346734,0.213115,-0.333333,-0.858156,-0.228018,-0.673783,1,1 -0.0588235,0.20603,0.180328,-0.555556,-0.867612,-0.38003,-0.440649,-0.1,1 --0.882353,-0.286432,0.0163934,0,0,-0.350224,-0.711358,-0.833333,1 --0.0588235,-0.256281,0.147541,-0.191919,-0.884161,0.052161,-0.46456,-0.4,1 --0.411765,-0.115578,0.278689,-0.393939,0,-0.177347,-0.846285,-0.466667,1 -0.176471,0.155779,0.606557,0,0,-0.28465,-0.193851,-0.566667,1 -0,0.246231,-0.0819672,-0.737374,-0.751773,-0.350224,-0.680615,0,1 -0,-0.256281,-0.147541,-0.79798,-0.914894,-0.171386,-0.836892,-0.966667,1 -0,-0.0251256,0.0491803,-0.272727,-0.763593,0.0968703,-0.554227,-0.866667,1 --0.0588235,0.20603,0,0,0,-0.105812,-0.910333,-0.433333,0 --0.294118,0.547739,0.278689,-0.171717,-0.669031,0.374069,-0.578992,-0.8,1 --0.882353,0.447236,0.344262,-0.191919,0,0.230999,-0.548249,-0.766667,1 -0,0.376884,0.147541,-0.232323,0,-0.0104321,-0.921435,-0.966667,1 -0,0.19598,0.0819672,-0.454545,0,0.156483,-0.845431,-0.966667,1 --0.176471,0.366834,0.47541,0,0,-0.108793,-0.887276,-0.0333333,1 --0.529412,0.145729,0.0491803,0,0,-0.138599,-0.959009,-0.9,1 -0,0.376884,0.377049,-0.454545,0,-0.186289,-0.869342,0.266667,1 --0.764706,0.0552764,0.311475,-0.0909091,-0.548463,0.004471,-0.459436,-0.733333,0 --0.176471,0.145729,0.245902,-0.656566,-0.739953,-0.290611,-0.668659,-0.666667,1 --0.0588235,0.266332,0.213115,-0.232323,-0.822695,-0.228018,-0.928266,-0.4,1 --0.529412,0.326633,0.409836,-0.373737,0,-0.165425,-0.708796,0.4,1 --0.647059,0.58794,0.147541,-0.393939,-0.224586,0.0581222,-0.772844,-0.533333,0 -0,0.236181,0.442623,-0.252525,0,0.0491804,-0.898377,-0.733333,1 --0.529412,-0.145729,-0.0491803,-0.555556,-0.884161,-0.171386,-0.805295,-0.766667,1 -0,-0.155779,0.344262,-0.373737,-0.704492,0.138599,-0.867635,-0.933333,1 -0,0.457286,0,0,0,0.317437,-0.528608,-0.666667,0 -0,0.356784,0.114754,-0.151515,-0.408983,0.260805,-0.75491,-0.9,0 --0.882353,0.396985,0.0163934,-0.171717,0.134752,0.213115,-0.608881,0,1 -0,0.738693,0.278689,-0.353535,-0.373522,0.385991,-0.0768574,0.233333,1 --0.529412,-0.00502513,0.180328,-0.656566,0,-0.23696,-0.815542,-0.766667,1 --0.0588235,0.949749,0.311475,0,0,-0.222057,-0.596072,0.533333,1 --0.764706,-0.165829,0.0655738,-0.434343,-0.843972,0.0968703,-0.529462,-0.9,1 --0.764706,-0.105528,0.47541,-0.393939,0,-0.00149028,-0.81725,-0.3,1 --0.529412,-0.00502513,0.114754,-0.232323,0,-0.0223547,-0.942784,-0.6,1 --0.529412,0.256281,0.147541,-0.636364,-0.711584,-0.138599,-0.089667,-0.2,0 --0.647059,-0.19598,0,0,0,0,-0.918019,-0.966667,1 --0.294118,0.668342,0.213115,0,0,-0.207153,-0.807003,0.5,1 --0.411765,0.105528,0.114754,0,0,-0.225037,-0.81725,-0.7,1 
--0.764706,-0.18593,0.180328,-0.69697,-0.820331,-0.102832,-0.599488,-0.866667,1 --0.176471,0.959799,0.147541,-0.333333,-0.65721,-0.251863,-0.927412,0.133333,0 --0.294118,0.547739,0.213115,-0.353535,-0.543735,-0.126677,-0.350128,-0.4,1 --0.764706,0.175879,0.47541,-0.616162,-0.832151,-0.248882,-0.799317,0,1 --0.647059,-0.155779,0.180328,-0.353535,0,0.108793,-0.838599,-0.766667,1 --0.294118,0,0.114754,-0.171717,0,0.162444,-0.445773,-0.333333,0 --0.176471,-0.0552764,0.0491803,-0.494949,-0.813239,-0.00745157,-0.436379,-0.333333,1 --0.647059,-0.0351759,0.278689,-0.212121,0,0.111773,-0.863365,-0.366667,1 -0.176471,-0.246231,0.344262,0,0,-0.00745157,-0.842015,-0.433333,1 -0,0.809045,0.47541,-0.474747,-0.787234,0.0879285,-0.798463,-0.533333,0 --0.882353,0.306533,-0.0163934,-0.535354,-0.598109,-0.147541,-0.475662,0,1 --0.764706,-0.155779,-0.180328,-0.535354,-0.820331,-0.0938897,-0.239966,0,1 --0.0588235,0.20603,0.278689,0,0,-0.254843,-0.717336,0.433333,1 -0.411765,-0.155779,0.180328,-0.373737,0,-0.114754,-0.81298,-0.166667,0 -0,0.396985,0.0163934,-0.656566,-0.503546,-0.341282,-0.889838,0,1 -0.0588235,-0.0854271,0.114754,0,0,-0.278688,-0.895816,0.233333,1 --0.764706,-0.0854271,0.0163934,0,0,-0.186289,-0.618275,-0.966667,1 --0.647059,-0.00502513,-0.114754,-0.616162,-0.79669,-0.23696,-0.935098,-0.9,1 --0.647059,0.638191,0.147541,-0.636364,-0.751773,-0.0581222,-0.837746,-0.766667,0 -0.0588235,0.457286,0.442623,-0.313131,-0.609929,-0.0968703,-0.408198,0.0666667,0 -0.529412,-0.236181,-0.0163934,0,0,-0.0223547,-0.912895,-0.333333,1 --0.294118,0.296482,0.47541,-0.858586,-0.229314,-0.415797,-0.569599,0.3,1 --0.764706,-0.316583,0.147541,-0.353535,-0.843972,-0.254843,-0.906917,-0.866667,1 --0.647059,0.246231,0.311475,-0.333333,-0.692671,-0.0104321,-0.806149,-0.833333,1 --0.294118,0.145729,0,0,0,0,-0.905209,-0.833333,1 -0.0588235,0.306533,0.147541,0,0,0.0193741,-0.509821,-0.2,0 --0.647059,0.256281,-0.0491803,0,0,-0.0581222,-0.93766,-0.9,1 --0.647059,-0.125628,-0.0163934,-0.636364,0,-0.350224,-0.687447,0,1 --0.882353,-0.0251256,0.0491803,-0.616162,-0.806147,-0.457526,-0.811272,0,1 --0.647059,0.165829,0.213115,-0.69697,-0.751773,-0.216095,-0.975235,-0.9,1 -0,0.175879,0.0819672,-0.373737,-0.555556,-0.0819672,-0.645602,-0.966667,1 -0,0.115578,0.0655738,0,0,-0.266766,-0.502989,-0.666667,1 --0.764706,0.226131,-0.0163934,-0.636364,-0.749409,-0.111773,-0.454313,-0.966667,1 -0,0.0753769,0.245902,0,0,0.350224,-0.480786,-0.9,1 --0.882353,-0.135678,0.0819672,0.0505051,-0.846336,0.230999,-0.283518,-0.733333,1 --0.294118,-0.0854271,0,0,0,-0.111773,-0.63877,-0.666667,1 --0.882353,-0.226131,-0.0819672,-0.393939,-0.867612,-0.00745157,0.00170794,-0.9,1 --0.529412,0.326633,0,0,0,-0.019374,-0.808711,-0.933333,0 -0,0.0552764,0.47541,0,0,-0.117735,-0.898377,-0.166667,1 -0,-0.427136,-0.0163934,0,0,-0.353204,-0.438941,0.533333,1 -0,0.276382,0.311475,-0.252525,-0.503546,0.0819672,-0.380017,-0.933333,1 --0.647059,0.296482,0.508197,-0.010101,-0.63357,0.0849479,-0.239966,-0.633333,0 --0.0588235,0.00502513,0.213115,-0.191919,-0.491726,0.174367,-0.502135,-0.266667,0 --0.647059,0.286432,0.180328,-0.494949,-0.550827,-0.0342771,-0.59778,-0.8,0 -0.176471,-0.0954774,0.393443,-0.353535,0,0.0402385,-0.362084,0.166667,0 --0.529412,-0.155779,0.47541,-0.535354,-0.867612,0.177347,-0.930828,-0.866667,1 --0.882353,-0.115578,0.278689,-0.414141,-0.820331,-0.0461997,-0.75491,-0.733333,1 --0.0588235,0.869347,0.47541,-0.292929,-0.468085,0.028316,-0.70538,-0.466667,0 --0.411765,0.879397,0.245902,-0.454545,-0.510638,0.299553,-0.183604,0.0666667,0 
--0.529412,0.316583,0.114754,-0.575758,-0.607565,-0.0134128,-0.929974,-0.766667,1 --0.882353,0.648241,0.344262,-0.131313,-0.841608,-0.0223547,-0.775406,-0.0333333,1 --0.529412,0.899497,0.803279,-0.373737,0,-0.150522,-0.485909,-0.466667,1 --0.882353,0.165829,0.147541,-0.434343,0,-0.183308,-0.8924,0,1 --0.647059,-0.155779,0.114754,-0.393939,-0.749409,-0.0491803,-0.561913,-0.866667,1 --0.294118,0.145729,0.442623,0,0,-0.171386,-0.855679,0.5,1 --0.882353,-0.115578,0.0163934,-0.515152,-0.895981,-0.108793,-0.706234,-0.933333,1 --0.882353,-0.155779,0.0491803,-0.535354,-0.728132,0.0998511,-0.664389,-0.766667,1 --0.176471,0.246231,0.147541,-0.333333,-0.491726,-0.23994,-0.92912,-0.466667,1 --0.882353,-0.0251256,0.147541,-0.191919,0,0.135618,-0.880444,-0.7,1 --0.0588235,0.105528,0.245902,0,0,-0.171386,-0.864219,0.233333,1 -0.294118,0.0351759,0.114754,-0.191919,0,0.377049,-0.959009,-0.3,1 -0.294118,-0.145729,0.213115,0,0,-0.102832,-0.810418,-0.533333,1 --0.294118,0.256281,0.245902,0,0,0.00745157,-0.963279,0.1,0 -0,0.98995,0.0819672,-0.353535,-0.352246,0.230999,-0.637916,-0.766667,0 --0.882353,-0.125628,0.114754,-0.313131,-0.817967,0.120715,-0.724167,-0.9,1 --0.294118,-0.00502513,-0.0163934,-0.616162,-0.87234,-0.198212,-0.642186,-0.633333,1 -0,-0.0854271,0.311475,0,0,-0.0342771,-0.553373,-0.8,1 --0.764706,-0.0452261,-0.114754,-0.717172,-0.791962,-0.222057,-0.427839,-0.966667,1 --0.882353,-0.00502513,0.180328,-0.393939,-0.957447,0.150522,-0.714774,0,1 --0.294118,-0.0753769,0.0163934,-0.353535,-0.702128,-0.0461997,-0.994022,-0.166667,1 --0.529412,0.547739,0.180328,-0.414141,-0.702128,-0.0670641,-0.777968,-0.466667,1 -0,0.21608,0.0819672,-0.393939,-0.609929,0.0223547,-0.893254,-0.6,0 --0.647059,-0.21608,0.147541,0,0,-0.0312965,-0.836038,-0.4,1 --0.764706,0.306533,0.57377,0,0,-0.326379,-0.837746,0,1 --0.647059,0.115578,-0.0491803,-0.373737,-0.895981,-0.120715,-0.699402,-0.966667,1 --0.764706,-0.0150754,-0.0163934,-0.656566,-0.716312,0.0342773,-0.897523,-0.966667,1 --0.882353,0.437186,0.409836,-0.393939,-0.219858,-0.102832,-0.304868,-0.933333,1 --0.882353,0.19598,-0.278689,-0.0505051,-0.851064,0.0581222,-0.827498,-0.866667,1 --0.294118,0.0854271,-0.278689,-0.59596,-0.692671,-0.28465,-0.372331,-0.533333,1 --0.764706,0.18593,0.311475,0,0,0.278689,-0.474808,0,0 -0.176471,0.336683,0.114754,0,0,-0.195231,-0.857387,-0.5,1 --0.764706,0.979899,0.147541,1,0,0.0342773,-0.575576,0.366667,0 -0,0.517588,0.47541,-0.0707071,0,0.254843,-0.749787,0,0 --0.294118,0.0954774,-0.0163934,-0.454545,0,-0.254843,-0.890692,-0.8,1 -0.411765,0.21608,0.278689,-0.656566,0,-0.210134,-0.845431,0.366667,1 --0.0588235,0.00502513,0.245902,0,0,0.153502,-0.904355,-0.3,1 --0.0588235,0.246231,0.245902,-0.515152,0.41844,-0.14456,-0.479932,0.0333333,0 --0.882353,-0.0653266,-0.0819672,-0.777778,0,-0.329359,-0.710504,-0.966667,1 --0.0588235,0.437186,0.0819672,0,0,0.0402385,-0.956447,-0.333333,0 --0.294118,0.0351759,0.0819672,0,0,-0.275708,-0.853971,-0.733333,1 --0.647059,0.768844,0.409836,-0.454545,-0.631206,-0.00745157,-0.0811272,0.0333333,0 -0,-0.266332,0,0,0,-0.371088,-0.774552,-0.866667,1 -0.294118,0.115578,0.377049,-0.191919,0,0.394933,-0.276687,-0.2,0 --0.764706,0.125628,0.278689,0.010101,-0.669031,0.174367,-0.917165,-0.9,1 --0.647059,0.326633,0.311475,0,0,0.0253354,-0.723313,-0.233333,0 --0.764706,-0.175879,-0.147541,-0.555556,-0.728132,-0.150522,0.384287,-0.866667,1 --0.294118,0.236181,0.180328,-0.0909091,-0.456265,0.00149028,-0.440649,-0.566667,1 -0,0.889447,0.344262,-0.717172,-0.562648,-0.0461997,-0.484202,-0.966667,0 
-0,-0.326633,0.245902,0,0,0.350224,-0.900939,-0.166667,1 --0.882353,-0.105528,-0.606557,-0.616162,-0.940898,-0.171386,-0.58924,0,1 --0.882353,0.738693,0.213115,0,0,0.0968703,-0.99146,-0.433333,0 --0.882353,0.0954774,-0.377049,-0.636364,-0.716312,-0.311475,-0.719044,-0.833333,1 --0.882353,0.0854271,0.442623,-0.616162,0,-0.19225,-0.725021,-0.9,1 --0.294118,-0.0351759,0,0,0,-0.293592,-0.904355,-0.766667,1 --0.882353,0.246231,0.213115,-0.272727,0,-0.171386,-0.981213,-0.7,1 --0.176471,0.507538,0.278689,-0.414141,-0.702128,0.0491804,-0.475662,0.1,0 --0.529412,0.839196,0,0,0,-0.153502,-0.885568,-0.5,0 --0.882353,0.246231,-0.0163934,-0.353535,0,0.0670641,-0.627669,0,1 --0.882353,0.819095,0.278689,-0.151515,-0.307329,0.19225,0.00768574,-0.966667,0 --0.882353,-0.0753769,0.0163934,-0.494949,-0.903073,-0.418778,-0.654996,-0.866667,1 -0,0.527638,0.344262,-0.212121,-0.356974,0.23696,-0.836038,-0.8,1 --0.882353,0.115578,0.0163934,-0.737374,-0.56974,-0.28465,-0.948762,-0.933333,1 --0.647059,0.0653266,-0.114754,-0.575758,-0.626478,-0.0789866,-0.81725,-0.9,1 --0.647059,0.748744,-0.0491803,-0.555556,-0.541371,-0.019374,-0.560205,-0.5,0 --0.176471,0.688442,0.442623,-0.151515,-0.241135,0.138599,-0.394535,-0.366667,0 --0.294118,0.0552764,0.311475,-0.434343,0,-0.0312965,-0.316823,-0.833333,1 -0.294118,0.386935,0.213115,-0.474747,-0.659574,0.0760059,-0.590948,-0.0333333,0 --0.647059,0.0653266,0.180328,0,0,-0.230999,-0.889838,-0.8,1 --0.294118,0.175879,0.57377,0,0,-0.14456,-0.932536,-0.7,1 --0.764706,-0.316583,0.0163934,-0.737374,-0.964539,-0.400894,-0.847139,-0.933333,1 -0.0588235,0.125628,0.344262,-0.515152,0,-0.159463,0.028181,-0.0333333,0 -0,0.19598,0,0,0,-0.0342771,-0.9462,-0.9,0 --0.764706,0.125628,0.409836,-0.151515,-0.621749,0.14456,-0.856533,-0.766667,1 --0.764706,-0.0753769,0.245902,-0.59596,0,-0.278688,0.383433,-0.766667,1 --0.294118,0.839196,0.540984,0,0,0.216095,0.181042,-0.2,1 -0,-0.0552764,0.147541,-0.454545,-0.728132,0.296572,-0.770282,0,1 --0.764706,0.0854271,0.0491803,0,0,-0.0819672,-0.931682,0,1 --0.529412,-0.0954774,0.442623,-0.0505051,-0.87234,0.123696,-0.757472,-0.733333,1 -0,0.256281,0.114754,0,0,-0.263785,-0.890692,0,1 -0,0.326633,0.278689,0,0,-0.0342771,-0.730999,0,1 --0.411765,0.286432,0.311475,0,0,0.0312965,-0.943638,-0.2,1 --0.529412,-0.0552764,0.0655738,-0.555556,0,-0.263785,-0.940222,0,1 --0.176471,0.145729,0.0491803,0,0,-0.183308,-0.441503,-0.566667,0 -0,0.0251256,0.278689,-0.191919,-0.787234,0.028316,-0.863365,-0.9,1 --0.764706,0.115578,-0.0163934,0,0,-0.219076,-0.773698,-0.933333,1 --0.882353,0.286432,0.344262,-0.656566,-0.567376,-0.180328,-0.968403,-0.966667,1 -0.176471,-0.0753769,0.0163934,0,0,-0.228018,-0.923997,-0.666667,1 -0.529412,0.0452261,0.180328,0,0,-0.0700447,-0.669513,-0.433333,0 --0.411765,0.0452261,0.213115,0,0,-0.14158,-0.935952,-0.1,1 --0.764706,-0.0552764,0.245902,-0.636364,-0.843972,-0.0581222,-0.512383,-0.933333,1 --0.176471,-0.0251256,0.245902,-0.353535,-0.78487,0.219076,-0.322801,-0.633333,0 --0.882353,0.00502513,0.213115,-0.757576,-0.891253,-0.418778,-0.939368,-0.766667,1 -0,0.0251256,0.409836,-0.656566,-0.751773,-0.126677,-0.4731,-0.8,1 --0.529412,0.286432,0.147541,0,0,0.0223547,-0.807857,-0.9,1 --0.294118,0.477387,0.311475,0,0,-0.120715,-0.914603,-0.0333333,0 --0.529412,-0.0954774,0,0,0,-0.165425,-0.545687,-0.666667,1 --0.647059,0.0351759,0.180328,-0.393939,-0.640662,-0.177347,-0.443211,-0.8,1 --0.764706,0.577889,0.213115,-0.292929,0.0401891,0.174367,-0.952178,-0.7,1 --0.882353,0.678392,0.213115,-0.656566,-0.659574,-0.302534,-0.684885,-0.6,0 
-0,0.798995,-0.180328,-0.272727,-0.624113,0.126677,-0.678053,-0.966667,0 -0.294118,0.366834,0.377049,-0.292929,-0.692671,-0.156483,-0.844577,-0.3,0 -0,0.0753769,-0.0163934,-0.494949,0,-0.213115,-0.953032,-0.933333,1 --0.882353,-0.0854271,-0.114754,-0.494949,-0.763593,-0.248882,-0.866781,-0.933333,1 --0.882353,0.175879,-0.0163934,-0.535354,-0.749409,0.00745157,-0.668659,-0.8,1 --0.411765,0.236181,0.213115,-0.191919,-0.817967,0.0163934,-0.836892,-0.766667,1 --0.764706,0.20603,-0.114754,0,0,-0.201192,-0.678053,-0.8,1 --0.882353,0.0653266,0.147541,-0.434343,-0.680851,0.0193741,-0.945346,-0.966667,1 --0.764706,0.557789,-0.147541,-0.454545,0.276596,0.153502,-0.861657,-0.866667,0 --0.764706,0.0150754,-0.0491803,-0.292929,-0.787234,-0.350224,-0.934244,-0.966667,1 --0.882353,0.20603,0.311475,-0.030303,-0.527187,0.159464,-0.0742955,-0.333333,1 --0.647059,-0.19598,0.344262,-0.373737,-0.834515,0.0193741,0.0367208,-0.8,0 -0.176471,0.628141,0.377049,0,0,-0.174367,-0.911187,0.1,1 --0.882353,1,0.245902,-0.131313,0,0.278689,0.123826,-0.966667,0 --0.0588235,0.678392,0.737705,-0.0707071,-0.453901,0.120715,-0.925705,-0.266667,0 -0.0588235,0.457286,0.311475,-0.0707071,-0.692671,0.129657,-0.52263,-0.366667,0 --0.294118,0.155779,-0.0163934,-0.212121,0,0.004471,-0.857387,-0.366667,0 --0.882353,0.125628,0.311475,-0.0909091,-0.687943,0.0372578,-0.881298,-0.9,1 --0.529412,0.457286,0.344262,-0.636364,0,-0.0312965,-0.865927,0.633333,0 -0.176471,0.115578,0.147541,-0.454545,0,-0.180328,-0.9462,-0.366667,0 --0.294118,-0.0150754,-0.0491803,-0.333333,-0.550827,0.0134128,-0.699402,-0.266667,1 -0.0588235,0.547739,0.278689,-0.393939,-0.763593,-0.0789866,-0.926558,-0.2,1 --0.294118,0.658291,0.114754,-0.474747,-0.602837,0.00149028,-0.527754,-0.0666667,1 --0.882353,-0.00502513,-0.0491803,-0.79798,0,-0.242921,-0.596072,0,1 -0.176471,-0.316583,0.737705,-0.535354,-0.884161,0.0581222,-0.823228,-0.133333,1 --0.647059,0.236181,0.639344,-0.292929,-0.432624,0.707899,-0.315115,-0.966667,1 --0.0588235,-0.0854271,0.344262,0,0,0.0611028,-0.565329,0.566667,1 --0.294118,0.959799,0.147541,0,0,-0.0789866,-0.786507,-0.666667,0 -0.0588235,0.567839,0.409836,0,0,-0.260805,-0.870196,0.0666667,0 -0,-0.0653266,-0.0163934,0,0,0.052161,-0.842015,-0.866667,1 --0.647059,0.21608,-0.147541,0,0,0.0730254,-0.958155,-0.866667,0 --0.764706,0.0150754,-0.0491803,-0.656566,-0.373522,-0.278688,-0.542272,-0.933333,1 --0.764706,-0.437186,-0.0819672,-0.434343,-0.893617,-0.278688,-0.783091,-0.966667,1 -0,0.628141,0.245902,-0.272727,0,0.47839,-0.755764,-0.833333,0 -0,-0.0452261,0.0491803,-0.212121,-0.751773,0.329359,-0.754056,-0.966667,1 --0.529412,0.256281,0.311475,0,0,-0.0372578,-0.608881,-0.8,0 --0.411765,0.366834,0.344262,0,0,0,-0.520068,0.6,1 --0.764706,0.296482,0.213115,-0.474747,-0.515366,-0.0104321,-0.561913,-0.866667,1 --0.647059,0.306533,0.0491803,0,0,-0.311475,-0.798463,-0.966667,1 --0.882353,0.0753769,-0.180328,-0.616162,0,-0.156483,-0.912041,-0.733333,1 --0.882353,0.407035,0.213115,-0.474747,-0.574468,-0.281669,-0.359522,-0.933333,1 --0.882353,0.447236,0.344262,-0.0707071,-0.574468,0.374069,-0.780529,-0.166667,0 --0.0588235,0.0753769,0.311475,0,0,-0.266766,-0.335611,-0.566667,1 -0.529412,0.58794,0.868852,0,0,0.260805,-0.847139,-0.233333,0 --0.764706,0.21608,0.147541,-0.353535,-0.775414,0.165425,-0.309991,-0.933333,1 --0.176471,0.296482,0.114754,-0.010101,-0.704492,0.147541,-0.691716,-0.266667,0 --0.764706,-0.0954774,-0.0163934,0,0,-0.299553,-0.903501,-0.866667,1 --0.176471,0.427136,0.47541,-0.515152,0.134752,-0.0938897,-0.957301,-0.266667,0 
--0.647059,0.698492,0.213115,-0.616162,-0.704492,-0.108793,-0.837746,-0.666667,0 -0,-0.00502513,0,0,0,-0.254843,-0.850555,-0.966667,1 --0.529412,0.276382,0.442623,-0.777778,-0.63357,0.028316,-0.555935,-0.766667,1 --0.529412,0.18593,0.147541,0,0,0.326379,-0.29462,-0.833333,1 --0.764706,0.226131,0.245902,-0.454545,-0.527187,0.0700448,-0.654142,-0.833333,1 --0.294118,0.256281,0.278689,-0.373737,0,-0.177347,-0.584116,-0.0666667,0 --0.882353,0.688442,0.442623,-0.414141,0,0.0432191,-0.293766,0.0333333,0 --0.764706,0.296482,0,0,0,0.147541,-0.807003,-0.333333,1 --0.529412,0.105528,0.245902,-0.59596,-0.763593,-0.153502,-0.965841,-0.8,1 --0.294118,-0.19598,0.311475,-0.272727,0,0.186289,-0.915457,-0.766667,1 -0.176471,0.155779,0,0,0,0,-0.843723,-0.7,0 --0.764706,0.276382,-0.245902,-0.575758,-0.208038,0.0253354,-0.916311,-0.966667,1 -0.0588235,0.648241,0.278689,0,0,-0.0223547,-0.940222,-0.2,0 --0.764706,-0.0653266,0.0491803,-0.353535,-0.621749,0.132638,-0.491033,-0.933333,0 --0.647059,0.58794,0.0491803,-0.737374,-0.0851064,-0.0700447,-0.814688,-0.9,1 --0.411765,0.266332,0.278689,-0.454545,-0.947991,-0.117735,-0.691716,-0.366667,1 -0.176471,0.296482,0.0163934,-0.272727,0,0.228018,-0.690009,-0.433333,0 -0,0.346734,-0.0491803,-0.59596,-0.312057,-0.213115,-0.766012,0,1 --0.647059,0.0251256,0.213115,0,0,-0.120715,-0.963279,-0.633333,1 --0.176471,0.879397,-0.180328,-0.333333,-0.0732861,0.0104323,-0.36123,-0.566667,0 --0.647059,0.738693,0.278689,-0.212121,-0.562648,0.00745157,-0.238258,-0.666667,0 -0.176471,-0.0552764,0.180328,-0.636364,0,-0.311475,-0.558497,0.166667,1 --0.882353,0.0854271,-0.0163934,-0.0707071,-0.579196,0.0581222,-0.712212,-0.9,1 --0.411765,-0.0251256,0.245902,-0.454545,0,0.0611028,-0.743809,0.0333333,0 --0.529412,-0.165829,0.409836,-0.616162,0,-0.126677,-0.795901,-0.566667,1 --0.882353,0.145729,0.0819672,-0.272727,-0.527187,0.135618,-0.819812,0,1 --0.882353,0.497487,0.114754,-0.414141,-0.699764,-0.126677,-0.768574,-0.3,0 --0.411765,0.175879,0.409836,-0.393939,-0.751773,0.165425,-0.852263,-0.3,1 --0.882353,0.115578,0.540984,0,0,-0.0223547,-0.840307,-0.2,1 --0.529412,0.125628,0.278689,-0.191919,0,0.174367,-0.865073,-0.433333,1 --0.882353,0.165829,0.278689,-0.414141,-0.574468,0.0760059,-0.64304,-0.866667,1 -0,0.417085,0.377049,-0.474747,0,-0.0342771,-0.69684,-0.966667,1 --0.764706,0.758794,0.442623,0,0,-0.317437,-0.788215,-0.966667,1 --0.764706,-0.0753769,-0.147541,0,0,-0.102832,-0.9462,-0.966667,1 --0.647059,0.306533,0.278689,-0.535354,-0.813239,-0.153502,-0.790777,-0.566667,0 --0.0588235,0.20603,0.409836,0,0,-0.153502,-0.845431,-0.966667,0 --0.764706,0.748744,0.442623,-0.252525,-0.716312,0.326379,-0.514944,-0.9,0 --0.764706,0.0653266,-0.0819672,-0.454545,-0.609929,-0.135618,-0.702818,-0.966667,1 --0.764706,0.0552764,0.229508,0,0,-0.305514,-0.588386,0.0666667,1 --0.529412,-0.0452261,-0.0163934,-0.353535,0,0.0551417,-0.824082,-0.766667,1 -0,0.266332,0.409836,-0.454545,-0.716312,-0.183308,-0.626815,0,1 --0.0588235,-0.346734,0.180328,-0.535354,0,-0.0461997,-0.554227,-0.3,1 --0.764706,-0.00502513,-0.0163934,-0.656566,-0.621749,0.0909091,-0.679761,0,1 --0.882353,0.0251256,0.213115,0,0,0.177347,-0.816396,-0.3,0 -0.294118,0.20603,0.311475,-0.252525,-0.64539,0.260805,-0.396243,-0.1,0 --0.647059,0.0251256,-0.278689,-0.59596,-0.777778,-0.0819672,-0.725021,-0.833333,1 --0.882353,0.0954774,-0.0491803,-0.636364,-0.725768,-0.150522,-0.87959,-0.966667,1 -0.0588235,0.407035,0.540984,0,0,-0.0253353,-0.439795,-0.2,0 -0.529412,0.537688,0.442623,-0.252525,-0.669031,0.210134,-0.0640478,-0.4,1 
-0.411765,0.00502513,0.377049,-0.333333,-0.751773,-0.105812,-0.649872,-0.166667,1 --0.882353,0.477387,0.540984,-0.171717,0,0.469449,-0.760888,-0.8,0 --0.882353,-0.18593,0.213115,-0.171717,-0.865248,0.38003,-0.130658,-0.633333,1 --0.647059,0.879397,0.147541,-0.555556,-0.527187,0.0849479,-0.71819,-0.5,0 --0.294118,0.628141,0.0163934,0,0,-0.275708,-0.914603,-0.0333333,0 --0.529412,0.366834,0.147541,0,0,-0.0700447,-0.0572161,-0.966667,0 --0.882353,0.21608,0.278689,-0.212121,-0.825059,0.162444,-0.843723,-0.766667,1 --0.647059,0.0854271,0.0163934,-0.515152,0,-0.225037,-0.876174,-0.866667,1 -0,0.819095,0.442623,-0.111111,0.205674,0.290611,-0.877028,-0.833333,0 --0.0588235,0.547739,0.278689,-0.353535,0,-0.0342771,-0.688301,-0.2,0 --0.882353,0.286432,0.442623,-0.212121,-0.739953,0.0879285,-0.163962,-0.466667,0 --0.176471,0.376884,0.47541,-0.171717,0,-0.0461997,-0.732707,-0.4,1 -0,0.236181,0.180328,0,0,0.0819672,-0.846285,0.0333333,0 --0.882353,0.0653266,0.245902,0,0,0.117735,-0.898377,-0.833333,1 --0.294118,0.909548,0.508197,0,0,0.0581222,-0.829206,0.5,0 --0.764706,-0.115578,-0.0491803,-0.474747,-0.962175,-0.153502,-0.412468,-0.966667,1 -0.0588235,0.708543,0.213115,-0.373737,0,0.311475,-0.722459,-0.266667,0 -0.0588235,-0.105528,0.0163934,0,0,-0.329359,-0.945346,-0.6,1 -0.176471,0.0150754,0.245902,-0.030303,-0.574468,-0.019374,-0.920581,0.4,1 --0.764706,0.226131,0.147541,-0.454545,0,0.0968703,-0.77626,-0.8,1 --0.411765,0.21608,0.180328,-0.535354,-0.735225,-0.219076,-0.857387,-0.7,1 --0.882353,0.266332,-0.0163934,0,0,-0.102832,-0.768574,-0.133333,0 --0.882353,-0.0653266,0.147541,-0.373737,0,-0.0938897,-0.797609,-0.933333,1 diff --git a/data/diabetes.csv.gz b/data/diabetes.csv.gz new file mode 100644 index 0000000..f922112 Binary files /dev/null and b/data/diabetes.csv.gz differ diff --git a/data/names_test.csv.gz b/data/names_test.csv.gz new file mode 100644 index 0000000..1db58c7 Binary files /dev/null and b/data/names_test.csv.gz differ diff --git a/data/names_train.csv.gz b/data/names_train.csv.gz new file mode 100644 index 0000000..a30d18e Binary files /dev/null and b/data/names_train.csv.gz differ diff --git a/data/shakespeare.txt.gz b/data/shakespeare.txt.gz new file mode 100644 index 0000000..fb882fc Binary files /dev/null and b/data/shakespeare.txt.gz differ diff --git a/name_dataset.py b/name_dataset.py new file mode 100644 index 0000000..8f2067f --- /dev/null +++ b/name_dataset.py @@ -0,0 +1,58 @@ +# References +# https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/01-basics/pytorch_basics/main.py +# http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#dataset-class +import torch +import numpy as np +from torch.autograd import Variable +from torch.utils.data import Dataset, DataLoader +import csv +import gzip + + +class NameDataset(Dataset): + """ Diabetes dataset.""" + + # Initialize your data, download, etc. 
+    def __init__(self, is_train_set=False):
+        filename = './data/names_train.csv.gz' if is_train_set else './data/names_test.csv.gz'
+        with gzip.open(filename, "rt") as f:
+            reader = csv.reader(f)
+            rows = list(reader)
+
+        self.names = [row[0] for row in rows]
+        self.countries = [row[1] for row in rows]
+        self.len = len(self.countries)
+
+        self.country_list = list(sorted(set(self.countries)))
+
+    def __getitem__(self, index):
+        return self.names[index], self.countries[index]
+
+    def __len__(self):
+        return self.len
+
+    def get_countries(self):
+        return self.country_list
+
+    def get_country(self, id):
+        return self.country_list[id]
+
+    def get_country_id(self, country):
+        return self.country_list.index(country)
+
+# Test the loader
+if __name__ == "__main__":
+    dataset = NameDataset(False)
+    print(dataset.get_countries())
+    print(dataset.get_country(3))
+    print(dataset.get_country_id('Korean'))
+
+    train_loader = DataLoader(dataset=dataset,
+                              batch_size=10,
+                              shuffle=True)
+
+    print(len(train_loader.dataset))
+    for epoch in range(2):
+        for i, (names, countries) in enumerate(train_loader):
+            # Run your training process
+            print(epoch, i, "names", names, "countries", countries)
diff --git a/requirements.txt b/requirements.txt
index 712d5cc..db9a191 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,8 @@
+#nonsml: digitalgenius/ubuntu-pytorch
+#varunagrawal/pytorch
+httplib2==0.18.0
 matplotlib==2.0.0
 numpy==1.13.3
-torch==0.2.0.post3
+torch
+torchvision==0.1.9
+Unidecode==0.04.21
diff --git a/seq2seq_models.py b/seq2seq_models.py
new file mode 100644
index 0000000..70523bb
--- /dev/null
+++ b/seq2seq_models.py
@@ -0,0 +1,143 @@
+# Original code from
+# https://github.com/spro/practical-pytorch/blob/master/seq2seq-translation/seq2seq-translation.ipynb
+import torch
+import torch.nn as nn
+from torch.autograd import Variable
+import torch.nn.functional as F
+
+MAX_LENGTH = 100
+
+SOS_token = chr(0)
+EOS_token = 1
+
+# Helper function to create a Variable based on
+# the cuda availability
+
+
+def cuda_variable(tensor):
+    # Do cuda() before wrapping with variable
+    if torch.cuda.is_available():
+        return Variable(tensor.cuda())
+    else:
+        return Variable(tensor)
+
+
+# String to char tensor
+def str2tensor(msg, eos=False):
+    tensor = [ord(c) for c in msg]
+    if eos:
+        tensor.append(EOS_token)
+
+    return cuda_variable(torch.LongTensor(tensor))
+
+
+# To demonstrate seq2seq, we don't handle batching in this code,
+# and our encoder runs one step at a time.
+# It's extremely slow, so please do not use it in practice.
+# We need to use (1) batching and (2) data parallelism; see
+# http://pytorch.org/tutorials/beginner/former_torchies/parallelism_tutorial.html
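+#
+# As a rough illustration only (nothing below is used in this repo),
+# (1) usually means padding the variable-length sequences and packing them,
+# and (2) can be added by wrapping a module in nn.DataParallel, e.g.:
+#
+#     from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence
+#     padded = pad_sequence(seqs)          # seqs: list of 1-D char tensors
+#     packed = pack_padded_sequence(padded, lengths)   # lengths: per-sequence sizes
+#
+#     if torch.cuda.device_count() > 1:
+#         encoder = nn.DataParallel(encoder)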
+
+class EncoderRNN(nn.Module):
+
+    def __init__(self, input_size, hidden_size, n_layers=1):
+        self.hidden_size = hidden_size
+        self.n_layers = n_layers
+
+        super(EncoderRNN, self).__init__()
+
+        self.embedding = nn.Embedding(input_size, hidden_size)
+        self.gru = nn.GRU(hidden_size, hidden_size, n_layers)
+
+    def forward(self, word_inputs, hidden):
+        # Note: we run this all at once (over the whole input sequence)
+        seq_len = len(word_inputs)
+        # input shape: S x B (=1) x I (input size)
+        embedded = self.embedding(word_inputs).view(seq_len, 1, -1)
+        output, hidden = self.gru(embedded, hidden)
+        return output, hidden
+
+    def init_hidden(self):
+        # (num_layers * num_directions, batch, hidden_size)
+        return cuda_variable(torch.zeros(self.n_layers, 1, self.hidden_size))
+
+
+class DecoderRNN(nn.Module):
+
+    def __init__(self, hidden_size, output_size, n_layers=1):
+        super(DecoderRNN, self).__init__()
+
+        self.embedding = nn.Embedding(output_size, hidden_size)
+        self.gru = nn.GRU(hidden_size, hidden_size, n_layers)
+        self.out = nn.Linear(hidden_size, output_size)
+
+    def forward(self, input, hidden):
+        # input shape: S(=1) x B (=1) x I (input size)
+        # Note: we run this one step at a time. (Sequence size = 1)
+        output = self.embedding(input).view(1, 1, -1)
+        output, hidden = self.gru(output, hidden)
+        output = self.out(output[0])
+        # No need for softmax, since we are using CrossEntropyLoss
+        return output, hidden
+
+    def init_hidden(self):
+        # (num_layers * num_directions, batch, hidden_size)
+        return cuda_variable(torch.zeros(self.n_layers, 1, self.hidden_size))
+
+
+class AttnDecoderRNN(nn.Module):
+
+    def __init__(self, hidden_size, output_size, n_layers=1, dropout_p=0.1):
+        super(AttnDecoderRNN, self).__init__()
+
+        # Linear for attention
+        self.attn = nn.Linear(hidden_size, hidden_size)
+
+        # Define layers
+        self.embedding = nn.Embedding(output_size, hidden_size)
+        self.gru = nn.GRU(hidden_size, hidden_size,
+                          n_layers, dropout=dropout_p)
+        self.out = nn.Linear(hidden_size * 2, output_size)
+
+    def forward(self, word_input, last_hidden, encoder_hiddens):
+        # Note: we run this one step (S=1) at a time
+        # Get the embedding of the current input word (last output word)
+        rnn_input = self.embedding(word_input).view(1, 1, -1)  # S=1 x B x I
+        rnn_output, hidden = self.gru(rnn_input, last_hidden)
+
+        # Calculate attention from current RNN state and all encoder outputs;
+        # apply to encoder outputs
+        attn_weights = self.get_att_weight(
+            rnn_output.squeeze(0), encoder_hiddens)
+        context = attn_weights.bmm(
+            encoder_hiddens.transpose(0, 1))  # B x S(=1) x I
+
+        # Final output layer (next word prediction) using the RNN hidden state
+        # and context vector
+        rnn_output = rnn_output.squeeze(0)  # S(=1) x B x I -> B x I
+        context = context.squeeze(1)  # B x S(=1) x I -> B x I
+        output = self.out(torch.cat((rnn_output, context), 1))
+
+        # Return final output, hidden state, and attention weights (for
+        # visualization)
+        return output, hidden, attn_weights
+
+    def get_att_weight(self, hidden, encoder_hiddens):
+        seq_len = len(encoder_hiddens)
+
+        # Create variable to store attention energies
+        attn_scores = cuda_variable(torch.zeros(seq_len))  # B x 1 x S
+
+        # Calculate energies for each encoder hidden
+        for i in range(seq_len):
+            attn_scores[i] = self.get_att_score(hidden, encoder_hiddens[i])
+
+        # Normalize scores to weights in range 0 to 1,
+        # resize to 1 x 1 x seq_len
+        # print("att_scores", attn_scores.size())
+        return F.softmax(attn_scores).view(1, 1, -1)
+
+    # score = h^T W h^e = h dot (W h^e)
+    # TODO: We need to implement different score models
diff --git a/slides/Lecture 01: Overview.pdf b/slides/Lecture 01: Overview.pdf
index 8c25f27..2cfc448 100644
Binary files a/slides/Lecture 01: Overview.pdf and b/slides/Lecture 01: Overview.pdf differ
diff --git a/slides/Lecture 02: Linear Model.pdf b/slides/Lecture 02: Linear Model.pdf
index 139f5f3..8b35252 100644
Binary files a/slides/Lecture 02: Linear Model.pdf and b/slides/Lecture 02: Linear Model.pdf differ
diff --git a/slides/Lecture 03: Gradient Descent.pdf b/slides/Lecture 03: Gradient Descent.pdf
index a176b90..29fe09e 100644
Binary files a/slides/Lecture 03: Gradient Descent.pdf and b/slides/Lecture 03: Gradient Descent.pdf differ
diff --git a/slides/Lecture 04: Back-propagation.pdf b/slides/Lecture 04: Back-propagation.pdf
deleted file mode 100644
index 8f20b80..0000000
Binary files a/slides/Lecture 04: Back-propagation.pdf and /dev/null differ
diff --git a/slides/Lecture 05: Linear regression in PyTorch way.pdf b/slides/Lecture 05: Linear regression in PyTorch way.pdf
index 6cd927c..7c7de35 100644
Binary files a/slides/Lecture 05: Linear regression in PyTorch way.pdf and b/slides/Lecture 05: Linear regression in PyTorch way.pdf differ
diff --git a/slides/Lecture 06: Logistic Regression.pdf b/slides/Lecture 06: Logistic Regression.pdf
index b9963e0..a15629a 100644
Binary files a/slides/Lecture 06: Logistic Regression.pdf and b/slides/Lecture 06: Logistic Regression.pdf differ
diff --git a/slides/Lecture 07: Wide & Deep.pdf b/slides/Lecture 07: Wide & Deep.pdf
index 43935f0..cbc2125 100644
Binary files a/slides/Lecture 07: Wide & Deep.pdf and b/slides/Lecture 07: Wide & Deep.pdf differ
diff --git a/slides/Lecture 08: DataLoader.pdf b/slides/Lecture 08: DataLoader.pdf
index ae22b89..badda50 100644
Binary files a/slides/Lecture 08: DataLoader.pdf and b/slides/Lecture 08: DataLoader.pdf differ
diff --git a/slides/Lecture 09: Softmax Classifier.pdf b/slides/Lecture 09: Softmax Classifier.pdf
index ee4aeab..b4c327f 100644
Binary files a/slides/Lecture 09: Softmax Classifier.pdf and b/slides/Lecture 09: Softmax Classifier.pdf differ
diff --git a/slides/Lecture 10: CNN.pdf b/slides/Lecture 10: CNN.pdf
deleted file mode 100644
index c3ca838..0000000
Binary files a/slides/Lecture 10: CNN.pdf and /dev/null differ
diff --git a/slides/Lecture 11: RNN.pdf b/slides/Lecture 11: RNN.pdf
deleted file mode 100644
index 35b16bb..0000000
Binary files a/slides/Lecture 11: RNN.pdf and /dev/null differ
diff --git a/slides/Lecture 12: NSML, Smartest ML Platform.pdf b/slides/Lecture 12: NSML, Smartest ML Platform.pdf
deleted file mode 100644
index 2c2eace..0000000
Binary files a/slides/Lecture 12: NSML, Smartest ML Platform.pdf and /dev/null differ
diff --git a/slides/P-Epilogue: What's the next?.pdf b/slides/P-Epilogue: What's the next?.pdf
index e28a448..1feb1d2 100644
Binary files a/slides/P-Epilogue: What's the next?.pdf and b/slides/P-Epilogue: What's the next?.pdf differ
diff --git a/text_loader.py b/text_loader.py
new file mode 100644
index 0000000..1788479
--- /dev/null
+++ b/text_loader.py
@@ -0,0 +1,35 @@
+# References
+# https://github.com/yunjey/pytorch-tutorial/blob/master/tutorials/01-basics/pytorch_basics/main.py
+# http://pytorch.org/tutorials/beginner/data_loading_tutorial.html#dataset-class
+import gzip
+from torch.utils.data import Dataset, DataLoader
+
+
+class TextDataset(Dataset):
+    # Initialize your data, download, etc.
+
+    def __init__(self, filename="./data/shakespeare.txt.gz"):
+        self.len = 0
+        with gzip.open(filename, 'rt') as f:
+            self.targetLines = [x.strip() for x in f if x.strip()]
+            # src: the target line, lower-cased and with spaces removed
+            self.srcLines = [x.lower().replace(' ', '')
+                             for x in self.targetLines]
+            self.len = len(self.srcLines)
+
+    def __getitem__(self, index):
+        return self.srcLines[index], self.targetLines[index]
+
+    def __len__(self):
+        return self.len
+
+
+# Test the loader
+if __name__ == "__main__":
+    dataset = TextDataset()
+    train_loader = DataLoader(dataset=dataset,
+                              batch_size=3,
+                              shuffle=True,
+                              num_workers=2)
+
+    for i, (src, target) in enumerate(train_loader):
+        print(i, "src:", src)
+        print(i, "target:", target)
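+
+    # --- Editor's sketch (not part of the original file): one simple way to
+    # turn the (src, target) strings into tensors for a character-level
+    # seq2seq model. Characters are mapped to plain ASCII codes via ord();
+    # a real pipeline might build its own char-to-index vocabulary instead.
+    import torch  # needed only for this sketch
+
+    def str2tensor(s):
+        # One LongTensor of character codes per string
+        return torch.LongTensor([ord(c) for c in s])
+
+    src0, target0 = dataset[0]
+    print("src as tensor:", str2tensor(src0))
+    print("target as tensor:", str2tensor(target0))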