Automatic convert from py to ipynb
graykode committed Aug 15, 2020
1 parent 3bd37d8 commit f97cef9
Showing 12 changed files with 1,908 additions and 0 deletions.
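
The conversion script itself is not included in this commit, so the exact tooling is an assumption; the sketch below shows one minimal way a .py source file could be wrapped into a single-code-cell notebook like the files in this diff, using the nbformat package (the function name py_to_ipynb and the example usage are illustrative only).

import nbformat

def py_to_ipynb(py_path, ipynb_path):
    # read the whole .py file and place it in one code cell,
    # matching the one-cell-per-notebook layout of this commit
    with open(py_path) as f:
        source = f.read()
    nb = nbformat.v4.new_notebook()
    nb.cells.append(nbformat.v4.new_code_cell(source))
    with open(ipynb_path, 'w') as f:
        nbformat.write(nb, f)

# hypothetical usage:
# py_to_ipynb('1-1.NNLM/NNLM.py', '1-1.NNLM/NNLM.ipynb')
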
111 changes: 111 additions & 0 deletions 1-1.NNLM/NNLM.ipynb
@@ -0,0 +1,111 @@
{
"cells": [
{
"cell_type": "code",
"metadata": {},
"source": [
"# code by Tae Hwan Jung @graykode\n",
"import torch\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"\n",
"def make_batch():\n",
" input_batch = []\n",
" target_batch = []\n",
"\n",
" for sen in sentences:\n",
" word = sen.split() # space tokenizer\n",
" input = [word_dict[n] for n in word[:-1]] # create (1~n-1) as input\n",
" target = word_dict[word[-1]] # create (n) as target, We usually call this 'casual language model'\n",
"\n",
" input_batch.append(input)\n",
" target_batch.append(target)\n",
"\n",
" return input_batch, target_batch\n",
"\n",
"# Model\n",
"class NNLM(nn.Module):\n",
" def __init__(self):\n",
" super(NNLM, self).__init__()\n",
" self.C = nn.Embedding(n_class, m)\n",
" self.H = nn.Linear(n_step * m, n_hidden, bias=False)\n",
" self.d = nn.Parameter(torch.ones(n_hidden))\n",
" self.U = nn.Linear(n_hidden, n_class, bias=False)\n",
" self.W = nn.Linear(n_step * m, n_class, bias=False)\n",
" self.b = nn.Parameter(torch.ones(n_class))\n",
"\n",
" def forward(self, X):\n",
" X = self.C(X) # X : [batch_size, n_step, n_class]\n",
" X = X.view(-1, n_step * m) # [batch_size, n_step * n_class]\n",
" tanh = torch.tanh(self.d + self.H(X)) # [batch_size, n_hidden]\n",
" output = self.b + self.W(X) + self.U(tanh) # [batch_size, n_class]\n",
" return output\n",
"\n",
"if __name__ == '__main__':\n",
" n_step = 2 # number of steps, n-1 in paper\n",
" n_hidden = 2 # number of hidden size, h in paper\n",
" m = 2 # embedding size, m in paper\n",
"\n",
" sentences = [\"i like dog\", \"i love coffee\", \"i hate milk\"]\n",
"\n",
" word_list = \" \".join(sentences).split()\n",
" word_list = list(set(word_list))\n",
" word_dict = {w: i for i, w in enumerate(word_list)}\n",
" number_dict = {i: w for i, w in enumerate(word_list)}\n",
" n_class = len(word_dict) # number of Vocabulary\n",
"\n",
" model = NNLM()\n",
"\n",
" criterion = nn.CrossEntropyLoss()\n",
" optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
"\n",
" input_batch, target_batch = make_batch()\n",
" input_batch = torch.LongTensor(input_batch)\n",
" target_batch = torch.LongTensor(target_batch)\n",
"\n",
" # Training\n",
" for epoch in range(5000):\n",
" optimizer.zero_grad()\n",
" output = model(input_batch)\n",
"\n",
" # output : [batch_size, n_class], target_batch : [batch_size]\n",
" loss = criterion(output, target_batch)\n",
" if (epoch + 1) % 1000 == 0:\n",
" print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))\n",
"\n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
" # Predict\n",
" predict = model(input_batch).data.max(1, keepdim=True)[1]\n",
"\n",
" # Test\n",
" print([sen.split()[:2] for sen in sentences], '->', [number_dict[n.item()] for n in predict.squeeze()])"
],
"outputs": [],
"execution_count": null
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.1"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
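For reference, the forward pass of NNLM above implements the output equation of Bengio et al.'s neural probabilistic language model, with x the concatenation of the n-1 projected input word vectors from the embedding C:

y = b + W x + U \tanh(d + H x)

H and d produce the hidden layer, U maps the hidden layer to vocabulary scores, and W is the direct (skip) connection from the projected inputs to the output.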
115 changes: 115 additions & 0 deletions 1-2.Word2Vec/Word2Vec-Skipgram(Softmax).ipynb
@@ -0,0 +1,115 @@
{
"cells": [
{
"cell_type": "code",
"metadata": {},
"source": [
"# code by Tae Hwan Jung @graykode\n",
"import numpy as np\n",
"import torch\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"import matplotlib.pyplot as plt\n",
"\n",
"def random_batch():\n",
" random_inputs = []\n",
" random_labels = []\n",
" random_index = np.random.choice(range(len(skip_grams)), batch_size, replace=False)\n",
"\n",
" for i in random_index:\n",
" random_inputs.append(np.eye(voc_size)[skip_grams[i][0]]) # target\n",
" random_labels.append(skip_grams[i][1]) # context word\n",
"\n",
" return random_inputs, random_labels\n",
"\n",
"# Model\n",
"class Word2Vec(nn.Module):\n",
" def __init__(self):\n",
" super(Word2Vec, self).__init__()\n",
" # W and WT is not Traspose relationship\n",
" self.W = nn.Linear(voc_size, embedding_size, bias=False) # voc_size > embedding_size Weight\n",
" self.WT = nn.Linear(embedding_size, voc_size, bias=False) # embedding_size > voc_size Weight\n",
"\n",
" def forward(self, X):\n",
" # X : [batch_size, voc_size]\n",
" hidden_layer = self.W(X) # hidden_layer : [batch_size, embedding_size]\n",
" output_layer = self.WT(hidden_layer) # output_layer : [batch_size, voc_size]\n",
" return output_layer\n",
"\n",
"if __name__ == '__main__':\n",
" batch_size = 2 # mini-batch size\n",
" embedding_size = 2 # embedding size\n",
"\n",
" sentences = [\"apple banana fruit\", \"banana orange fruit\", \"orange banana fruit\",\n",
" \"dog cat animal\", \"cat monkey animal\", \"monkey dog animal\"]\n",
"\n",
" word_sequence = \" \".join(sentences).split()\n",
" word_list = \" \".join(sentences).split()\n",
" word_list = list(set(word_list))\n",
" word_dict = {w: i for i, w in enumerate(word_list)}\n",
" voc_size = len(word_list)\n",
"\n",
" # Make skip gram of one size window\n",
" skip_grams = []\n",
" for i in range(1, len(word_sequence) - 1):\n",
" target = word_dict[word_sequence[i]]\n",
" context = [word_dict[word_sequence[i - 1]], word_dict[word_sequence[i + 1]]]\n",
" for w in context:\n",
" skip_grams.append([target, w])\n",
"\n",
" model = Word2Vec()\n",
"\n",
" criterion = nn.CrossEntropyLoss()\n",
" optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
"\n",
" # Training\n",
" for epoch in range(5000):\n",
" input_batch, target_batch = random_batch()\n",
" input_batch = torch.Tensor(input_batch)\n",
" target_batch = torch.LongTensor(target_batch)\n",
"\n",
" optimizer.zero_grad()\n",
" output = model(input_batch)\n",
"\n",
" # output : [batch_size, voc_size], target_batch : [batch_size] (LongTensor, not one-hot)\n",
" loss = criterion(output, target_batch)\n",
" if (epoch + 1) % 1000 == 0:\n",
" print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))\n",
"\n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
" for i, label in enumerate(word_list):\n",
" W, WT = model.parameters()\n",
" x, y = W[0][i].item(), W[1][i].item()\n",
" plt.scatter(x, y)\n",
" plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom')\n",
" plt.show()\n"
],
"outputs": [],
"execution_count": null
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.1"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
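A note on the plotting loop above: it reads the embedding of word i from W[0][i] and W[1][i] because applying the bias-free linear layer W to a one-hot input simply selects one column of its weight matrix, so each column is the learned word vector. A minimal standalone sketch of this equivalence (the sizes here are arbitrary, not taken from the notebook):

import torch
import torch.nn as nn

voc_size, embedding_size = 8, 2
W = nn.Linear(voc_size, embedding_size, bias=False)

one_hot = torch.eye(voc_size)[3]         # one-hot vector for word index 3
projected = W(one_hot)                   # the hidden_layer for that word
column = W.weight[:, 3]                  # column 3 of the weight matrix

print(torch.allclose(projected, column)) # True: the projection is a column lookup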
117 changes: 117 additions & 0 deletions 2-1.TextCNN/TextCNN.ipynb
@@ -0,0 +1,117 @@
{
"cells": [
{
"cell_type": "code",
"metadata": {},
"source": [
"# code by Tae Hwan Jung @graykode\n",
"import numpy as np\n",
"import torch\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"import torch.nn.functional as F\n",
"\n",
"class TextCNN(nn.Module):\n",
" def __init__(self):\n",
" super(TextCNN, self).__init__()\n",
" self.num_filters_total = num_filters * len(filter_sizes)\n",
" self.W = nn.Embedding(vocab_size, embedding_size)\n",
" self.Weight = nn.Linear(self.num_filters_total, num_classes, bias=False)\n",
" self.Bias = nn.Parameter(torch.ones([num_classes]))\n",
" self.filter_list = nn.ModuleList([nn.Conv2d(1, num_filters, (size, embedding_size)) for size in filter_sizes])\n",
"\n",
" def forward(self, X):\n",
" embedded_chars = self.W(X) # [batch_size, sequence_length, sequence_length]\n",
" embedded_chars = embedded_chars.unsqueeze(1) # add channel(=1) [batch, channel(=1), sequence_length, embedding_size]\n",
"\n",
" pooled_outputs = []\n",
" for i, conv in enumerate(self.filter_list):\n",
" # conv : [input_channel(=1), output_channel(=3), (filter_height, filter_width), bias_option]\n",
" h = F.relu(conv(embedded_chars))\n",
" # mp : ((filter_height, filter_width))\n",
" mp = nn.MaxPool2d((sequence_length - filter_sizes[i] + 1, 1))\n",
" # pooled : [batch_size(=6), output_height(=1), output_width(=1), output_channel(=3)]\n",
" pooled = mp(h).permute(0, 3, 2, 1)\n",
" pooled_outputs.append(pooled)\n",
"\n",
" h_pool = torch.cat(pooled_outputs, len(filter_sizes)) # [batch_size(=6), output_height(=1), output_width(=1), output_channel(=3) * 3]\n",
" h_pool_flat = torch.reshape(h_pool, [-1, self.num_filters_total]) # [batch_size(=6), output_height * output_width * (output_channel * 3)]\n",
" model = self.Weight(h_pool_flat) + self.Bias # [batch_size, num_classes]\n",
" return model\n",
"\n",
"if __name__ == '__main__':\n",
" embedding_size = 2 # embedding size\n",
" sequence_length = 3 # sequence length\n",
" num_classes = 2 # number of classes\n",
" filter_sizes = [2, 2, 2] # n-gram windows\n",
" num_filters = 3 # number of filters\n",
"\n",
" # 3 words sentences (=sequence_length is 3)\n",
" sentences = [\"i love you\", \"he loves me\", \"she likes baseball\", \"i hate you\", \"sorry for that\", \"this is awful\"]\n",
" labels = [1, 1, 1, 0, 0, 0] # 1 is good, 0 is not good.\n",
"\n",
" word_list = \" \".join(sentences).split()\n",
" word_list = list(set(word_list))\n",
" word_dict = {w: i for i, w in enumerate(word_list)}\n",
" vocab_size = len(word_dict)\n",
"\n",
" model = TextCNN()\n",
"\n",
" criterion = nn.CrossEntropyLoss()\n",
" optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
"\n",
" inputs = torch.LongTensor([np.asarray([word_dict[n] for n in sen.split()]) for sen in sentences])\n",
" targets = torch.LongTensor([out for out in labels]) # To using Torch Softmax Loss function\n",
"\n",
" # Training\n",
" for epoch in range(5000):\n",
" optimizer.zero_grad()\n",
" output = model(inputs)\n",
"\n",
" # output : [batch_size, num_classes], target_batch : [batch_size] (LongTensor, not one-hot)\n",
" loss = criterion(output, targets)\n",
" if (epoch + 1) % 1000 == 0:\n",
" print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))\n",
"\n",
" loss.backward()\n",
" optimizer.step()\n",
"\n",
" # Test\n",
" test_text = 'sorry hate you'\n",
" tests = [np.asarray([word_dict[n] for n in test_text.split()])]\n",
" test_batch = torch.LongTensor(tests)\n",
"\n",
" # Predict\n",
" predict = model(test_batch).data.max(1, keepdim=True)[1]\n",
" if predict[0][0] == 0:\n",
" print(test_text,\"is Bad Mean...\")\n",
" else:\n",
" print(test_text,\"is Good Mean!!\")"
],
"outputs": [],
"execution_count": null
}
],
"metadata": {
"anaconda-cloud": {},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.1"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
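As a sanity check on the shape comments in TextCNN.forward above, here is a minimal standalone sketch of one convolution-plus-pooling branch, reusing the same hyperparameter values (6 sentences, sequence_length 3, embedding_size 2, 3 filters of height 2); the random input is only for illustration. Each branch collapses a sentence to num_filters values, and concatenating the three branches gives the 9-dimensional h_pool_flat.

import torch
import torch.nn as nn
import torch.nn.functional as F

batch_size, sequence_length, embedding_size = 6, 3, 2
num_filters, filter_size = 3, 2

x = torch.randn(batch_size, 1, sequence_length, embedding_size)   # [6, 1, 3, 2]
conv = nn.Conv2d(1, num_filters, (filter_size, embedding_size))
h = F.relu(conv(x))                                                # [6, 3, 2, 1] : output height = 3 - 2 + 1
pooled = nn.MaxPool2d((sequence_length - filter_size + 1, 1))(h)   # [6, 3, 1, 1] : max over the remaining height
print(h.shape, pooled.shape)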