%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.io import *
from fastai.conv_learner import *
from fastai.column_data import *
We're going to download the collected works of Nietzsche to use as our data for this class.
PATH = 'data/nietzsche/'
get_data('https://s3.amazonaws.com/text-datasets/nietzsche.txt', f'{PATH}nietzsche.txt')
text = open(f'{PATH}nietzsche.txt').read()
print('corpus length:', len(text))
nietzsche.txt: 606KB [00:00, 1.86MB/s]
corpus length: 600893
text[:400]
'PREFACE\n\n\nSUPPOSING that Truth is a woman--what then? Is there not ground\nfor suspecting that all philosophers, in so far as they have been\ndogmatists, have failed to understand women--that the terrible\nseriousness and clumsy importunity with which they have usually paid\ntheir addresses to Truth, have been unskilled and unseemly methods for\nwinning a woman? Certainly she has never allowed herself '
chars = sorted(list(set(text)))
vocab_size = len(chars) + 1
print('total chars:', vocab_size)
total chars: 85
Sometimes it's useful to have a zero value in the dataset, e.g. for padding
chars.insert(0, '\0')
''.join(chars[1:-6])
'\n !"\'(),-.0123456789:;=?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]_abcdefghijklmnopqrstuvwxy'
Map from chars to indices and back again
char_indices = {c: i for i, c in enumerate(chars)}
indices_char = {i: c for i, c in enumerate(chars)}
idx will be the data we use from now on - it simply converts all the characters to their index (based on the mapping above)
idx = [char_indices[c] for c in text]
idx[:10]
[40, 42, 29, 30, 25, 27, 29, 1, 1, 1]
''.join(indices_char[i] for i in idx[:70])
'PREFACE\n\n\nSUPPOSING that Truth is a woman--what then? Is there not gro'
Create four lists of every 3rd character, starting at the 0th, 1st, 2nd, and 3rd characters respectively
cs=3
c1_dat = [idx[i] for i in range(0, len(idx) - cs, cs)]
c2_dat = [idx[i + 1] for i in range(0, len(idx) - cs, cs)]
c3_dat = [idx[i + 2] for i in range(0, len(idx) - cs, cs)]
c4_dat = [idx[i + 3] for i in range(0, len(idx) - cs, cs)]
Our inputs
x1 = np.stack(c1_dat)
x2 = np.stack(c2_dat)
x3 = np.stack(c3_dat)
Our output
y = np.stack(c4_dat)
The first 4 inputs and outputs
x1[:4], x2[:4], x3[:4]
(array([40, 30, 29, 1]), array([42, 25, 1, 43]), array([29, 27, 1, 45]))
y[:4]
array([30, 29, 1, 40])
x1.shape, y.shape
((200297,), (200297,))
Pick a size for our hidden state
n_hidden = 256
The number of latent factors to create (i.e. the size of each character's embedding vector)
n_fac = 42
class Char3Model(nn.Module):
    """Predict the 4th character from the previous 3 (a hand-unrolled 3-step RNN)."""
    def __init__(self, vocab_size, n_fac):
        super().__init__()
        # Embedding lookup turning each character index into an n_fac vector.
        self.e = nn.Embedding(vocab_size, n_fac)
        # The 'green arrow' from our diagram - the layer operation from input to hidden
        self.l_in = nn.Linear(n_fac, n_hidden)
        # The 'orange arrow' from our diagram - the layer operation from hidden to hidden
        self.l_hidden = nn.Linear(n_hidden, n_hidden)
        # The 'blue arrow' from our diagram - the layer operation from hidden to output
        self.l_out = nn.Linear(n_hidden, vocab_size)

    def forward(self, c1, c2, c3):
        in1 = F.relu(self.l_in(self.e(c1)))
        in2 = F.relu(self.l_in(self.e(c2)))
        in3 = F.relu(self.l_in(self.e(c3)))
        # Start from a zero hidden state and apply the shared hidden layer
        # once per input character.
        h = V(torch.zeros(in1.size()).cuda())
        h = F.tanh(self.l_hidden(h + in1))
        h = F.tanh(self.l_hidden(h + in2))
        h = F.tanh(self.l_hidden(h + in3))
        # dim=-1 made explicit (implicit-dim log_softmax is deprecated and
        # every other model in this file already passes dim=-1).
        return F.log_softmax(self.l_out(h), dim=-1)
md = ColumnarModelData.from_arrays('.', [-1], np.stack([x1,x2,x3], axis=1), y, bs=512)
m = Char3Model(vocab_size, n_fac).cuda()
it = iter(md.trn_dl)
*xs, yt = next(it)
t = m(*V(xs))
opt = optim.Adam(m.parameters(), 1e-2)
fit(m, md, 1, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 2.096911 1.226288
[array([1.22629])]
set_lrs(opt, 0.001)
fit(m, md, 1, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.84777 0.387795
[array([0.3878])]
def get_next(inp):
    """Feed the characters of `inp` through the model and return the most likely next char."""
    encoded = np.array([char_indices[ch] for ch in inp])
    preds = m(*VV(T(encoded)))
    best = int(np.argmax(to_np(preds)))
    return chars[best]
get_next('y. ')
'T'
get_next('ppl')
'a'
get_next(' th')
'e'
get_next('and')
' '
This is the size of our unrolled RNN.
cs = 8
For each position in the text, take the sequence of 8 characters starting there (overlapping sliding windows). These will be the 8 inputs to our model.
c_in_dat = [[idx[i + j] for i in range(cs)] for j in range(len(idx) - cs)]
Then create a list of the next character in each of these series. This will be the labels for our model.
c_out_dat = [idx[j + cs] for j in range(len(idx) - cs)]
xs = np.stack(c_in_dat, axis=0)
xs.shape
(600885, 8)
y = np.stack(c_out_dat)
So each column below is one series of 8 characters from the text.
xs[:cs, :cs]
array([[40, 42, 29, 30, 25, 27, 29, 1], [42, 29, 30, 25, 27, 29, 1, 1], [29, 30, 25, 27, 29, 1, 1, 1], [30, 25, 27, 29, 1, 1, 1, 43], [25, 27, 29, 1, 1, 1, 43, 45], [27, 29, 1, 1, 1, 43, 45, 40], [29, 1, 1, 1, 43, 45, 40, 40], [ 1, 1, 1, 43, 45, 40, 40, 39]])
...and this is the next character after each sequence.
y[:cs]
array([ 1, 1, 43, 45, 40, 40, 39, 43])
val_idx = get_cv_idxs(len(idx) - cs - 1)
md = ColumnarModelData.from_arrays('.', val_idx, xs, y, bs=512)
class CharLoopModel(nn.Module):
    """Char-level RNN written as an explicit Python loop over the input characters."""
    def __init__(self, vocab_size, n_fac):
        super().__init__()
        self.e = nn.Embedding(vocab_size, n_fac)
        self.l_in = nn.Linear(n_fac, n_hidden)         # input  -> hidden
        self.l_hidden = nn.Linear(n_hidden, n_hidden)  # hidden -> hidden
        self.l_out = nn.Linear(n_hidden, vocab_size)   # hidden -> output

    def forward(self, *cs):
        batch = cs[0].size(0)
        # Zero initial hidden state, one row per example in the batch.
        h = V(torch.zeros(batch, n_hidden).cuda())
        for ci in cs:
            x = F.relu(self.l_in(self.e(ci)))
            h = F.tanh(self.l_hidden(h + x))
        return F.log_softmax(self.l_out(h), dim=-1)
m = CharLoopModel(vocab_size, n_fac).cuda()
opt = optim.Adam(m.parameters(), 1e-2)
fit(m, md, 1, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 2.022557 2.007089
[array([2.00709])]
set_lrs(opt, 0.001)
fit(m, md, 1, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.729759 1.729243
[array([1.72924])]
class CharLoopConcatModel(nn.Module):
    """Like CharLoopModel, but concatenates hidden state and embedding instead of adding."""
    def __init__(self, vocab_size, n_fac):
        super().__init__()
        self.e = nn.Embedding(vocab_size, n_fac)
        # The input layer sees [hidden ; embedding], hence n_fac + n_hidden inputs.
        self.l_in = nn.Linear(n_fac + n_hidden, n_hidden)
        self.l_hidden = nn.Linear(n_hidden, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)

    def forward(self, *cs):
        batch = cs[0].size(0)
        h = V(torch.zeros(batch, n_hidden).cuda())
        for ci in cs:
            combined = torch.cat((h, self.e(ci)), 1)
            h = F.tanh(self.l_hidden(F.relu(self.l_in(combined))))
        return F.log_softmax(self.l_out(h), dim=-1)
m = CharLoopConcatModel(vocab_size, n_fac).cuda()
opt = optim.Adam(m.parameters(), 1e-3)
it = iter(md.trn_dl)
*xs, yt = next(it)
t = m(*V(xs))
fit(m, md, 1, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.873534 1.854928
[array([1.85493])]
set_lrs(opt, 1e-4)
fit(m, md, 1, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.75918 1.758367
[array([1.75837])]
def get_next(inp):
    """Return the single most probable character to follow the string `inp`."""
    arr = np.array([char_indices[c] for c in inp])
    out = m(*VV(T(arr)))
    return chars[int(np.argmax(to_np(out)))]
get_next('for thos')
'e'
get_next('part of ')
't'
get_next('queens a')
'n'
class CharRnn(nn.Module):
    """Same model as the loop versions above, but using PyTorch's built-in nn.RNN."""
    def __init__(self, vocab_size, n_fac):
        super().__init__()
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.RNN(n_fac, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)

    def forward(self, *cs):
        batch = cs[0].size(0)
        # nn.RNN wants the hidden state shaped (n_layers, batch, n_hidden).
        h0 = V(torch.zeros(1, batch, n_hidden))
        emb = self.e(torch.stack(cs))
        outp, h0 = self.rnn(emb, h0)
        # Only the activations of the final time step feed the output layer.
        return F.log_softmax(self.l_out(outp[-1]), dim=-1)
m = CharRnn(vocab_size, n_fac).cuda()
opt = optim.Adam(m.parameters(), 1e-3)
it = iter(md.trn_dl)
*xs, yt = next(it)
t = m.e(V(torch.stack(xs)))
t.size()
torch.Size([8, 512, 42])
ht = V(torch.zeros(1, 512, n_hidden))
outp, hn = m.rnn(t, ht)
outp.size(), hn.size()
(torch.Size([8, 512, 256]), torch.Size([1, 512, 256]))
t = m(*V(xs))
t.size()
torch.Size([512, 85])
fit(m, md, 4, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.874941 1.848242 1 1.686629 1.681533 2 1.59148 1.597262 3 1.537338 1.551838
[array([1.55184])]
set_lrs(opt, 1e-4)
fit(m, md, 2, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.473922 1.514578 1 1.46794 1.508425
[array([1.50842])]
def get_next(inp):
    """Greedy one-character prediction: argmax over the model's output distribution."""
    codes = T(np.array([char_indices[c] for c in inp]))
    log_probs = to_np(m(*VV(codes)))
    return chars[int(log_probs.argmax())]
get_next('for thos')
'e'
def get_next_n(inp, n):
    """Generate `n` characters by repeatedly predicting the most likely next char."""
    pieces = [inp]
    window = inp
    for _ in range(n):
        nxt = get_next(window)
        pieces.append(nxt)
        # Slide the fixed-width context window one character forward.
        window = window[1:] + nxt
    return ''.join(pieces)
get_next_n('for thos', 40)
'for those and the same to the same to the same t'
Let's take non-overlapping sets of characters this time
c_in_dat = [[idx[i + j] for i in range(cs)] for j in range(0, len(idx) - cs - 1, cs)]
Then create the exact same thing, offset by 1, as our labels
c_out_dat = [[idx[i + j] for i in range(cs)] for j in range(1, len(idx) - cs, cs)]
xs = np.stack(c_in_dat)
xs.shape
(75111, 8)
ys = np.stack(c_out_dat)
ys.shape
(75111, 8)
xs[:cs, :cs]
array([[40, 42, 29, 30, 25, 27, 29, 1], [ 1, 1, 43, 45, 40, 40, 39, 43], [33, 38, 31, 2, 73, 61, 54, 73], [ 2, 44, 71, 74, 73, 61, 2, 62], [72, 2, 54, 2, 76, 68, 66, 54], [67, 9, 9, 76, 61, 54, 73, 2], [73, 61, 58, 67, 24, 2, 33, 72], [ 2, 73, 61, 58, 71, 58, 2, 67]])
ys[:cs, :cs]
array([[42, 29, 30, 25, 27, 29, 1, 1], [ 1, 43, 45, 40, 40, 39, 43, 33], [38, 31, 2, 73, 61, 54, 73, 2], [44, 71, 74, 73, 61, 2, 62, 72], [ 2, 54, 2, 76, 68, 66, 54, 67], [ 9, 9, 76, 61, 54, 73, 2, 73], [61, 58, 67, 24, 2, 33, 72, 2], [73, 61, 58, 71, 58, 2, 67, 68]])
val_idx = get_cv_idxs(len(xs) - cs - 1)
md = ColumnarModelData.from_arrays('.', val_idx, xs, ys, bs=512)
class CharSeqRnn(nn.Module):
    """RNN that returns predictions for every time step, not just the last one."""
    def __init__(self, vocab_size, n_fac):
        super().__init__()
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.RNN(n_fac, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)

    def forward(self, *cs):
        batch = cs[0].size(0)
        h0 = V(torch.zeros(1, batch, n_hidden))
        emb = self.e(torch.stack(cs))
        outp, _ = self.rnn(emb, h0)
        # Keep the whole output sequence: shape (seq_len, batch, vocab_size).
        return F.log_softmax(self.l_out(outp), dim=-1)
m = CharSeqRnn(vocab_size, n_fac).cuda()
opt = optim.Adam(m.parameters(), 1e-3)
it = iter(md.trn_dl)
*xst, yt = next(it)
def nll_loss_seq(inp, targ):
    """NLL loss averaged over a whole sequence of per-step predictions.

    `inp` is (seq_len, batch, n_classes) log-probabilities; `targ` is
    (batch, seq_len) target indices.  Both are flattened so every time
    step of every example counts as one observation.
    """
    seq_len, batch, n_classes = inp.size()
    flat_targ = targ.transpose(0, 1).contiguous().view(-1)
    flat_inp = inp.view(-1, n_classes)
    return F.nll_loss(flat_inp, flat_targ)
fit(m, md, 4, opt, nll_loss_seq)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 2.606978 2.421097 1 2.300751 2.210837 2 2.148818 2.094267 3 2.05272 2.018827
[array([2.01883])]
set_lrs(opt, 1e-4)
fit(m, md, 1, opt, nll_loss_seq)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 2.003635 2.00427
[array([2.00427])]
m = CharSeqRnn(vocab_size, n_fac).cuda()
opt = optim.Adam(m.parameters(), 1e-2)
m.rnn.weight_hh_l0.data.copy_(torch.eye(n_hidden))
1 0 0 ... 0 0 0 0 1 0 ... 0 0 0 0 0 1 ... 0 0 0 ... ⋱ ... 0 0 0 ... 1 0 0 0 0 0 ... 0 1 0 0 0 0 ... 0 0 1 [torch.cuda.FloatTensor of size 256x256 (GPU 0)]
fit(m, md, 4, opt, nll_loss_seq)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 2.380318 2.210412 1 2.124148 2.065752 2 2.023889 1.99549 3 1.973753 1.964885
[array([1.96489])]
set_lrs(opt, 1e-3)
fit(m, md, 4, opt, nll_loss_seq)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.885918 1.898218 1 1.877252 1.891765 2 1.868084 1.886029 3 1.860838 1.882068
[array([1.88207])]
from torchtext import vocab, data
from fastai.nlp import *
from fastai.lm_rnn import *
PATH='data/nietzsche/'
TRN_PATH = 'trn/'
VAL_PATH = 'val/'
TRN = f'{PATH}{TRN_PATH}'
VAL = f'{PATH}{VAL_PATH}'
%ls {PATH}
models/ nietzsche.txt trn/ val/
%ls {PATH}trn
trn.txt
TEXT = data.Field(lower=True, tokenize=list)
bs=64
bptt=8
n_fac=42
n_hidden=256
FILES = dict(train=TRN_PATH, validation=VAL_PATH, test=VAL_PATH)
md = LanguageModelData.from_text_files(PATH, TEXT, **FILES, bs=bs, bptt=bptt, min_freq=3)
len(md.trn_dl), md.nt, len(md.trn_ds), len(md.trn_ds[0].text)
(1153, 55, 1, 590960)
class CharSeqStatefulRnn(nn.Module):
    """Sequence RNN that keeps its hidden state across minibatches (stateful)."""
    def __init__(self, vocab_size, n_fac, bs):
        # Call nn.Module's __init__ before setting any attributes, matching
        # the other stateful models in this file (relying on attribute
        # assignment before super().__init__() is fragile).
        super().__init__()
        self.vocab_size = vocab_size
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.RNN(n_fac, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)
        self.init_hidden(bs)

    def forward(self, cs):
        bs = cs[0].size(0)
        # The last minibatch of an epoch can be smaller - reset state if so.
        if self.h.size(1) != bs: self.init_hidden(bs)
        outp, h = self.rnn(self.e(cs), self.h)
        # Detach the hidden state so backprop is truncated at the batch boundary.
        self.h = repackage_var(h)
        return F.log_softmax(self.l_out(outp), dim=-1).view(-1, self.vocab_size)

    def init_hidden(self, bs): self.h = V(torch.zeros(1, bs, n_hidden))
m = CharSeqStatefulRnn(md.nt, n_fac, 512).cuda()
opt = optim.Adam(m.parameters(), 1e-3)
fit(m, md, 4, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.799179 1.795937 1 1.635381 1.644064 2 1.55905 1.571078 3 1.517546 1.52435
[array([1.52435])]
set_lrs(opt, 1e-4)
fit(m, md, 4, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.439247 1.473334 1 1.442974 1.466567 2 1.437667 1.460651 3 1.438405 1.455288
[array([1.45529])]
class CharSeqStatefulRnn2(nn.Module):
    """Stateful sequence RNN written with nn.RNNCell and an explicit time-step loop."""
    def __init__(self, vocab_size, n_fac, bs):
        super().__init__()
        self.vocab_size = vocab_size
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.RNNCell(n_fac, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)
        self.init_hidden(bs)

    def forward(self, cs):
        bs = cs[0].size(0)
        # Hidden state is now 2-D, so the batch dimension is dim 0.
        if self.h.size(0) != bs: self.init_hidden(bs)
        outp = []
        o = self.h
        for c in cs:
            o = self.rnn(self.e(c), o)
            outp.append(o)
        outp = self.l_out(torch.stack(outp))
        # Detach so backprop is truncated at the minibatch boundary.
        self.h = repackage_var(o)
        return F.log_softmax(outp, dim=-1).view(-1, self.vocab_size)

    # nn.RNNCell expects a 2-D hidden state of shape (batch, hidden_size);
    # the original (1, batch, hidden) shape only worked through broadcasting
    # and produced needlessly 4-D stacked outputs. The flattened result is
    # unchanged.
    def init_hidden(self, bs): self.h = V(torch.zeros(bs, n_hidden))
m = CharSeqStatefulRnn2(md.nt, n_fac, 512).cuda()
opt = optim.Adam(m.parameters(), 1e-3)
fit(m, md, 4, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.80518 1.797154 1 1.631868 1.635422 2 1.559168 1.566188 3 1.515277 1.51792
[array([1.51792])]
class CharSeqStatefulGRU(nn.Module):
    """Stateful sequence model using a GRU instead of a vanilla RNN."""
    def __init__(self, vocab_size, n_fac, bs):
        super().__init__()
        self.vocab_size = vocab_size
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.GRU(n_fac, n_hidden)
        self.l_out = nn.Linear(n_hidden, vocab_size)
        self.init_hidden(bs)

    def forward(self, cs):
        batch = cs[0].size(0)
        # Reset state when the batch size changes (e.g. the last, short batch).
        if self.h.size(1) != batch:
            self.init_hidden(batch)
        outp, new_h = self.rnn(self.e(cs), self.h)
        # Truncate backprop: keep the values, drop the history.
        self.h = repackage_var(new_h)
        logits = self.l_out(outp)
        return F.log_softmax(logits, dim=-1).view(-1, self.vocab_size)

    def init_hidden(self, bs):
        self.h = V(torch.zeros(1, bs, n_hidden))
m = CharSeqStatefulGRU(md.nt, n_fac, 512).cuda()
opt = optim.Adam(m.parameters(), 1e-3)
fit(m, md, 6, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.675136 1.674389 1 1.508034 1.506611 2 1.434869 1.437619 3 1.395137 1.395509 4 1.361485 1.36442 5 1.336734 1.337312
[array([1.33731])]
set_lrs(opt, 1e-4)
fit(m, md, 3, opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.255207 1.291024 1 1.252789 1.282952 2 1.253178 1.277432
[array([1.27743])]
from fastai import sgdr
n_hidden = 512
class CharSeqStatefulLSTM(nn.Module):
    """Stateful multi-layer LSTM with dropout; its state is a (hidden, cell) pair."""
    def __init__(self, vocab_size, n_fac, bs, nl):
        super().__init__()
        self.vocab_size, self.nl = vocab_size, nl
        self.e = nn.Embedding(vocab_size, n_fac)
        self.rnn = nn.LSTM(n_fac, n_hidden, nl, dropout=0.5)
        self.l_out = nn.Linear(n_hidden, vocab_size)
        self.init_hidden(bs)

    def forward(self, cs):
        batch = cs[0].size(0)
        # LSTM state is a (hidden, cell) tuple - check the batch dim of the first.
        if self.h[0].size(1) != batch:
            self.init_hidden(batch)
        outp, new_state = self.rnn(self.e(cs), self.h)
        # Detach both tensors of the state so backprop stops at the batch boundary.
        self.h = repackage_var(new_state)
        return F.log_softmax(self.l_out(outp), dim=-1).view(-1, self.vocab_size)

    def init_hidden(self, bs):
        # One (nl, bs, n_hidden) zero tensor each for hidden and cell state.
        self.h = tuple(V(torch.zeros(self.nl, bs, n_hidden)) for _ in range(2))
m = CharSeqStatefulLSTM(md.nt, n_fac, 512, 2).cuda()
lo = LayerOptimizer(optim.Adam, m, 1e-2, 1e-5)
os.makedirs(f'{PATH}models', exist_ok=True)
fit(m, md, 2, lo.opt, F.nll_loss)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.735124 1.677932 1 1.650891 1.592743
[array([1.59274])]
on_end = lambda sched, cycle: save_model(m, f'{PATH}models/cyc_{cycle}')
cb = [CosAnneal(lo, len(md.trn_dl), cycle_mult=2, on_cycle_end=on_end)]
fit(m, md, 2**4-1, lo.opt, F.nll_loss, callbacks=cb)
Failed to display Jupyter Widget of type HBox
.
If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean that the widgets JavaScript is still loading. If this message persists, it likely means that the widgets JavaScript library is either not installed or not enabled. See the Jupyter Widgets Documentation for setup instructions.
If you're reading this message in another frontend (for example, a static rendering on GitHub or NBViewer), it may mean that your frontend doesn't currently support widgets.
epoch trn_loss val_loss 0 1.479674 1.422725 1 1.523124 1.467232 2 1.406148 1.359548 3 1.56533 1.495614 4 1.487524 1.431022 5 1.400005 1.348234 6 1.342281 1.303726 7 1.534022 1.470526 8 1.499201 1.442911 9 1.477629 1.426429 10 1.44182 1.384572 11 1.398508 1.342475 12 1.352225 1.301657 13 1.313135 1.264042 14 1.283205 1.243538
[array([1.24354])]
def get_next(inp):
    """Sample the next character from the model's predicted distribution over `inp`."""
    idxs = TEXT.numericalize(inp)
    probs = m(VV(idxs.transpose(0, 1)))
    # Sample (rather than argmax) so repeated generation doesn't get stuck in loops.
    pick = torch.multinomial(probs[-1].exp(), 1)
    return TEXT.vocab.itos[to_np(pick)[0]]
get_next('for thos')
'u'
def get_next_n(inp, n):
    """Extend `inp` by `n` generated characters, sliding the context window."""
    out, ctx = inp, inp
    for _ in range(n):
        ch = get_next(ctx)
        out += ch
        # Drop the oldest character, append the new one - fixed-width context.
        ctx = ctx[1:] + ch
    return out
print(get_next_n('for thos', 400))
for those oneself--however, a conscience is remotesmeans (to whichmust wish love of their religious dating and though there bean infliction or that is not the validity oneso or with relations, in men for display is civilizators one of which is anothers accased also again being andorality, truthful higher intercourse ofthe jews socratic enough to the deptium--low untruthsbetrough, in notion of knowledge in