In this post I provide an example implementation of a Seq2seq transformer (sequence-to-sequence learning was originally described in this 2014 paper; here it is implemented with the Transformer architecture). The code will likely not run in a reasonable time frame locally and requires CUDA (Google Colab is a good platform for this). The implementation may also need to be modified from time to time, since PyTorch's APIs change. In general, you need:

  • Something to tokenize the texts (i.e. convert words/phrases to numbers) and to decode (i.e. convert numbers back to words/phrases).

  • Something to convert those numbers into richer numerical representations (i.e. features).

  • Something to backpropagate gradients through (i.e. a model and a loss function).

And finally:

  • Some text.

For the tasks above, torchtext, torch.nn, and spacy provide pre-built helpers. The text itself can be downloaded from the Multi30k repository.
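
As a toy illustration of the first two bullet points, tokenizing and numericalizing can look like the sketch below. The vocabulary here is made up purely for illustration; the real pipeline below uses spacy tokenizers and torchtext vocabularies instead.

# Toy sketch of tokenization/numericalization (hypothetical mini-vocabulary;
# the real pipeline below uses spacy and torchtext instead)
toy_vocab = {'<unk>': 0, 'a': 4, 'man': 5, 'runs': 6}
itos = {i: s for s, i in toy_vocab.items()}

def toy_tokenize(text):
    return text.lower().split()                     # text -> list of tokens

def toy_numericalize(tokens):
    return [toy_vocab.get(t, 0) for t in tokens]    # tokens -> indices

ids = toy_numericalize(toy_tokenize("A man runs"))  # [4, 5, 6]
print(" ".join(itos[i] for i in ids))               # decode back: "a man runs"

The actual preprocessing code, using torchtext and spacy, is below.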

# Import libraries and preprocessing
import math
import torchtext
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchtext.data.utils import get_tokenizer
from collections import Counter
from torchtext.vocab import vocab,Vocab
from torchtext.utils import download_from_url, extract_archive
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, TensorDataset
from torchtext.transforms import ToTensor
from torch import Tensor
from torch.nn import TransformerEncoder, TransformerDecoder, TransformerEncoderLayer, TransformerDecoderLayer
import io
import time
import spacy

# load data from urls
url_base = 'https://raw.githubusercontent.com/multi30k/dataset/master/data/task1/raw/'
train_urls = ('train.de.gz', 'train.en.gz')
val_urls = ('val.de.gz', 'val.en.gz')
test_urls = ('test_2016_flickr.de.gz', 'test_2016_flickr.en.gz')

train_filepaths = [extract_archive(download_from_url(url_base + url))[0] for url in train_urls]
val_filepaths = [extract_archive(download_from_url(url_base + url))[0] for url in val_urls]
test_filepaths = [extract_archive(download_from_url(url_base + url))[0] for url in test_urls]

# tokenize
de_tokenizer = get_tokenizer('spacy', language='de_core_news_sm')
en_tokenizer = get_tokenizer('spacy', language='en_core_web_sm')

# get set of words contained in the German and English texts
def build_vocab(filepath, tokenizer):
    counter = Counter()
    with io.open(filepath, encoding='utf8') as f:
        for string_ in f:
            counter.update(tokenizer(string_))
    return vocab(counter, specials=['<unk>', '<pad>', '<bos>', '<eos>'])

de_vocab = build_vocab(train_filepaths[0],de_tokenizer)
en_vocab = build_vocab(train_filepaths[1],en_tokenizer)
de_vocab.set_default_index(0)
en_vocab.set_default_index(0)

def process_data(filepaths):
    raw_de_iter = iter(io.open(filepaths[0], encoding='utf8'))
    raw_en_iter = iter(io.open(filepaths[1], encoding='utf8'))
    de_data = []
    en_data = []
    for (raw_de, raw_en) in zip(raw_de_iter, raw_en_iter):
        # wrap each sentence with <bos> (index 2) and <eos> (index 3)
        de_data.append([2] + de_vocab(de_tokenizer(raw_de.rstrip('\n'))) + [3])
        en_data.append([2] + en_vocab(en_tokenizer(raw_en.rstrip('\n'))) + [3])
    return de_data, en_data

train_data = process_data(train_filepaths)
val_data = process_data(val_filepaths)
test_data = process_data(test_filepaths)

# convert the lists of token indices to padded tensors (<pad> = index 1)
to_tensor = ToTensor(padding_value=1)

# create training and testing data
de_train_data = to_tensor(train_data[0])
en_train_data = to_tensor(train_data[1])
de_val_data = to_tensor(val_data[0])
en_val_data = to_tensor(val_data[1])
de_test_data = to_tensor(test_data[0])
en_test_data = to_tensor(test_data[1])


# create batched datasets
train_ds = TensorDataset(de_train_data,en_train_data)
val_ds = TensorDataset(de_val_data,en_val_data)
test_ds = TensorDataset(de_test_data,en_test_data)

# batch size
batch_size = 2**8
train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
val_dl = DataLoader(val_ds, batch_size=batch_size, shuffle=True)
test_dl = DataLoader(test_ds, batch_size=batch_size, shuffle=True)
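
Before defining the model, it can help to sanity-check the data pipeline. A minimal sketch (assuming the loaders above were created successfully):

# Optional sanity check: pull one batch from the training loader.
# Shapes are (batch_size, max_sentence_length_in_split), because ToTensor
# pads every sentence with index 1 (<pad>) up to a common length.
src_batch, tgt_batch = next(iter(train_dl))
print(src_batch.shape, tgt_batch.shape)
print(src_batch[0, :10])   # first few token indices of one German sentence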

The main class for the Seq2seq transformer is below (it requires CUDA):

class Seq2SeqTransformer(nn.Module):
    def __init__(self, num_encoder_layers: int, num_decoder_layers: int,
                 emb_size: int, src_vocab_size: int, tgt_vocab_size: int,
                 dim_feedforward: int = 512, dropout: float = 0.1):
        super().__init__()
        # NHEAD is a global constant defined with the other hyperparameters below
        encoder_layer = TransformerEncoderLayer(d_model=emb_size, nhead=NHEAD, dim_feedforward=dim_feedforward, batch_first=True)
        self.transformer_encoder = TransformerEncoder(encoder_layer, num_layers=num_encoder_layers)
        decoder_layer = TransformerDecoderLayer(d_model=emb_size, nhead=NHEAD, dim_feedforward=dim_feedforward, batch_first=True)
        self.transformer_decoder = TransformerDecoder(decoder_layer, num_layers=num_decoder_layers)
        self.generator = nn.Linear(emb_size, tgt_vocab_size)
        self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
        self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)
        self.positional_encoding = PositionalEncoding(emb_size, dropout=dropout)

    def forward(self, src: Tensor, trg: Tensor, src_mask: Tensor, tgt_mask: Tensor,
                src_padding_mask: Tensor, tgt_padding_mask: Tensor, memory_key_padding_mask: Tensor):
        src_emb = self.positional_encoding(self.src_tok_emb(src))
        tgt_emb = self.positional_encoding(self.tgt_tok_emb(trg))
        memory = self.transformer_encoder(src_emb, src_mask, src_padding_mask)
        outs = self.transformer_decoder(tgt_emb, memory, tgt_mask, None, tgt_padding_mask, memory_key_padding_mask)
        return self.generator(outs)

    def encode(self, src: Tensor, src_mask: Tensor):
        return self.transformer_encoder(self.positional_encoding(self.src_tok_emb(src)), src_mask)

    def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
        return self.transformer_decoder(self.positional_encoding(self.tgt_tok_emb(tgt)), memory, tgt_mask)

class PositionalEncoding(nn.Module):
    # Sinusoidal positional encoding: PE(pos, 2i) = sin(pos / 10000^(2i/d)),
    # PE(pos, 2i+1) = cos(pos / 10000^(2i/d))
    def __init__(self, emb_size, dropout, max_len=5000):
        super().__init__()
        den = torch.exp(-torch.arange(0, emb_size, 2) * math.log(10000) / emb_size)
        pos = torch.arange(0, max_len).reshape(max_len, 1)
        pos_embedding = torch.zeros((max_len, emb_size))
        pos_embedding[:, 0::2] = torch.sin(pos * den)
        pos_embedding[:, 1::2] = torch.cos(pos * den)
        # shape (1, max_len, emb_size), so it broadcasts over the batch dimension
        # (the transformer layers above use batch_first=True)
        pos_embedding = pos_embedding.unsqueeze(0)
        self.dropout = nn.Dropout(dropout)
        self.register_buffer('pos_embedding', pos_embedding)

    def forward(self, token_embedding):
        # token_embedding: (batch, seq_len, emb_size)
        return self.dropout(token_embedding + self.pos_embedding[:, :token_embedding.size(1), :])

# feature lifting: map token indices to dense embedding vectors, scaled by sqrt(emb_size)
class TokenEmbedding(nn.Module):
    def __init__(self, vocab_size: int, emb_size):
        super(TokenEmbedding, self).__init__()
        self.embedding = nn.Embedding(vocab_size, emb_size)
        self.emb_size = emb_size
    def forward(self, tokens: Tensor):
        return self.embedding(tokens.long()) * math.sqrt(self.emb_size)

def generate_square_subsequent_mask(sz):
    mask = (torch.triu(torch.ones((sz,sz),device='cuda')) == 1).transpose(0,1)
    mask = mask.float().masked_fill(mask==0,float('-inf')).masked_fill(mask==1,float(0.0))
    return mask

def create_mask(src,tgt):
    src_seq_len = src.shape[1]
    tgt_seq_len = tgt.shape[1]
    tgt_mask = generate_square_subsequent_mask(tgt_seq_len).cuda()
    src_mask = torch.zeros((src_seq_len,src_seq_len),device='cuda').type(torch.bool)
    src_padding_mask = (src == 1).cuda()
    tgt_padding_mask = (tgt == 1).cuda()
    return src_mask, tgt_mask, src_padding_mask,tgt_padding_mask
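
As a quick illustration of the causal mask (this needs CUDA, since the helper above hard-codes device='cuda'): entries above the diagonal are -inf, so each target position can only attend to itself and to earlier positions.

# Optional: inspect the subsequent (causal) mask for a length-4 sequence.
# Entries on and below the diagonal are 0.0; entries above it are -inf.
print(generate_square_subsequent_mask(4))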

An example of training code is provided below:

SRC_VOCAB_SIZE = len(de_vocab)
TGT_VOCAB_SIZE = len(en_vocab)
EMB_SIZE = 512
NHEAD = 8
FFN_HID_DIM = 512
BATCH_SIZE = 128
NUM_ENCODER_LAYERS = 3
NUM_DECODER_LAYERS = 3
NUM_EPOCHS = 50
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# create model
transformer = Seq2SeqTransformer(NUM_ENCODER_LAYERS, NUM_DECODER_LAYERS, EMB_SIZE, SRC_VOCAB_SIZE, TGT_VOCAB_SIZE, FFN_HID_DIM)
transformer = transformer.cuda()

# Xavier initialization
for p in transformer.parameters():
    if p.dim() > 1:
        nn.init.xavier_uniform_(p)

# criterion
loss_fn = torch.nn.CrossEntropyLoss(ignore_index=1)

# optimization routine
optimizer = torch.optim.Adam(transformer.parameters(),lr=0.0001,betas=(0.9,0.98), eps=1e-9)

# training function
def train_epoch(model, train_dl, optimizer):
    model.train()
    losses = 0
    for idx, (src, tgt) in enumerate(train_dl):
        src = src.cuda()
        tgt = tgt.cuda()
        tgt_input = tgt[:,:-1]
        src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = create_mask(src, tgt_input)
        pred = model(src, tgt_input, src_mask, tgt_mask, src_padding_mask, tgt_padding_mask, src_padding_mask)
        optimizer.zero_grad()
        tgt_out = tgt[:, 1:]
        # evaluate training loss
        loss = loss_fn(pred.reshape(-1, pred.shape[-1]), tgt_out.reshape(-1))
        # backpropagate
        loss.backward()
        optimizer.step()
        losses += loss.item()

    return losses / len(train_dl)

We use the following function to evaluate performance, essentially making predictions on the validation set without gradient updates.

def evaluate(model, val_dl):
    model.eval()
    losses = 0
    with torch.no_grad():
        for idx, (src, tgt) in enumerate(val_dl):
            src = src.cuda()
            tgt = tgt.cuda()
            tgt_input = tgt[:, :-1]
            src_mask, tgt_mask, src_padding_mask, tgt_padding_mask = create_mask(src, tgt_input)
            logits = model(src, tgt_input, src_mask, tgt_mask,
                           src_padding_mask, tgt_padding_mask, src_padding_mask)
            tgt_out = tgt[:, 1:]
            loss = loss_fn(logits.reshape(-1, logits.shape[-1]), tgt_out.reshape(-1))
            losses += loss.item()
    return losses / len(val_dl)

Experiments below:

# train model
num_epochs = 19
for epoch in range(num_epochs):
    start_time = time.time()
    train_loss = train_epoch(transformer,train_dl,optimizer)
    end_time = time.time()
    val_loss = evaluate(transformer,val_dl)

    # report
    print(f'>>>> Epoch: {epoch}, Train loss: {train_loss:.3f}, Val loss: {val_loss:.3f}, Epoch time = {(end_time - start_time):.3f} sec ')

The training on my machine looked like the following:


>>>> Epoch: 0, Train loss: 5.233, Val loss: 3.972, Epoch time = 70.432s
>>>> Epoch: 1, Train loss: 3.719, Val loss: 3.433, Epoch time = 73.081s
>>>> Epoch: 2, Train loss: 3.308, Val loss: 3.150, Epoch time = 74.110s
>>>> Epoch: 3, Train loss: 3.018, Val loss: 2.931, Epoch time = 73.863s
>>>> Epoch: 4, Train loss: 2.764, Val loss: 2.727, Epoch time = 75.674s
>>>> Epoch: 5, Train loss: 2.534, Val loss: 2.559, Epoch time = 75.814s
>>>> Epoch: 6, Train loss: 2.341, Val loss: 2.428, Epoch time = 77.545s
>>>> Epoch: 7, Train loss: 2.176, Val loss: 2.332, Epoch time = 78.984s
>>>> Epoch: 8, Train loss: 2.034, Val loss: 2.266, Epoch time = 79.218s
>>>> Epoch: 9, Train loss: 1.906, Val loss: 2.193, Epoch time = 81.111s
>>>> Epoch: 10, Train loss: 1.793, Val loss: 2.170, Epoch time = 81.286s
>>>> Epoch: 11, Train loss: 1.691, Val loss: 2.134, Epoch time = 80.992s
>>>> Epoch: 12, Train loss: 1.596, Val loss: 2.094, Epoch time = 80.932s
>>>> Epoch: 13, Train loss: 1.509, Val loss: 2.099, Epoch time = 81.206s
>>>> Epoch: 14, Train loss: 1.428, Val loss: 2.061, Epoch time = 81.153s
>>>> Epoch: 15, Train loss: 1.353, Val loss: 2.046, Epoch time = 82.154s
>>>> Epoch: 16, Train loss: 1.282, Val loss: 2.039, Epoch time = 81.877s
>>>> Epoch: 17, Train loss: 1.216, Val loss: 2.049, Epoch time = 81.736s
>>>> Epoch: 18, Train loss: 1.154, Val loss: 2.046, Epoch time = 81.477s


After the model is trained, it is technically still outputting numbers (token indices). What we want is text in English. One way to decode is to greedily compute a maximum-likelihood sequence of tokens from our vocabulary.


def greedy_decode(model, src, src_mask, max_len, start_symbol):
    src = src.cuda()
    src_mask = src_mask.cuda()
    memory = model.encode(src, src_mask)
    # start with the <bos> token (index 2)
    ys = torch.ones(1, 1).fill_(start_symbol).type(torch.long).cuda()
    for i in range(max_len - 1):
        memory = memory.cuda()
        tgt_mask = (generate_square_subsequent_mask(ys.size(1)).type(torch.bool)).cuda()
        out = model.decode(ys, memory, tgt_mask).squeeze(0)[-1]
        prob = model.generator(out)
        next_word = prob.argmax().item()
        ys = torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(next_word)], dim=1)
        # stop at the <eos> token (index 3)
        if next_word == 3:
            break
    return ys

def translate(model, src, src_vocab, tgt_vocab, src_tokenizer):
    model.eval()
    tokens = [2] + src_vocab(src_tokenizer(src)) + [3]
    num_tokens = len(tokens)
    src = torch.LongTensor(tokens).reshape(1, num_tokens)
    src_mask = torch.zeros(num_tokens, num_tokens).type(torch.bool)
    tgt_tokens = greedy_decode(model, src, src_mask, max_len=num_tokens + 5, start_symbol=2).flatten()
    return " ".join([tgt_vocab.get_itos()[tok] for tok in tgt_tokens]).replace("<bos>", "").replace("<eos>", "")

Finally, we can try our hand at translating some text, even text outside of the training data.

output = translate(transformer, "Eine Gruppe von Menschen steht vor einem Flughafen .", de_vocab, en_vocab, de_tokenizer)
print(output)

which produces the following output:

A group of people standing in front of an airport .

I will come back later and write about the math/probability underlying transformers.