Skip to content

Commit

Permalink
Moved other architectures to dedicated repos
Browse files Browse the repository at this point in the history
  • Loading branch information
ggldnl committed Mar 25, 2024
1 parent 1a54b8c commit 9b43f0c
Show file tree
Hide file tree
Showing 29 changed files with 51 additions and 1,650 deletions.
10 changes: 8 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,2 +1,8 @@
# Lightning-Models
Collection of machine learning models implemented using pytorch lightning
# Transformer

PyTorch Lightning implementation of the original Transformer architecture
as described in the [Attention Is All You Need](https://arxiv.org/abs/1706.03762) paper.
Along with the architecture, the repo contains the code to run training and inference on
a machine translation task to translate from English to Italian. The code for a tokenizer
and a dataloader is provided as well. The dataloader uses the [OPUS Books](https://huggingface.co/datasets/opus_books)
dataset.
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from models.transformer.blocks.layer_normalization import LayerNormalization
from architecture.blocks.layer_normalization import LayerNormalization
import torch.nn as nn


Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from models.transformer.blocks.multihead_attention import MultiHeadAttentionBlock
from models.transformer.blocks.residual_connection import ResidualConnection
from models.transformer.blocks.feed_forward import FeedForwardBlock
from architecture.blocks.multihead_attention import MultiHeadAttentionBlock
from architecture.blocks.residual_connection import ResidualConnection
from architecture.blocks.feed_forward import FeedForwardBlock
import torch.nn as nn


Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from models.transformer.blocks.layer_normalization import LayerNormalization
from architecture.blocks.layer_normalization import LayerNormalization
import torch.nn as nn


Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from models.transformer.blocks.multihead_attention import MultiHeadAttentionBlock
from models.transformer.blocks.residual_connection import ResidualConnection
from models.transformer.blocks.feed_forward import FeedForwardBlock
from architecture.blocks.multihead_attention import MultiHeadAttentionBlock
from architecture.blocks.residual_connection import ResidualConnection
from architecture.blocks.feed_forward import FeedForwardBlock
import torch.nn as nn


Expand Down
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
from models.transformer.blocks.layer_normalization import LayerNormalization
from architecture.blocks.layer_normalization import LayerNormalization
import torch.nn as nn


Expand Down
File renamed without changes.
18 changes: 9 additions & 9 deletions models/graphormer/model.py → architecture/model.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
from models.graphormer.blocks.multihead_attention import MultiHeadAttentionBlock
from models.graphormer.blocks.positional_encoding import PositionalEncoding
from models.graphormer.blocks.input_embeddings import InputEmbeddings
from models.graphormer.blocks.feed_forward import FeedForwardBlock
from models.graphormer.blocks.projection import ProjectionLayer
from models.graphormer.blocks.encoder_block import EncoderBlock
from models.graphormer.blocks.decoder_block import DecoderBlock
from models.graphormer.blocks.encoder import Encoder
from models.graphormer.blocks.decoder import Decoder
from architecture.blocks.multihead_attention import MultiHeadAttentionBlock
from architecture.blocks.positional_encoding import PositionalEncoding
from architecture.blocks.input_embeddings import InputEmbeddings
from architecture.blocks.feed_forward import FeedForwardBlock
from architecture.blocks.projection import ProjectionLayer
from architecture.blocks.encoder_block import EncoderBlock
from architecture.blocks.decoder_block import DecoderBlock
from architecture.blocks.encoder import Encoder
from architecture.blocks.decoder import Decoder
import pytorch_lightning as pl
import torch.nn as nn
import torch
Expand Down
File renamed without changes.
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ def test_dataloader(self):

if __name__ == '__main__':

from models.transformer.machine_translation.tokenizer import WordLevelTokenizer
from machine_translation.tokenizer import WordLevelTokenizer

# Use txt for better interpretability
source_tokenizer_path = os.path.join(config.TOK_DIR, r'tokenizer_source.txt')
Expand Down
Original file line number Diff line number Diff line change
@@ -1,18 +1,17 @@
from models.transformer.blocks.multihead_attention import MultiHeadAttentionBlock
from models.transformer.blocks.positional_encoding import PositionalEncoding
from models.transformer.blocks.input_embeddings import InputEmbeddings
from models.transformer.blocks.feed_forward import FeedForwardBlock
from models.transformer.blocks.projection import ProjectionLayer
from models.transformer.blocks.encoder_block import EncoderBlock
from models.transformer.blocks.decoder_block import DecoderBlock
from models.transformer.blocks.encoder import Encoder
from models.transformer.blocks.decoder import Decoder
from models.transformer.model import Transformer
from architecture.blocks.multihead_attention import MultiHeadAttentionBlock
from architecture.blocks.positional_encoding import PositionalEncoding
from architecture.blocks.input_embeddings import InputEmbeddings
from architecture.blocks.feed_forward import FeedForwardBlock
from architecture.blocks.projection import ProjectionLayer
from architecture.blocks.encoder_block import EncoderBlock
from architecture.blocks.decoder_block import DecoderBlock
from architecture.blocks.encoder import Encoder
from architecture.blocks.decoder import Decoder
from architecture.model import Transformer
from tokenizer import WordLevelTokenizer
from data import OPUSDataModule
import torch.nn as nn
import config
import torch
import os


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -273,8 +273,8 @@ def load(cls, path, driver='pkl'):

if __name__ == '__main__':

from models.transformer.machine_translation.data import OPUSDataModule
import models.transformer.machine_translation.config as config
from machine_translation.data import OPUSDataModule
import machine_translation.config as config

def create_tokenizer(stage='source'): # stage = ['source', 'target']

Expand Down
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
from models.transformer.blocks.multihead_attention import MultiHeadAttentionBlock
from models.transformer.blocks.positional_encoding import PositionalEncoding
from models.transformer.blocks.input_embeddings import InputEmbeddings
from models.transformer.blocks.feed_forward import FeedForwardBlock
from models.transformer.blocks.projection import ProjectionLayer
from models.transformer.blocks.encoder_block import EncoderBlock
from models.transformer.blocks.decoder_block import DecoderBlock
from models.transformer.model import LightningTransformer
from models.transformer.blocks.encoder import Encoder
from models.transformer.blocks.decoder import Decoder
from models.transformer.model import Transformer
from models.transformer.cosine_warmup import CosineWarmupScheduler
from architecture.blocks.multihead_attention import MultiHeadAttentionBlock
from architecture.blocks.positional_encoding import PositionalEncoding
from architecture.blocks.input_embeddings import InputEmbeddings
from architecture.blocks.feed_forward import FeedForwardBlock
from architecture.blocks.projection import ProjectionLayer
from architecture.blocks.encoder_block import EncoderBlock
from architecture.blocks.decoder_block import DecoderBlock
from architecture.model import LightningTransformer
from architecture.blocks.encoder import Encoder
from architecture.blocks.decoder import Decoder
from architecture.model import Transformer
from architecture.cosine_warmup import CosineWarmupScheduler
from tokenizer import WordLevelTokenizer
from data import OPUSDataModule
import pytorch_lightning as pl
Expand Down
18 changes: 0 additions & 18 deletions models/feedforward/MNIST_classification/config.py

This file was deleted.

156 changes: 0 additions & 156 deletions models/feedforward/MNIST_classification/data.py

This file was deleted.

41 changes: 0 additions & 41 deletions models/feedforward/MNIST_classification/train.py

This file was deleted.

Loading

0 comments on commit 9b43f0c

Please sign in to comment.