import torch
from torch import Tensor
from torch.nn import Module
from transformers import T5TokenizerFast, T5ForConditionalGeneration
from typing import List, Optional, Tuple


class T5(Module):
    '''
    T5 model from: https://huggingface.co/docs/transformers/model_doc/t5
    '''

    def __init__(self,
                 variant:str="t5-small",
                 max_source_length:int=256,
                 max_target_length:int=128,
                 optimizer_config:Optional[dict]=None,
                ):

        # Assertions
        assert variant in ["t5-small", "t5-base", "t5-large"], f"Unsupported variant: {variant}"

        super().__init__()

        self.variant            = variant
        self.max_source_length  = max_source_length
        self.max_target_length  = max_target_length

        # Tokenizer & model
        self.tokenizer          = T5TokenizerFast.from_pretrained(self.variant,
                                                                  model_max_length=self.max_source_length)
        self.model              = T5ForConditionalGeneration.from_pretrained(self.variant)
        
        # Optimizer
        self.optimizer          = torch.optim.AdamW(self.parameters(), **(optimizer_config or {}))
        
        # Scheduler
        self.scheduler          = None
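
        # Sketch (an assumption, not in the original file): a warmup scheduler
        # could be attached via transformers' helper, e.g.
        #
        #   from transformers import get_linear_schedule_with_warmup
        #   self.scheduler = get_linear_schedule_with_warmup(
        #       self.optimizer, num_warmup_steps=0,
        #       num_training_steps=total_steps)  # total_steps is hypothetical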


    def tokenize(self, input:List[str]):

        out = self.tokenizer(input, max_length=self.max_source_length,
                             truncation=True, padding=True,
                             return_tensors="pt")

        # Keep the tokenized tensors on the same device as the model
        device = self.model.device
        return out.input_ids.to(device), out.attention_mask.to(device)


    def forward(self, input:List[str], label:Optional[List[str]]=None) -> Tuple[Tensor, Optional[Tensor]]:

        '''
        Encodes the input strings and returns the model output as tensors (not decoded).
        When label is not None, returns (logits, loss); otherwise returns
        (generated token ids, None).
        '''

        input_ids, input_masks = self.tokenize(input)

        if label is not None:
            label_ids, _ = self.tokenize(label)
            output = self.model(input_ids=input_ids, attention_mask=input_masks, labels=label_ids)
            return output.logits, output.loss

        return self.model.generate(input_ids=input_ids,
                                   attention_mask=input_masks,
                                   max_new_tokens=self.max_target_length), None



    def predict(self, input:List[str]) -> List[str]:

        '''
        Generates the target output and decodes it back to strings.
        '''

        token_ids, _ = self.forward(input=input)
        return self.tokenizer.batch_decode(token_ids, skip_special_tokens=True)
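

    # Minimal sketch (not in the original file) showing how the optimizer and
    # optional scheduler set up in __init__ could drive a single optimization
    # step; the method name and return value are assumptions for illustration.
    def training_step(self, input:List[str], label:List[str]) -> float:

        self.model.train()
        _, loss = self.forward(input, label)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        if self.scheduler is not None:
            self.scheduler.step()

        return loss.item()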
        


if __name__ == '__main__':

    # Small smoke test, similar in spirit to T5-old.py.

    model = T5('t5-small')
    model.to('cuda' if torch.cuda.is_available() else 'cpu')

    #inputs = [
        #"translate English to German: Thank you so much, Chris.",
        #"translate English to German: I have been blown away by this conference, and I want to thank all of you for the many nice comments about what I had to say the other night.",
        #"translate German to English: Es ist mir wirklich eine Ehre, zweimal auf dieser Bühne stehen zu dürfen. Tausend Dank dafür.",
    #]

    #targets = [
        #"Vielen Dank, Chris.",
        #"Ich bin wirklich begeistert von dieser Konferenz, und ich danke Ihnen allen für die vielen netten Kommentare zu meiner Rede vorgestern Abend.",
        #"And it's truly a great honor to have the opportunity to come to this stage twice; I'm extremely grateful.",
    #]

    inputs = ["Good Morning, How are you?"]
    targets = ["Buongiorno, come stai?"]

    logits, loss = model(inputs, targets)
    print('Model forward')
    print('logits: ', logits)
    print('loss: ', loss)
    
    outputs = model.predict(inputs)

    for inp, out, tar in zip(inputs, outputs, targets):
        print(f"Input: \n{inp}\n\nOutput: \n{out}\n\nTarget: \n{tar}\n\n")