{
    "module": "keras_nlp.src.models.gemma.gemma_causal_lm",
    "class_name": "GemmaCausalLM",
    "config": {
        "backbone": {
            "module": "keras_nlp.src.models.gemma.gemma_backbone",
            "class_name": "GemmaBackbone",
            "config": {
                "name": "gemma_backbone",
                "trainable": true,
                "vocabulary_size": 256000,
                "num_layers": 18,
                "num_query_heads": 8,
                "num_key_value_heads": 1,
                "hidden_dim": 2048,
                "intermediate_dim": 32768,
                "head_dim": 256,
                "layer_norm_epsilon": 1e-06,
                "dropout": 0,
                "query_head_dim_normalize": true,
                "use_post_ffw_norm": false,
                "use_post_attention_norm": false,
                "final_logit_soft_cap": null,
                "attention_logit_soft_cap": null,
                "sliding_window_size": 4096,
                "use_sliding_window_attention": false
            },
            "registered_name": "keras_nlp>GemmaBackbone"
        },
        "preprocessor": {
            "module": "keras_nlp.src.models.gemma.gemma_causal_lm_preprocessor",
            "class_name": "GemmaCausalLMPreprocessor",
            "config": {
                "name": "gemma_causal_lm_preprocessor",
                "trainable": true,
                "dtype": "float32",
                "tokenizer": {
                    "module": "keras_nlp.src.models.gemma.gemma_tokenizer",
                    "class_name": "GemmaTokenizer",
                    "config": {
                        "name": "gemma_tokenizer",
                        "trainable": true,
                        "dtype": "int32",
                        "proto": null,
                        "sequence_length": null,
                        "add_bos": false,
                        "add_eos": false
                    },
                    "registered_name": "keras_nlp>GemmaTokenizer"
                },
                "sequence_length": 512,
                "add_start_token": true,
                "add_end_token": true
            },
            "registered_name": "keras_nlp>GemmaCausalLMPreprocessor"
        },
        "name": "gemma_causal_lm"
    },
    "registered_name": "keras_nlp>GemmaCausalLM"
}