piuzha committed on
Commit
78f3e42
·
verified ·
1 Parent(s): d2309ce

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -1
README.md CHANGED
@@ -23,7 +23,7 @@ You can use the following code to run inference with the model. The model is sav
23
  import torch
24
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
25
 
26
- model_name = 'moxin-org/moxin-chat-7b'
27
  tokenizer = AutoTokenizer.from_pretrained(model_name)
28
  model = AutoModelForCausalLM.from_pretrained(
29
  model_name,
 
23
  import torch
24
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
25
 
26
+ model_name = 'moxin-org/moxin-7b'
27
  tokenizer = AutoTokenizer.from_pretrained(model_name)
28
  model = AutoModelForCausalLM.from_pretrained(
29
  model_name,