Text Generation · Transformers · Safetensors · Japanese · English · qwen · custom_code
ptrdvn committed
Commit e7f0b62
1 Parent(s): 1b6b19e

Update README.md

Files changed (1):
  README.md +3 -3
README.md CHANGED
@@ -24,8 +24,8 @@ Qwen/Qwen-14B-Chat + Karasu's finetuning datasets
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus")
-model = AutoModelForCausalLM.from_pretrained("lightblue/karasu-7B-chat-plus", torch_dtype=torch.bfloat16, device_map="auto")
+tokenizer = AutoTokenizer.from_pretrained("lightblue/qarasu-14B-chat-plus-unleashed", trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained("lightblue/qarasu-14B-chat-plus-unleashed", torch_dtype=torch.bfloat16, device_map="auto", trust_remote_code=True)
 
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
@@ -43,7 +43,7 @@ pipe(prompt, max_new_tokens=100, do_sample=False, temperature=0.0, return_full_t
 from vllm import LLM, SamplingParams
 
 sampling_params = SamplingParams(temperature=0.0, max_tokens=100)
-llm = LLM(model="lightblue/karasu-7B-chat-plus")
+llm = LLM(model="lightblue/karasu-7B-chat-plus", trust_remote_code=True)
 
 messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]
 messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})
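For reference, here is a self-contained sketch of the updated Transformers usage after this commit. The hunk only shows the model/tokenizer construction, so the `pipeline` import, the chat-template prompt construction, and the final `pipe(...)` call are assumptions about the surrounding README code; `return_full_text=False` and plain greedy decoding (`do_sample=False`, no `temperature` argument) are likewise illustrative choices, not verbatim README content.

```python
# Minimal sketch of the updated Transformers snippet, assuming the rest of
# the README builds the prompt with the tokenizer's chat template.
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch

# trust_remote_code=True is needed because Qwen-based checkpoints ship
# custom modeling/tokenizer code (hence the repo's `custom_code` tag).
tokenizer = AutoTokenizer.from_pretrained(
    "lightblue/qarasu-14B-chat-plus-unleashed", trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
    "lightblue/qarasu-14B-chat-plus-unleashed",
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]  # "You are an AI assistant."
messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})  # "Who is the Prime Minister of the UK?"

# Render the messages into a single prompt string with the chat template.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# do_sample=False gives greedy decoding; return_full_text=False (assumed)
# strips the prompt from the pipeline's output.
pipe(prompt, max_new_tokens=100, do_sample=False, return_full_text=False)
```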
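Likewise, a hedged sketch of the full vLLM flow the second hunk sits in. The hunk shows only the `LLM` construction, sampling parameters, and the message list, so the prompt rendering (reusing the HF tokenizer's chat template) and the `llm.generate(...)` call below are assumptions about the surrounding README code.

```python
# Minimal sketch of the vLLM path, assuming the prompt is rendered with the
# same chat template and passed to LLM.generate().
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

# temperature=0.0 is greedy decoding in vLLM.
sampling_params = SamplingParams(temperature=0.0, max_tokens=100)
# trust_remote_code=True (added in this commit) lets vLLM load any custom
# code the checkpoint ships.
llm = LLM(model="lightblue/karasu-7B-chat-plus", trust_remote_code=True)

messages = [{"role": "system", "content": "あなたはAIアシスタントです。"}]  # "You are an AI assistant."
messages.append({"role": "user", "content": "イギリスの首相は誰ですか?"})  # "Who is the Prime Minister of the UK?"

# Assumed prompt construction: reuse the HF tokenizer's chat template.
tokenizer = AutoTokenizer.from_pretrained("lightblue/karasu-7B-chat-plus", trust_remote_code=True)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

outputs = llm.generate([prompt], sampling_params)
print(outputs[0].outputs[0].text)
```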