---
datasets:
- yahma/alpaca-cleaned
- Nebulous/gpt4all_pruned
language:
- en
---

`edu-linguistic/opt-1.3b-edu-sft` is a PEFT adapter fine-tuned on top of `facebook/opt-1.3b`. Example of loading the adapter and generating a response:

```python
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = "edu-linguistic/opt-1.3b-edu-sft"
model_name = "facebook/opt-1.3b"

# Load the base model, then attach the fine-tuned adapter on top of it.
config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(model_name)
model = PeftModel.from_pretrained(model, peft_model_id)
tokenizer = AutoTokenizer.from_pretrained(model_name)

question = "<|prompter|> Consider the following function: f(x1, x2) = ln(x1). This function is…"

# Tokenize the prompt into input ids.
question = tokenizer.encode(question, return_tensors="pt")

generation_kwargs = {
    "do_sample": True,
    "top_k": 0,  # disable top-k filtering; rely on nucleus (top-p) sampling only
    "top_p": 0.9,
    "bos_token_id": tokenizer.bos_token_id,
    "pad_token_id": tokenizer.pad_token_id,
    "eos_token_id": tokenizer.eos_token_id,
    "num_return_sequences": 1,
    "min_new_tokens": 10,
    "max_new_tokens": 512,
}

response = model.generate(input_ids=question, **generation_kwargs)
response = tokenizer.decode(
    response[0],
    skip_special_tokens=False,
    clean_up_tokenization_spaces=False,
)
print(response)
```
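
The snippet above runs on CPU. If a GPU is available, a minimal sketch for moving the model and inputs onto it (variable names follow the example above):

```python
import torch

# Pick a device; falls back to CPU when CUDA is unavailable.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

# Inputs must live on the same device as the model.
response = model.generate(input_ids=question.to(device), **generation_kwargs)
```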