mjbuehler committed
Commit 5678897
1 Parent(s): 082b385

Update README.md

Files changed (1)
  1. README.md +76 -1
README.md CHANGED
@@ -15,6 +15,81 @@ This model is based on work reported in https://doi.org/10.1002/advs.202306724.
 
 
 This repository includes both Hugging Face transformers files and GGUF files (in several quantized variants; the q5_K_M version is recommended).
 
+ #### Hugging Face transformers files: Loading and inference
+
+ ```
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ from accelerate import infer_auto_device_map
+
+ # model_name: set to the Hugging Face repository ID of this model
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     trust_remote_code=True,
+     device_map="auto",  # alternatively: device_map="cuda:0"
+     torch_dtype=torch.bfloat16,
+     # use_flash_attention_2=True,
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ ```
+ Chat template:
+ ```
+ messages = [
+     {"role": "system", "content": "You are a friendly materials scientist."},
+     {"role": "user", "content": "What is the strongest spider silk material?"},
+     {"role": "assistant", "content": "Sample response."},
+ ]
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+ ```
+
+ This renders the following prompt string:
+
+ '<|system|>\nYou are a friendly materials scientist.</s>\n<|user|>\nWhat is the strongest spider silk material?</s>\n<|assistant|>\nSample response.</s>\n<|assistant|>\n'
+
+ ```
+ device = 'cuda'
+
+ def generate_response(text_input="Biological materials offer amazing possibilities, such as",
+                       num_return_sequences=1,
+                       temperature=1.,
+                       max_new_tokens=127,
+                       num_beams=1,
+                       top_k=50,
+                       top_p=0.9, repetition_penalty=1., eos_token_id=2, verbatim=False,
+                       exponential_decay_length_penalty_fac=None,
+                       ):
+     inputs = tokenizer.encode(text_input, add_special_tokens=False, return_tensors='pt')
+     if verbatim:
+         print("Length of input, tokenized: ", inputs.shape, inputs)
+     with torch.no_grad():
+         outputs = model.generate(input_ids=inputs.to(device),
+                                  max_new_tokens=max_new_tokens,
+                                  temperature=temperature,  # value used to modulate the next-token probabilities
+                                  num_beams=num_beams,
+                                  top_k=top_k,
+                                  top_p=top_p,
+                                  num_return_sequences=num_return_sequences, eos_token_id=eos_token_id,
+                                  do_sample=True,
+                                  repetition_penalty=repetition_penalty,
+                                  )
+     return tokenizer.batch_decode(outputs[:, inputs.shape[1]:].detach().cpu().numpy(), skip_special_tokens=True)
+ ```
+ Then:
+ ```
+ messages = [
+     {"role": "system", "content": "You are a friendly materials scientist."},
+     {"role": "user", "content": "What is the strongest spider silk material?"},
+ ]
+ prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+
+ output_text = generate_response(text_input=prompt, eos_token_id=tokenizer.eos_token_id,
+                                 num_return_sequences=1, repetition_penalty=1.,
+                                 top_p=0.9, top_k=512,
+                                 temperature=0.1, max_new_tokens=512, verbatim=False,
+                                 )
+ print(output_text)
+ ```
+
+ #### GGUF files: Loading and inference
+
 ```
 from llama_cpp import Llama
 
 
@@ -80,4 +155,4 @@
 print("--- %s seconds ---" % deltat)
 toked = tokenizer(res)
 print("Tokens per second (generation): ", len(toked['input_ids'])/deltat)
- ```
+ ```
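
As a minimal end-to-end sketch of using the recommended q5_K_M GGUF file, the snippet below downloads it with huggingface_hub and queries it with llama-cpp-python, following the chat format shown above. The repository ID, GGUF filename, and sampling settings are illustrative placeholders, not values taken from this repository.

```
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Placeholder repository ID and filename: replace with the actual values for this repository.
model_path = hf_hub_download(repo_id="<user>/<repo>", filename="<model>.Q5_K_M.gguf")

# n_gpu_layers=-1 offloads all layers to the GPU when one is available; n_ctx sets the context window.
llm = Llama(model_path=model_path, n_ctx=2048, n_gpu_layers=-1)

# Prompt built in the chat format shown above.
prompt = "<|system|>\nYou are a friendly materials scientist.</s>\n<|user|>\nWhat is the strongest spider silk material?</s>\n<|assistant|>\n"

# Stop at the end-of-sequence marker used by the chat template.
out = llm(prompt, max_tokens=256, temperature=0.1, top_p=0.9, stop=["</s>"])
print(out["choices"][0]["text"])
```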