Update app.py
app.py CHANGED
@@ -1,151 +1,120 @@
-import …
-import …
-
-import …
-import …
-from …
-[old lines 7–41 not shown in the page extraction]
-else:
-    row_count = len(messages)
-    for i, message in enumerate(messages):
-        st.session_state[f"message{i}"] = message
-    st.session_state.row_count = row_count
-
-if reset:
-    row_count = 1
-    st.session_state.row_count = row_count
-    for i in range(100):  # Assuming a maximum of 100 rows
-        st.session_state[f"note{i}"] = ""
-        st.session_state[f"message{i}"] = ""
-        st.session_state[f"response{i}"] = ""
-        st.session_state[f"prompt_tokens{i}"] = 0
-        st.session_state[f"response_tokens{i}"] = 0
-        st.session_state[f"word_count{i}"] = 0
-
-def generate_response(i, message):
-    try:
-        completion = openai.ChatCompletion.create(
-            model=model,
-            messages=[
-                {"role": "system", "content": system_message},
-                {"role": "user", "content": message}
-            ],
-            temperature=temperature,
-            max_tokens=max_tokens,
-            top_p=top_p
         )

-[old lines 72–88 not shown in the page extraction]
-        self.…
-[old lines 90–148 not shown in the page extraction]
-download_filename = "GPT-4 Responses.txt"
-download_link = create_download_link(responses_text, download_filename)
-st.markdown(download_link, unsafe_allow_html=True)

+from typing import Any, Dict
+import warnings
+
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import (
+    StoppingCriteria,
+    StoppingCriteriaList,
+    TextIteratorStreamer,
+)
+
+
+INSTRUCTION_KEY = "### Instruction:"
+RESPONSE_KEY = "### Response:"
+END_KEY = "### End"
+INTRO_BLURB = "Below is an instruction that describes a task. Write a response that appropriately completes the request."
+PROMPT_FOR_GENERATION_FORMAT = """{intro}
+{instruction_key}
+{instruction}
+{response_key}
+""".format(
+    intro=INTRO_BLURB,
+    instruction_key=INSTRUCTION_KEY,
+    instruction="{instruction}",
+    response_key=RESPONSE_KEY,
+)
+
+
+class InstructionTextGenerationPipeline:
+    def __init__(
+        self,
+        model_name,
+        torch_dtype=torch.bfloat16,
+        trust_remote_code=True,
+        use_auth_token=None,
+    ) -> None:
+        self.model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            torch_dtype=torch_dtype,
+            trust_remote_code=trust_remote_code,
+            use_auth_token=use_auth_token,
         )

+        tokenizer = AutoTokenizer.from_pretrained(
+            model_name,
+            trust_remote_code=trust_remote_code,
+            use_auth_token=use_auth_token,
+        )
+        if tokenizer.pad_token_id is None:
+            warnings.warn(
+                "pad_token_id is not set for the tokenizer. Using eos_token_id as pad_token_id."
+            )
+            tokenizer.pad_token = tokenizer.eos_token
+        tokenizer.padding_side = "left"
+        self.tokenizer = tokenizer
+
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model.eval()
+        self.model.to(device=device, dtype=torch_dtype)
+
+        self.generate_kwargs = {
+            "temperature": 0.1,
+            "top_p": 0.92,
+            "top_k": 0,
+            "max_new_tokens": 1024,
+            "use_cache": True,
+            "do_sample": True,
+            "eos_token_id": self.tokenizer.eos_token_id,
+            "pad_token_id": self.tokenizer.pad_token_id,
+            "repetition_penalty": 1.1,  # 1.0 means no penalty, > 1.0 means penalty, 1.2 from CTRL paper
+        }
+
+    def format_instruction(self, instruction):
+        return PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
+
+    def __call__(
+        self, instruction: str, **generate_kwargs: Dict[str, Any]
+    ) -> str:
+        s = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction)
+        input_ids = self.tokenizer(s, return_tensors="pt").input_ids
+        input_ids = input_ids.to(self.model.device)
+        gkw = {**self.generate_kwargs, **generate_kwargs}
+        with torch.no_grad():
+            output_ids = self.model.generate(input_ids, **gkw)
+        # Slice the output_ids tensor to get only the newly generated tokens
+        new_tokens = output_ids[0, len(input_ids[0]) :]
+        output_text = self.tokenizer.decode(new_tokens, skip_special_tokens=True)
+        return output_text
+
+# Initialize the model and tokenizer
+generate = InstructionTextGenerationPipeline(
+    "mosaicml/mpt-7b-instruct",
+    torch_dtype=torch.bfloat16,
+    trust_remote_code=True,
+)
+stop_token_ids = generate.tokenizer.convert_tokens_to_ids(["<|endoftext|>"])
+
+
+# Define a custom stopping criterion
+class StopOnTokens(StoppingCriteria):
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        for stop_id in stop_token_ids:
+            if input_ids[0][-1] == stop_id:
+                return True
+        return False
+
+"""### The prompt & response"""
+
+import json
+import textwrap
+
+def get_prompt(instruction):
+    prompt_template = f"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:"
+    return prompt_template
+
+# print(get_prompt('What is the meaning of life?'))
+
+def parse_text(text):
+    wrapped_text = textwrap.fill(text, width=100)
+    print(wrapped_text + '\n\n')
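For reference, PROMPT_FOR_GENERATION_FORMAT renders an instruction into the prompt below (shown with the sample instruction from the commented-out get_prompt call; get_prompt itself builds a near-identical variant with blank lines between the sections):

Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
What is the meaning of life?
### Response: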
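End to end, the pipeline object is simply called. A minimal usage sketch against the code above (illustrative, not part of the commit):

response = generate("What is the meaning of life?")
parse_text(response)  # wraps the response to 100 columns and prints it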
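TextIteratorStreamer and StoppingCriteriaList are imported above but never used in the lines shown. A minimal sketch of how they would typically be wired together with StopOnTokens for streamed output, assuming the standard transformers streaming pattern (illustrative, not part of the commit):

from threading import Thread

from transformers import StoppingCriteriaList, TextIteratorStreamer

# Build the prompt with the pipeline's own template.
prompt = generate.format_instruction("What is the meaning of life?")
input_ids = generate.tokenizer(prompt, return_tensors="pt").input_ids.to(generate.model.device)

# The streamer yields decoded text pieces as generate() produces tokens.
streamer = TextIteratorStreamer(
    generate.tokenizer, skip_prompt=True, skip_special_tokens=True
)
gkw = {
    **generate.generate_kwargs,
    "streamer": streamer,
    "stopping_criteria": StoppingCriteriaList([StopOnTokens()]),
}

# model.generate() blocks until finished, so run it on a worker thread
# and consume the stream on the main thread.
thread = Thread(target=generate.model.generate, args=(input_ids,), kwargs=gkw)
thread.start()
for piece in streamer:
    print(piece, end="", flush=True)
thread.join()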