add
- .gitignore +2 -0
- app.py +49 -0
- requirements.txt +4 -0
.gitignore
ADDED
@@ -0,0 +1,2 @@
+.venv
+gradio_cached_examples/
app.py
ADDED
@@ -0,0 +1,49 @@
+import gradio as gr
+from peft import PeftModel
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+import re
+
+model_name = "google/gemma-2b"
+peft_model = "kazuma313/gemma-dokter-ft"
+device_map = "auto"
+
+base_model = AutoModelForCausalLM.from_pretrained(  # load the base model in float16
+    model_name,
+    low_cpu_mem_usage=True,
+    return_dict=True,
+    torch_dtype=torch.float16,
+    device_map=device_map,
+)
+model = PeftModel.from_pretrained(base_model, peft_model)  # apply the fine-tuned adapter
+model = model.merge_and_unload()  # merge the adapter weights into the base model
+
+# Load the tokenizer for the base model
+tokenizer = AutoTokenizer.from_pretrained(model_name,
+                                          trust_remote_code=True)
+tokenizer.pad_token = tokenizer.eos_token
+tokenizer.padding_side = "right"
+
+def echo(message, history, tokens):
+    pattern = r'Step \d+/\d+|^\d+\.\s*'  # strips "Step x/y" markers and leading list numbers
+    input_ids = tokenizer(message, return_tensors="pt").to(model.device)
+    outputs = model.generate(**input_ids, max_length=tokens)
+    answer = tokenizer.decode(outputs[0], skip_special_tokens=True).split('Answer:')[-1]
+    clean_answer = re.sub(pattern, '', answer)
+
+    return clean_answer
+
+
+demo = gr.ChatInterface(echo,
+                        examples=[["What are the negative effects of alcohol?"],
+                                  ["I have a lack of sleep; what happens if I continuously do this?"]],
+                        title="dokter Bot",
+                        retry_btn=None,
+                        undo_btn="Delete Previous",
+                        clear_btn="Clear",
+                        additional_inputs=[
+                            gr.Slider(64, 256, value=124)  # maximum generation length
+                        ],
+
+)
+demo.launch()
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+gradio
+peft
+transformers
+torch