kazuma313 committed on
Commit
01e3b36
1 Parent(s): 11f9396

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +49 -60
app.py CHANGED
@@ -1,61 +1,50 @@
## Model setup (runs once at import time): load the Gemma base model,
## merge in the LoRA adapter fine-tuned for doctor Q&A, and prepare the
## tokenizer for inference.
import gradio as gr
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import login
import torch
import re
import os

model_name = "google/gemma-2b"            # gated base-model repo
peft_model = "kazuma313/gemma-dokter-ft"  # LoRA adapter for this app
device_map = "auto"

# Authenticate against the Hugging Face Hub; the gated Gemma weights need
# a valid token. FIX: dropped add_to_git_credential=True — persisting the
# secret token into git credentials is unnecessary in a hosted Space and
# widens its exposure; the env var is the only place it should live.
hf_token = os.getenv('hftoken')
login(token=hf_token)

# FIX: removed dead commented-out disk_offload/PeftConfig code and the
# `from accelerate import disk_offload` / `save_dir` pair that existed
# only to support it.

base_model = AutoModelForCausalLM.from_pretrained(
    model_name,
    token=hf_token,
    low_cpu_mem_usage=True,
    return_dict=True,
    torch_dtype=torch.float16,
    device_map=device_map,
)
# Apply the adapter, then fold its weights into the base model so that
# inference runs without any PEFT hooks.
model = PeftModel.from_pretrained(base_model, peft_model)
model = model.merge_and_unload()

# Reload tokenizer to match the base model.
tokenizer = AutoTokenizer.from_pretrained(model_name,
                                          token=hf_token,
                                          trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"
def echo(message, history, tokens):
    """Chat callback for gr.ChatInterface: generate an answer to *message*.

    Parameters:
        message: the user's question (plain text).
        history: prior chat turns supplied by gr.ChatInterface (unused —
            generation is stateless).
        tokens: max generation length, fed from the UI slider.

    Returns the model's answer with step/list numbering stripped.
    """
    # Strip "Step k/n" markers and "1. "-style list numbering the model
    # tends to emit. FIX: pass re.MULTILINE so '^' matches at every line
    # start — without it, '^\d+\.' only matched at the very beginning of
    # the answer and mid-answer numbering survived.
    pattern = r'Step \d+/\d+|^\d+\.\s*'
    encoded = tokenizer(message, return_tensors="pt")
    outputs = model.generate(**encoded, max_length=tokens)
    # Keep only the text after the last 'Answer:' marker of the decoded output.
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True).split('Answer:')[-1]
    return re.sub(pattern, '', answer, flags=re.MULTILINE)
# Assemble the chat UI for the fine-tuned "dokter" assistant.  The extra
# slider is forwarded to `echo` as its `tokens` argument (max generation
# length).
example_questions = [
    ["what is the negative effect of alcohol?"],
    ["i have lack of sleep, what happend if continously do this?"],
]

demo = gr.ChatInterface(
    echo,
    examples=example_questions,
    title="dokter Bot",
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
    additional_inputs=[gr.Slider(64, 256, value=80)],
)
demo.launch()
 
## Imports
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
import gradio as gr

## Download the GGUF model
model_name = "kazuma313/lora_model_dokter_consultasi_q4_k_m"
model_file = "lora_model_dokter_consultasi_q4_k_m-unsloth.Q4_K_M.gguf"  # 4-bit quant; other quantization levels are available in the model repo if preferred
model_path = hf_hub_download(model_name, filename=model_file)

# BUG FIX: instantiate the llama.cpp model once at startup.  The original
# file imported Llama and downloaded the weights but never created `llm`,
# so `output_inference` raised NameError on every request (and at launch,
# since cache_examples=True runs the examples immediately).
llm = Llama(
    model_path=model_path,
    n_ctx=2048,    # room for the template plus the 512-token answer
    verbose=False,
)

# Prompt template (Indonesian): a doctor/patient consultation; the model
# is instructed to answer the patient's question correctly and completely.
prompt_template = """<|begin_of_text|>Dibawah ini adalah percakapan antara dokter dengan pasiennya yang ingin berkonsultasi terkait kesehatan. Tuliskan jawaban yang tepat dan lengkap sesuai sesuai pertanyaan dari pasien.<|end_of_text|>

### Pertanyaan:
{ask}

### Jawaban:
"""
def output_inference(tanya, history):
    """Chat callback: wrap the patient's question (*tanya*) in the
    consultation prompt and return the model's completion text.

    *history* is supplied by gr.ChatInterface but unused — each question
    is answered statelessly from the template alone.
    """
    prompt = prompt_template.format(ask=tanya)

    # Sampling settings kept deliberately conservative (low temperature)
    # for a medical-advice bot; generation stops at the end-of-text tag.
    sampling = dict(
        stop=["<|end_of_text|>"],
        max_tokens=512,
        temperature=0.3,
        top_p=0.95,
        top_k=40,
        min_p=0.05,
        typical_p=1.0,
        stream=False,
    )
    completion = llm(prompt, **sampling)
    # llama_cpp returns an OpenAI-style completion dict; take the first
    # choice's generated text.
    return completion['choices'][0]['text']
# Build and launch the consultation chat UI.  cache_examples=True makes
# Gradio run every example through `output_inference` once at startup.
chat_window = gr.Chatbot(height=300)
question_box = gr.Textbox(placeholder="Tanya saya kesehatan anda", container=False, scale=7)

demo = gr.ChatInterface(
    output_inference,
    chatbot=chat_window,
    textbox=question_box,
    title="Konsultasi dokter",
    description="Tanya saja semua keluhan mu",
    theme="soft",
    examples=["apa saja tips hidup sehat?", "apa penyebab dari minum alkohol berlebihan?", "apa yang terjadi jika pola tidur tidak teratur?", "berapa hasil dari 10 + 5?"],
    cache_examples=True,
    retry_btn=None,
    undo_btn="Delete Previous",
    clear_btn="Clear",
)
demo.launch()