Biswajit Padhi commited on
Commit
97c9b98
1 Parent(s): 60f6821

First Commit

Browse files
Files changed (2) hide show
  1. app.py +29 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import torch
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
4
+
5
def formatting_func(document):
    """Wrap *document* in the instruction prompt used by the summarizer.

    Parameters:
        document: the medical conclusion text to be rephrased.

    Returns:
        The full prompt string: instruction header, the conclusion,
        and a trailing "### Summary:" stub for the model to complete.
    """
    prompt_header = (
        "You are a model designed to rephrase medical summaries for a general "
        "audience. Please summarize the following article in such a way a "
        "normal person could understand it, while also ensuring the same "
        "factual accuracy. Replace any technical terms with their equivalents "
        "in ordinary language, and be concise (< 100 words) and "
        "approachable.\n---------\n"
    )
    return (
        "### " + prompt_header + " \n### Conclusion: " + document
        + " \n### Summary: "
    )
9
+
10
def generate(text):
    """Summarize a medical conclusion with the fine-tuned Mistral-7B model.

    Loads the fine-tuned model and tokenizer, builds the instruction prompt
    via ``formatting_func``, generates up to 200 new tokens on CUDA, and
    strips the echoed prompt from the decoded output.

    NOTE(review): the model and tokenizer are reloaded on every call, which
    is very slow for a web app — consider caching them (e.g. with
    ``st.cache_resource``) outside this function.

    Parameters:
        text: the medical conclusion to summarize.

    Returns:
        The decoded model response with the prompt text removed.
    """
    model_id = "BiswajitPadhi99/mistral-7b-finetuned-medical-summarizer"
    # `load_in_4bit=True` as a bare kwarg is deprecated; pass an explicit
    # BitsAndBytesConfig (already imported at the top of the file).
    quant_config = BitsAndBytesConfig(load_in_4bit=True)
    ft_model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="cuda",
        quantization_config=quant_config,
    )
    # `device_map` is not a tokenizer argument, so it is not passed here.
    eval_tokenizer = AutoTokenizer.from_pretrained(model_id, add_bos_token=True)
    ft_model.eval()
    with torch.no_grad():
        eval_prompt = formatting_func(text)
        model_input = eval_tokenizer(eval_prompt, return_tensors="pt").to("cuda")
        output_ids = ft_model.generate(**model_input, max_new_tokens=200)[0]
        response = eval_tokenizer.decode(output_ids, skip_special_tokens=True)
    # str.replace is a no-op when the prompt is absent, so no guard is needed.
    return response.replace(eval_prompt, '')


# Backward-compatible alias: the function was originally (mis)spelled
# "genenrate"; keep the old name working for any existing callers.
genenrate = generate
23
+
24
# Streamlit UI: read input text, run the summarizer, and show the result.
# `streamlit` was used as `st` but never imported anywhere in the file;
# import it here at the script's UI section so the app actually runs.
import streamlit as st

# Renamed from `input` to avoid shadowing the builtin.
user_text = st.text_input(label="Input Text")
# st.text_input returns "" (never None) until the user types something,
# so test truthiness instead of `is not None` to avoid summarizing "".
if user_text:
    # The original called undefined `generate(...)` and also called
    # `.header`/`.write` on the *list* returned by st.columns(1); write
    # directly to the page instead.
    output = genenrate(user_text)
    st.header("Summary")
    st.write(output)
requirements.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ transformers
2
+ torch
3
+ streamlit
4
+ bitsandbytes
5
+ accelerate