mintaeng committed on
Commit acd52ee • 1 Parent(s): 63b5e64

Update app.py

Files changed (1)
  1. app.py +125 -96
app.py CHANGED
@@ -1,108 +1,137 @@
- from langchain import PromptTemplate, LLMChain
- from langchain.llms import CTransformers
- import os
- from langchain.text_splitter import RecursiveCharacterTextSplitter
- from langchain.vectorstores import Chroma
- from langchain.chains import RetrievalQA
- from langchain.embeddings import HuggingFaceBgeEmbeddings
- from io import BytesIO
- from langchain.document_loaders import PyPDFLoader
  import gradio as gr
 
- local_llm = "final_model_maybe_gguf-unsloth.Q5_K_M.gguf"
-
- config = {
-     'max_new_tokens': 2048,
-     'repetition_penalty': 1.1,
-     'temperature': 0.6,
-     'top_k': 50,
-     'top_p': 0.9,
-     'stream': True,
-     'threads': int(os.cpu_count() / 2)
- }
-
- llm = CTransformers(
-     model=local_llm,
-     model_type="mistral",
-     lib="avx2",  # for CPU use
-     **config
- )
-
- print("LLM Initialized...")
-
- prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
- 제시된 질문에 대해서 context 내용으로 답변해줘.
-
- ### Context :
- {context}
-
- ### Instruction:
- {question}
-
- ### Response:
- """
-
- model_name = "jhgan/ko-sroberta-multitask"
- model_kwargs = {'device': 'cpu'}
- encode_kwargs = {'normalize_embeddings': False}
- embeddings = HuggingFaceBgeEmbeddings(
-     model_name=model_name,
-     model_kwargs=model_kwargs,
-     encode_kwargs=encode_kwargs
  )
-
- prompt = PromptTemplate(template=prompt_template, input_variables=['context', 'question'])
- load_vector_store = Chroma(persist_directory="stores/pet_cosine", embedding_function=embeddings)
- retriever = load_vector_store.as_retriever(search_kwargs={"k": 1})
- # query = "what is the fastest speed for a greyhound dog?"
- # semantic_search = retriever.get_relevant_documents(query)
- # print(semantic_search)
-
- print("######################################################################")
-
- chain_type_kwargs = {"prompt": prompt}
-
- # qa = RetrievalQA.from_chain_type(
- #     llm=llm,
- #     chain_type="stuff",
- #     retriever=retriever,
- #     return_source_documents=True,
- #     chain_type_kwargs=chain_type_kwargs,
- #     verbose=True
- # )
-
- # response = qa(query)
-
- # print(response)
-
- sample_prompts = ["what is the fastest speed for a greyhound dog?", "Why should we not feed chocolates to the dogs?", "Name two factors which might contribute to why some dogs might get scared?"]
-
- def get_response(input):
-     query = input
-     chain_type_kwargs = {"prompt": prompt}
-     qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever, return_source_documents=True, chain_type_kwargs=chain_type_kwargs, verbose=True)
-     response = qa(query)
-     return response
-
- input = gr.Text(
-     label="Prompt",
-     show_label=False,
-     max_lines=1,
-     placeholder="Enter your prompt",
-     container=False,
- )
-
- iface = gr.Interface(fn=get_response,
-     inputs=input,
-     outputs="text",
-     title="My Dog PetCare Bot",
-     description="This is a RAG implementation based on Zephyr 7B Beta LLM.",
-     examples=sample_prompts,
-     allow_screenshot=False,
-     allow_flagging=False
- )
-
- iface.launch()
  import gradio as gr
+ from datasets import load_dataset
+
+ import os
+ import spaces
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
+ import torch
+ from threading import Thread
+ from sentence_transformers import SentenceTransformer
+ from datasets import load_dataset
+ import time
+
+ token = os.environ["HF_TOKEN"]
+ ST = SentenceTransformer("BM-K/KoSimCSE-roberta-multitask")
+
+ dataset = load_dataset("not-lain/wikipedia", revision="embedded")
+
+ data = dataset["train"]
+ data = data.add_faiss_index("embeddings")  # column name that has the embeddings of the dataset
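One assumption baked into this setup is that the query encoder and the precomputed dataset embeddings are compatible: get_nearest_examples compares raw vectors, so the SentenceTransformer loaded above (BM-K/KoSimCSE-roberta-multitask) must produce embeddings with the same dimensionality as the "embeddings" column of not-lain/wikipedia. A minimal sanity-check sketch, assuming that column stores the document vectors as plain float lists:

# Sanity-check sketch (assumption: the "embeddings" column holds the vectors FAISS searches).
# A dimension mismatch between query and document embeddings breaks retrieval outright.
query_dim = ST.get_sentence_embedding_dimension()
doc_dim = len(data[0]["embeddings"])
assert query_dim == doc_dim, f"query encoder dim {query_dim} != dataset embedding dim {doc_dim}"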
+
+ model_id = "mintaeng/small_fut_final"
+
+ # use quantization to lower GPU usage
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     torch_dtype=torch.bfloat16,
+     device_map="auto",
+     quantization_config=bnb_config,
+     token=token
  )
+ terminators = [
+     tokenizer.eos_token_id,
+     tokenizer.convert_tokens_to_ids("<|eot_id|>")
+ ]
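"<|eot_id|>" is the Llama-3 end-of-turn token, so this list assumes mintaeng/small_fut_final ships a Llama-3 style tokenizer; if it does not, convert_tokens_to_ids falls back to the unknown-token id (or None) and a meaningless stop id ends up among the terminators. A defensive sketch under that assumption:

# Sketch: keep "<|eot_id|>" as a stop token only if the tokenizer actually defines it.
eot_id = tokenizer.convert_tokens_to_ids("<|eot_id|>")
terminators = [tokenizer.eos_token_id]
if eot_id is not None and eot_id != tokenizer.unk_token_id:
    terminators.append(eot_id)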
+
+ SYS_PROMPT = """You are an assistant for answering questions.
+ You are given the extracted parts of a long document and a question. Provide a conversational answer.
+ If you don't know the answer, just say "I do not know." Don't make up an answer."""
+
+
+ def search(query: str, k: int = 3):
+     """a function that embeds a new query and returns the most probable results"""
+     embedded_query = ST.encode(query)  # embed new query
+     scores, retrieved_examples = data.get_nearest_examples(  # retrieve results
+         "embeddings", embedded_query,  # compare our new embedded query with the dataset embeddings
+         k=k  # get only top k results
+     )
+     return scores, retrieved_examples
+
+ def format_prompt(prompt, retrieved_documents, k):
+     """using the retrieved documents we will prompt the model to generate our responses"""
+     PROMPT = f"Question:{prompt}\nContext:"
+     for idx in range(k):
+         PROMPT += f"{retrieved_documents['text'][idx]}\n"
+     return PROMPT
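Together these two helpers form the retrieval half of the pipeline: embed the question, pull the top-k passages from the FAISS index, and flatten their "text" fields into one prompt string. A quick way to exercise them outside the chat UI, reusing the app's own example prompt:

# Illustrative only: run retrieval and prompt formatting without launching Gradio.
scores, retrieved_documents = search("what's anarchy ? ", k=3)  # the app's own example prompt
print(scores)  # retrieval scores returned by the FAISS index
print(format_prompt("what's anarchy ? ", retrieved_documents, k=3))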
+
+
+ @spaces.GPU(duration=150)
+ def talk(prompt, history):
+     k = 1  # number of retrieved documents
+     scores, retrieved_documents = search(prompt, k)
+     formatted_prompt = format_prompt(prompt, retrieved_documents, k)
+     formatted_prompt = formatted_prompt[:2000]  # to avoid GPU OOM
+     messages = [{"role": "system", "content": SYS_PROMPT}, {"role": "user", "content": formatted_prompt}]
+     # tell the model to generate
+     input_ids = tokenizer.apply_chat_template(
+         messages,
+         add_generation_prompt=True,
+         return_tensors="pt"
+     ).to(model.device)
+     outputs = model.generate(
+         input_ids,
+         max_new_tokens=1024,
+         eos_token_id=terminators,
+         do_sample=True,
+         temperature=0.6,
+         top_p=0.9,
+     )
+     streamer = TextIteratorStreamer(
+         tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
+     )
+     generate_kwargs = dict(
+         input_ids=input_ids,
+         streamer=streamer,
+         max_new_tokens=1024,
+         do_sample=True,
+         top_p=0.95,
+         temperature=0.75,
+         eos_token_id=terminators,
+     )
+     t = Thread(target=model.generate, kwargs=generate_kwargs)
+     t.start()
+
+     outputs = []
+     for text in streamer:
+         outputs.append(text)
+         print(outputs)
+         yield "".join(outputs)
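As committed, talk() runs generation twice: the first model.generate call blocks until it finishes and its output is never used, because only the threaded call feeding TextIteratorStreamer reaches the UI. Dropping the blocking call would roughly halve the GPU time spent per request; a trimmed sketch of just the streaming part of the function body:

# Sketch: stream-only generation; the earlier blocking model.generate(...) call is redundant.
streamer = TextIteratorStreamer(
    tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True
)
generate_kwargs = dict(
    input_ids=input_ids,
    streamer=streamer,
    max_new_tokens=1024,
    do_sample=True,
    top_p=0.95,
    temperature=0.75,
    eos_token_id=terminators,
)
Thread(target=model.generate, kwargs=generate_kwargs).start()

partial = []
for text in streamer:
    partial.append(text)
    yield "".join(partial)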
+
+
+ TITLE = "# RAG"
+
+ DESCRIPTION = """
+ A RAG pipeline with a chatbot feature
+ Resources used to build this project:
+ * embedding model : https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1
+ * dataset : https://huggingface.co/datasets/not-lain/wikipedia
+ * faiss docs : https://huggingface.co/docs/datasets/v2.18.0/en/package_reference/main_classes#datasets.Dataset.add_faiss_index
+ * chatbot : https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct
+ * Full documentation : https://huggingface.co/blog/not-lain/rag-chatbot-using-llama3
+ """
+
+
+ demo = gr.ChatInterface(
+     fn=talk,
+     chatbot=gr.Chatbot(
+         show_label=True,
+         show_share_button=True,
+         show_copy_button=True,
+         likeable=True,
+         layout="bubble",
+         bubble_full_width=False,
+     ),
+     theme="Soft",
+     examples=[["what's anarchy ? "]],
+     title=TITLE,
+     description=DESCRIPTION,
+ )
+ demo.launch(debug=True)