RAMYASRI-39 committed
Commit: e74bd37
1 Parent(s): 41d17e7
Update app.py
app.py CHANGED
@@ -1,297 +1,297 @@
 import requests
 import gradio as gr
 from ragatouille import RAGPretrainedModel
 import logging
 from pathlib import Path
 from time import perf_counter
 from sentence_transformers import CrossEncoder
 from huggingface_hub import InferenceClient
 from jinja2 import Environment, FileSystemLoader
 import numpy as np
 from os import getenv
 from backend.query_llm import generate_hf, generate_qwen
 from backend.semantic_search import table, retriever
 from huggingface_hub import InferenceClient
 
 
 # Bhashini API translation function
 api_key = getenv('API_KEY')
 user_id = getenv('USER_ID')
 
 def bhashini_translate(text: str, from_code: str = "en", to_code: str = "hi") -> dict:
     """Translates text from source language to target language using the Bhashini API."""
 
     if not text.strip():
         print('Input text is empty. Please provide valid text for translation.')
         return {"status_code": 400, "message": "Input text is empty", "translated_content": None, "speech_content": None}
     else:
         print('Input text - ',text)
         print(f'Starting translation process from {from_code} to {to_code}...')
         print(f'Starting translation process from {from_code} to {to_code}...')
         gr.Warning(f'Translating to {to_code}...')
 
     url = 'https://meity-auth.ulcacontrib.org/ulca/apis/v0/model/getModelsPipeline'
     headers = {
         "Content-Type": "application/json",
         "userID": user_id,
         "ulcaApiKey": api_key
     }
     payload = {
         "pipelineTasks": [{"taskType": "translation", "config": {"language": {"sourceLanguage": from_code, "targetLanguage": to_code}}}],
         "pipelineRequestConfig": {"pipelineId": "64392f96daac500b55c543cd"}
     }
 
     print('Sending initial request to get the pipeline...')
     response = requests.post(url, json=payload, headers=headers)
 
     if response.status_code != 200:
         print(f'Error in initial request: {response.status_code}')
         return {"status_code": response.status_code, "message": "Error in translation request", "translated_content": None}
 
     print('Initial request successful, processing response...')
     response_data = response.json()
     service_id = response_data["pipelineResponseConfig"][0]["config"][0]["serviceId"]
     callback_url = response_data["pipelineInferenceAPIEndPoint"]["callbackUrl"]
 
     print(f'Service ID: {service_id}, Callback URL: {callback_url}')
 
     headers2 = {
         "Content-Type": "application/json",
         response_data["pipelineInferenceAPIEndPoint"]["inferenceApiKey"]["name"]: response_data["pipelineInferenceAPIEndPoint"]["inferenceApiKey"]["value"]
     }
     compute_payload = {
         "pipelineTasks": [{"taskType": "translation", "config": {"language": {"sourceLanguage": from_code, "targetLanguage": to_code}, "serviceId": service_id}}],
         "inputData": {"input": [{"source": text}], "audio": [{"audioContent": None}]}
     }
 
     print(f'Sending translation request with text: "{text}"')
     compute_response = requests.post(callback_url, json=compute_payload, headers=headers2)
 
     if compute_response.status_code != 200:
         print(f'Error in translation request: {compute_response.status_code}')
         return {"status_code": compute_response.status_code, "message": "Error in translation", "translated_content": None}
 
     print('Translation request successful, processing translation...')
     compute_response_data = compute_response.json()
     translated_content = compute_response_data["pipelineResponse"][0]["output"][0]["target"]
 
     print(f'Translation successful. Translated content: "{translated_content}"')
     return {"status_code": 200, "message": "Translation successful", "translated_content": translated_content}
 
 
 # Existing chatbot functions
 VECTOR_COLUMN_NAME = "vector"
 TEXT_COLUMN_NAME = "text"
 HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")
 proj_dir = Path(__file__).parent
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1", token=HF_TOKEN)
 env = Environment(loader=FileSystemLoader(proj_dir / 'templates'))
 
 template = env.get_template('template.j2')
 template_html = env.get_template('template_html.j2')
 
 # def add_text(history, text):
 #     history = [] if history is None else history
 #     history = history + [(text, None)]
 #     return history, gr.Textbox(value="", interactive=False)
 
 def bot(history, cross_encoder):
 
     top_rerank = 25
     top_k_rank = 20
     query = history[-1][0] if history else ''
     print('\nQuery: ',query )
     print('\nHistory:',history)
     if not query:
         gr.Warning("Please submit a non-empty string as a prompt")
         raise ValueError("Empty string was submitted")
 
     logger.warning('Retrieving documents...')
 
     if cross_encoder == '(HIGH ACCURATE) ColBERT':
         gr.Warning('Retrieving using ColBERT.. First time query will take a minute for model to load..pls wait')
         RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
         RAG_db = RAG.from_index('.ragatouille/colbert/indexes/cbseclass10index')
         documents_full = RAG_db.search(query, k=top_k_rank)
 
         documents = [item['content'] for item in documents_full]
         prompt = template.render(documents=documents, query=query)
         prompt_html = template_html.render(documents=documents, query=query)
 
         generate_fn = generate_hf
 
         history[-1][1] = ""
         for character in generate_fn(prompt, history[:-1]):
             history[-1][1] = character
             yield history, prompt_html
     else:
         document_start = perf_counter()
 
         query_vec = retriever.encode(query)
         doc1 = table.search(query_vec, vector_column_name=VECTOR_COLUMN_NAME).limit(top_k_rank)
 
         documents = table.search(query_vec, vector_column_name=VECTOR_COLUMN_NAME).limit(top_rerank).to_list()
         documents = [doc[TEXT_COLUMN_NAME] for doc in documents]
 
         query_doc_pair = [[query, doc] for doc in documents]
         if cross_encoder == '(FAST) MiniLM-L6v2':
             cross_encoder1 = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
         elif cross_encoder == '(ACCURATE) BGE reranker':
             cross_encoder1 = CrossEncoder('BAAI/bge-reranker-base')
 
         cross_scores = cross_encoder1.predict(query_doc_pair)
         sim_scores_argsort = list(reversed(np.argsort(cross_scores)))
 
         documents = [documents[idx] for idx in sim_scores_argsort[:top_k_rank]]
 
         document_time = perf_counter() - document_start
 
         prompt = template.render(documents=documents, query=query)
         prompt_html = template_html.render(documents=documents, query=query)
 
         #generate_fn = generate_hf
         generate_fn=generate_qwen
         # Create a new history entry instead of modifying the tuple directly
         new_history = history[:-1] + [ (prompt, "") ] # query replaced prompt
         output=''
         # for character in generate_fn(prompt, history[:-1]):
         #     #new_history[-1] = (query, character)
         #     output+=character
         output=generate_fn(prompt, history[:-1])
 
         print('Output:',output)
         new_history[-1] = (prompt, output) #query replaced with prompt
         print('New History',new_history)
         #print('prompt html',prompt_html)# Update the last tuple with new text
 
         history_list = list(history[-1])
         history_list[1] = output # Assuming `character` is what you want to assign
         # Update the history with the modified list converted back to a tuple
         history[-1] = tuple(history_list)
 
         #history[-1][1] = character
         # yield new_history, prompt_html
         yield history, prompt_html
     # new_history,prompt_html
     # history[-1][1] = ""
     # for character in generate_fn(prompt, history[:-1]):
     #     history[-1][1] = character
     #     yield history, prompt_html
 
 #def translate_text(response_text, selected_language):
 
 def translate_text(selected_language,history):
 
     iso_language_codes = {
         "Hindi": "hi",
         "Gom": "gom",
         "Kannada": "kn",
         "Dogri": "doi",
         "Bodo": "brx",
         "Urdu": "ur",
         "Tamil": "ta",
         "Kashmiri": "ks",
         "Assamese": "as",
         "Bengali": "bn",
         "Marathi": "mr",
         "Sindhi": "sd",
         "Maithili": "mai",
         "Punjabi": "pa",
         "Malayalam": "ml",
         "Manipuri": "mni",
         "Telugu": "te",
         "Sanskrit": "sa",
         "Nepali": "ne",
         "Santali": "sat",
         "Gujarati": "gu",
         "Odia": "or"
     }
 
     to_code = iso_language_codes[selected_language]
     response_text = history[-1][1] if history else ''
     print('response_text for translation',response_text)
     translation = bhashini_translate(response_text, to_code=to_code)
     return translation['translated_content']
 
 
 # Gradio interface
 with gr.Blocks(theme='gradio/soft') as CHATBOT:
     history_state = gr.State([])
     with gr.Row():
         with gr.Column(scale=10):
             gr.HTML(value="""<div style="color: #FF4500;"><h1>m-</h1>MITHRA<h1><span style="color: #008000">student Manual Chatbot </span></h1></div>""")
             gr.HTML(value=f"""<p style="font-family: sans-serif; font-size: 16px;">Using GenAI for CBIC Capacity Building - A free chat bot developed by National Customs Targeting Center using Open source LLMs for CBIC Officers</p>""")
             gr.HTML(value=f"""<p style="font-family: Arial, sans-serif; font-size: 14px;">Developed by NCTC,Mumbai. Suggestions may be sent to <a href="mailto:nctc-admin@gov.in" style="color: #00008B; font-style: italic;">nctc-admin@gov.in</a>.</p>""")
 
         with gr.Column(scale=3):
             gr.Image(value='logo.png', height=200, width=200)
 
     chatbot = gr.Chatbot(
         [],
         elem_id="chatbot",
         avatar_images=('https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg',
                        'https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg'),
         bubble_full_width=False,
         show_copy_button=True,
         show_share_button=True,
     )
 
     with gr.Row():
         txt = gr.Textbox(
             scale=3,
             show_label=False,
             placeholder="Enter text and press enter",
             container=False,
         )
         txt_btn = gr.Button(value="Submit text", scale=1)
 
     cross_encoder = gr.Radio(choices=['(FAST) MiniLM-L6v2', '(ACCURATE) BGE reranker', '(HIGH ACCURATE) ColBERT'], value='(ACCURATE) BGE reranker', label="Embeddings", info="Only First query to Colbert may take little time)")
     language_dropdown = gr.Dropdown(
         choices=[
             "Hindi", "Gom", "Kannada", "Dogri", "Bodo", "Urdu", "Tamil", "Kashmiri", "Assamese", "Bengali", "Marathi",
             "Sindhi", "Maithili", "Punjabi", "Malayalam", "Manipuri", "Telugu", "Sanskrit", "Nepali", "Santali",
             "Gujarati", "Odia"
         ],
         value="Hindi", # default to Hindi
         label="Select Language for Translation"
     )
 
     prompt_html = gr.HTML()
 
     translated_textbox = gr.Textbox(label="Translated Response")
     def update_history_and_translate(txt, cross_encoder, history_state, language_dropdown):
         print('History state',history_state)
         history = history_state
         history.append((txt, ""))
         #history_state.value=(history)
 
         # Call bot function
         # bot_output = list(bot(history, cross_encoder))
         bot_output = next(bot(history, cross_encoder))
         print('bot_output',bot_output)
         #history, prompt_html = bot_output[-1]
         history, prompt_html = bot_output
         print('History',history)
         # Update the history state
         history_state[:] = history
 
         # Translate text
         translated_text = translate_text(language_dropdown, history)
         return history, prompt_html, translated_text
 
     txt_msg = txt_btn.click(update_history_and_translate, [txt, cross_encoder, history_state, language_dropdown], [chatbot, prompt_html, translated_textbox])
     txt_msg = txt.submit(update_history_and_translate, [txt, cross_encoder, history_state, language_dropdown], [chatbot, prompt_html, translated_textbox])
 
-    examples = ['
-    '
+    examples = ['CAN U SAY THE DIFFERENCES BETWEEN METALS AND NON METALS?','WHAT IS IONIC BOND?',
+                'EXPLAIN ASEXUAL REPRODUCTION',
 
 
     gr.Examples(examples, txt)
 
 
 # Launch the Gradio application
 CHATBOT.launch(share=True,debug=True)
 
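A note on the added lines: the new `examples` list does not appear to be closed before `gr.Examples(examples, txt)` is called, so the file as committed would likely fail to parse. A minimal sketch of what the block presumably intends, with the closing bracket added and the indentation assumed to sit inside the `gr.Blocks` context; the prompts themselves are copied verbatim from the diff:

    examples = ['CAN U SAY THE DIFFERENCES BETWEEN METALS AND NON METALS?', 'WHAT IS IONIC BOND?',
                'EXPLAIN ASEXUAL REPRODUCTION']  # closing bracket added here; assumed intent, not part of the commit
    gr.Examples(examples, txt)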