# UNEP-decisions-qa / utils.py
# NOTE: the lines below were Hugging Face file-viewer chrome captured with the
# source (author: timeki, commit 26ed9d3 "init qa", 2.22 kB, raw/history/blame
# buttons); kept here as a comment so the module remains valid Python.
from langchain_openai import ChatOpenAI
from langchain.prompts.prompt import PromptTemplate
from typing import Tuple, List
from langchain.schema import format_document
import gradio as gr
from langchain.chat_models import ChatOpenAI
import os
from langchain_openai import ChatOpenAI
import os
# Default prompt used by _combine_documents: renders a document as just its raw page content.
DEFAULT_DOCUMENT_PROMPT = PromptTemplate.from_template(template="{page_content}")
def make_pairs(lst):
    """From a list of even length, make tuple pairs.

    Pairs consecutive elements: ``[a, b, c, d]`` -> ``[(a, b), (c, d)]``.

    Args:
        lst: a sequence of even length.

    Returns:
        List of 2-tuples of consecutive elements.

    Raises:
        ValueError: if ``lst`` has an odd number of elements (the original
            code raised a bare IndexError in that case).
    """
    if len(lst) % 2:
        raise ValueError("make_pairs expects a sequence of even length")
    return [(lst[i], lst[i + 1]) for i in range(0, len(lst), 2)]
def reset_textbox():
    """Clear the Gradio textbox by emitting an empty-string update."""
    cleared = gr.update(value="")
    return cleared
def _combine_documents(
    docs, document_prompt=DEFAULT_DOCUMENT_PROMPT, document_separator="\n\n"
):
    """Render each document with *document_prompt*, number it, and join all.

    Each document is wrapped as ``Document N: '''...'''`` (1-based N) and the
    rendered chunks are joined with *document_separator*.
    """
    rendered = (
        f"Document {idx}: \n'''\n{format_document(doc, document_prompt)}\n'''"
        for idx, doc in enumerate(docs, 1)
    )
    return document_separator.join(rendered)
def _format_chat_history(chat_history: List[Tuple]) -> str:
buffer = ""
for dialogue_turn in chat_history:
human = "Human: " + dialogue_turn[0]
ai = "Assistant: " + dialogue_turn[1]
buffer += "\n" + "\n".join([human, ai])
return buffer
def _format_chat_history(chat_history: List[Tuple]) -> str:
turn = 1
buffer = []
for dialogue in chat_history:
buffer.append(("Human: " if turn else "Assistant: ") + dialogue.content)
turn ^= 1
return "\n".join(buffer) + "\n"
def get_llm(model="gpt-4o-mini", max_tokens=1024, temperature=0.0, streaming=True, timeout=30, **kwargs):
    """Build a ChatOpenAI client with the given generation settings.

    Reads the API key from the ``OPENAI_API_KEY`` environment variable
    (``None`` if unset). Extra keyword arguments are forwarded to the
    ``ChatOpenAI`` constructor.
    """
    settings = {
        "model": model,
        "api_key": os.environ.get("OPENAI_API_KEY", None),
        "max_tokens": max_tokens,
        "streaming": streaming,
        "temperature": temperature,
        "timeout": timeout,
    }
    # Double-splat keeps the original duplicate-keyword behavior (TypeError
    # if a caller passes e.g. model= both positionally and via kwargs).
    return ChatOpenAI(**settings, **kwargs)
def make_html_source(source, i):
    """Render one retrieved document as an HTML card.

    Args:
        source: document object exposing ``page_content`` and a ``metadata``
            mapping (expects keys ``meeting_number``, ``Title``, ``Issues``).
        i: 1-based document index, used for the card's DOM id (``doc{i}``).

    Returns:
        An HTML ``<div class="card">`` snippet as a string.

    NOTE(review): the document text is interpolated without HTML escaping —
    presumably the corpus is trusted; confirm upstream sanitization.
    """
    meta = source.metadata
    content = source.page_content.strip()
    return f"""
<div class="card" id="doc{i}">
    <div class="card-content">
        <h2>Document {i} - Meeting {meta["meeting_number"]} - title {meta['Title']} - Issues {meta['Issues']}</h2>
        <p>{content}</p>
    </div>
</div>
"""