import os

import gradio as gr
from dotenv import load_dotenv
from langchain.prompts import PromptTemplate
from langchain_openai import OpenAIEmbeddings
from langchain_qdrant import QdrantVectorStore
from openai import OpenAI
from qdrant_client import QdrantClient

# Load the .env file before reading any environment variables.
# Expected variables: RUNPOD_KEY, RUNPOD_URL, QDRANT_URL, QDRANT_KEY, OPENAI_API_KEY.
load_dotenv()

# OpenAI-compatible client pointed at the RunPod endpoint serving the fine-tuned model.
RUNPOD_KEY = os.getenv("RUNPOD_KEY")
RUNPOD_URL = os.getenv("RUNPOD_URL")
model = OpenAI(api_key=RUNPOD_KEY, base_url=RUNPOD_URL)

# Qdrant credentials and the OpenAI key used for query embeddings.
QDRANT_URL = os.getenv("QDRANT_URL")
QDRANT_KEY = os.getenv("QDRANT_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

client = QdrantClient(QDRANT_URL, api_key=QDRANT_KEY)
# collection_name = "search_engine"
collection_name = "week_4_assesment_embeddings"
embeddings = OpenAIEmbeddings(
    model="text-embedding-3-small",
    openai_api_key=OPENAI_API_KEY,
)

qdrant = QdrantVectorStore(
    client=client,
    collection_name=collection_name,
    embedding=embeddings,
)
# Alpaca-style prompt: the user question fills the Instruction slot and the
# retrieved chunks fill the Input slot.
prompt_template_str = """
Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Input:
{input}
### Response:
"""

prompt = PromptTemplate(
    input_variables=["instruction", "input"],
    template=prompt_template_str,
)
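
# Two retrieval helpers follow: prompt_template() goes through the LangChain
# QdrantVectorStore wrapper, while prompt_top6() embeds the query itself and
# calls the Qdrant client directly; main() currently uses prompt_top6().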
def prompt_template(query):
    """Build the prompt from the top-6 chunks returned by the vector-store wrapper."""
    results = qdrant.similarity_search(query=query, k=6)
    _ctx = ''
    for i, result in enumerate(results):
        _ctx += f'Content {i}: {result.page_content}\n-----\n'
    _prompt = prompt.format(instruction=query, input=_ctx)
    return _prompt

def prompt_top6(text):
    """Embed the query, fetch the 6 nearest chunks from Qdrant, and fill the prompt."""
    query_embedding = embeddings.embed_query(text)
    search_results = client.search(
        collection_name=collection_name,
        query_vector=query_embedding,
        limit=6,
    )
    chunks = ''
    for result in search_results:
        chunks += f"Chunk: {result.payload['context']}\n"
        chunks += '-----\n'
    _prompt = prompt.format(instruction=text, input=chunks)
    return _prompt

def generate_response(prompt):
    """Send the assembled prompt to the fine-tuned Llama 3.1 model served on RunPod."""
    response = model.chat.completions.create(
        model="cenrak/llama3.1_fineTuned_model",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message

def main(query, history):
    """Gradio ChatInterface callback: retrieve context, build the prompt, generate an answer."""
    # prompt = prompt_template(query)
    prompt = prompt_top6(query)
    result = generate_response(prompt)
    return result.content
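
# Example question shown as a clickable suggestion in the ChatInterface.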
examples = [
"What is the Berry Export Summary 2028 and what is its purpose?",
]
demo = gr.ChatInterface(fn=main, title="Assignment 4 GPT", examples=examples)

if __name__ == "__main__":
    demo.launch()