AdiOO7 committed on
Commit 9fb4aee
1 Parent(s): 5d00781

Delete app.py

Files changed (1)
  1. app.py +0 -89
app.py DELETED
@@ -1,89 +0,0 @@
- from transformers import AutoModel
- import torch
- import transformers
- from transformers import AutoTokenizer, AutoModelForCausalLM
- from sklearn.metrics.pairwise import cosine_similarity
- from sentence_transformers import SentenceTransformer
-
- import os
- import gdown
- import warnings
- import openai
- import pandas as pd
- import gradio as gr
-
- warnings.filterwarnings("ignore")
-
- # Read the OpenAI key from the environment rather than hardcoding a secret in the source.
- openai.api_key = os.environ.get("OPENAI_API_KEY")
-
-
- # Build the instruct-style prompt sent to the OpenAI completion endpoint.
- def generate_prompt(question):
-     prompt = f"""
- ### <instruction>: Give a suitable answer for the question asked.
- ### <human>: {question}
- ### <assistant>:
- """.strip()
-     return prompt
-
- # Download the Q&A sheet from Google Drive.
- file_id = '1CjJ-CQhZyr8QowwSksw5uo7O9OYgbq96'
-
- url = f'https://drive.google.com/uc?id={file_id}'
-
- output_file = 'data.xlsx'
-
- gdown.download(url, output_file, quiet=False)
-
- # The downloaded file is parsed as CSV; if the Drive file is a true .xlsx, use pd.read_excel instead.
- df = pd.read_csv(output_file, encoding='latin-1')
-
- df.head()
-
- # Collect the stored questions for embedding.
- sentences = []
- for row in df['QUESTION']:
-     sentences.append(row)
-
-
- # Embed every stored question with a sentence-transformer model.
- model_encode = SentenceTransformer('sentence-transformers/all-mpnet-base-v2')
- embeddings = model_encode.encode(sentences)
-
- # Indices of rows whose ORIGINAL/SYNONYM column is marked "Original".
- answer = []
- for index, val in enumerate(df['ORIGINAL/SYNONYM']):
-     if str(val) == "Original":
-         answer.append(index)
-
- # Return the closest stored solution when the match is strong enough,
- # otherwise fall back to the OpenAI completion endpoint.
- def answer_prompt(text):
-
-     ind, sim = 0, 0
-     text_embedding = model_encode.encode(text)
-
-     # Find the stored question most similar to the user's text.
-     for index, val in enumerate(embeddings):
-         res = cosine_similarity(text_embedding.reshape(1, -1), val.reshape(1, -1))
-         if res[0][0] > sim:
-             sim = res[0][0]
-             ind = index
-
-     # Map the matched row back to the "Original" row that owns its solution.
-     for i in range(len(answer)):
-         if answer[i] > ind:
-             original_ind = answer[i - 1]
-             break
-     else:
-         original_ind = answer[-1] if answer else ind
-
-     bot_response = (
-         'This Solution is Extracted from the Database' + '\n'
-         + f'Similarity Score is {round(sim * 100)} %' + '\n'
-         + f'The issue is raised for {df["TECHNOLOGY"][original_ind]}' + '\n'
-         + df['SOLUTION'][original_ind]
-     )
-
-     if sim > 0.5:
-         return bot_response
-
-     else:
-         prompt = generate_prompt(text)
-         # Legacy (pre-1.0) openai Completion API.
-         response = openai.Completion.create(
-             engine="gpt-3.5-turbo-instruct",
-             prompt=prompt,
-             max_tokens=1024,
-             top_p=0.7,
-             temperature=0.3,
-             presence_penalty=0.7,
-         )
-
-         return 'This response is generated by GPT 3.5 Turbo LLM' + '\n' + response['choices'][0]['text']
-
- # Gradio UI: free-text issue in, generated solution out.
- iface = gr.Interface(fn=answer_prompt,
-                      inputs=gr.Textbox(lines=10, label="Enter Your Issue"),
-                      outputs=gr.Textbox(lines=10, label="Generated Solution"))
-
- iface.launch(inline=False)