Update app.py
app.py
CHANGED
@@ -2,86 +2,54 @@ import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from sentence_transformers import SentenceTransformer
from langchain.vectorstores import Chroma
-import gc
-import psutil
+import os

-# Model ID
+# Hugging Face model ID
model_id = "hewoo/hehehehe"
+token = os.getenv("HF_API_TOKEN")  # the user can be asked for a Hugging Face API token if needed

-# Monitor memory usage
-def monitor_memory():
-    memory_info = psutil.virtual_memory()
-    st.write(f"Current memory usage: {memory_info.percent}%")
-
-# Load the model
-def load_model():
-    tokenizer = AutoTokenizer.from_pretrained(model_id)
-    model = AutoModelForCausalLM.from_pretrained(model_id)
-    return pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=150, temperature=0.5, top_p=0.85, top_k=40, repetition_penalty=1.2)
+# Load the model and tokenizer
+tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=token)
+model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=token)
+
+# Set up the text-generation pipeline
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=150, temperature=0.5, top_p=0.85, top_k=40, repetition_penalty=1.2)

-# Custom embedding class
+# Create a custom embedding class
class CustomEmbedding:
    def __init__(self, model):
        self.model = model

    def embed_query(self, text):
-        return self.model.encode(text, convert_to_tensor=True)
+        return self.model.encode(text, convert_to_tensor=True).tolist()

    def embed_documents(self, texts):
-        return [self.model.encode(text, convert_to_tensor=True) for text in texts]
+        return [self.model.encode(text, convert_to_tensor=True).tolist() for text in texts]

-# Load the embedding model
-def load_embedding_model():
-    return SentenceTransformer("jhgan/ko-sroberta-multitask")
-
-# Load the vector store
-def load_vectorstore(embedding_model):
-    embedding_function = CustomEmbedding(embedding_model)
-    return Chroma(persist_directory="./chroma_batch_vectors", embedding_function=embedding_function)
+# Set up the embedding model and vector store
+embedding_model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
+embedding_function = CustomEmbedding(embedding_model)
+
+# Set up the Chroma vector store
+persist_directory = "./chroma_batch_vectors"  # adjust as needed for the Spaces environment
+vectorstore = Chroma(persist_directory=persist_directory, embedding_function=embedding_function)
+retriever = vectorstore.as_retriever(search_kwargs={"k": 3})

# Generate a response to the user's question
def generate_response(user_input):
-    retriever = vectorstore.as_retriever(search_kwargs={"k": 3})
    search_results = retriever.get_relevant_documents(user_input)
    context = "\n".join([result.page_content for result in search_results])
-    prompt = f"""
-    Based on the given context, write an accurate and detailed answer to the user's question in Korean.
-    If the context contains no relevant information, answer "Sorry, but I cannot find an answer to that question."
-
-    Context:
-    {context}
-
-    Question:
-    {user_input}
-
-    Answer:"""
-
-    response = pipe(prompt)[0]["generated_text"]
+    input_text = f"Context: {context}\nQuestion: {user_input}"
+    response = pipe(input_text)[0]["generated_text"]
    return response

-# Load the model and the embedding models
-pipe = load_model()
-embedding_model = load_embedding_model()
-vectorstore = load_vectorstore(embedding_model)
-
# Streamlit app UI
-st.title("Chatbot")
+st.title("Chatbot test")
st.write("This is a chatbot built on the Llama 3.2-3B model. Please enter a question.")

-monitor_memory()  # check memory usage
-
# Get user input
user_input = st.text_input("Question")
if user_input:
    response = generate_response(user_input)
    st.write("Chatbot response:", response)
-    monitor_memory()  # update the memory status
-
-    # Free memory
-    del response
-    gc.collect()
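A few notes on the updated script. Streamlit re-executes the whole file on every interaction, and this commit moves the model load from a load_model() helper to module level, so the 3B checkpoint is reloaded on each rerun unless it is cached. A minimal sketch of caching the load with st.cache_resource (the load_pipeline name is illustrative, not part of this commit):

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

@st.cache_resource  # loaded once per process; later reruns reuse the same object
def load_pipeline(model_id, token=None):
    tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=token)
    model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=token)
    return pipeline("text-generation", model=model, tokenizer=tokenizer,
                    max_new_tokens=150, temperature=0.5, top_p=0.85,
                    top_k=40, repetition_penalty=1.2)

pipe = load_pipeline("hewoo/hehehehe")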
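On Spaces, os.getenv("HF_API_TOKEN") only returns a value if the token was added as a repository secret in the Space settings; otherwise token is None and gated models fail to load. Recent transformers releases also deprecate use_auth_token= in favor of token=, so on newer versions the same calls would read:

tokenizer = AutoTokenizer.from_pretrained(model_id, token=token)
model = AutoModelForCausalLM.from_pretrained(model_id, token=token)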
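The CustomEmbedding wrapper exists so the LangChain Chroma integration can call embed_query and embed_documents and get plain Python lists back instead of tensors. A quick sanity check of what it returns (the sample strings are illustrative):

from sentence_transformers import SentenceTransformer

emb = CustomEmbedding(SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2"))
query_vec = emb.embed_query("hello world")              # one vector as list[float]
doc_vecs = emb.embed_documents(["doc one", "doc two"])  # one vector per document
print(len(query_vec))  # 384 for all-MiniLM-L6-v2

One caveat with the model swap: vectors already persisted in ./chroma_batch_vectors must have been produced by the same model the app now loads. jhgan/ko-sroberta-multitask outputs 768-dimensional vectors and all-MiniLM-L6-v2 outputs 384-dimensional ones, so an index built with the old model cannot be queried with the new one.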
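The text-generation pipeline returns the prompt together with the completion in generated_text by default, so the chatbot's reply will start by echoing the whole "Context: ... Question: ..." block. If only the newly generated text is wanted, the pipeline's standard return_full_text flag strips the prompt:

# Return only the completion, not the echoed prompt
response = pipe(input_text, return_full_text=False)[0]["generated_text"]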
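Finally, depending on the pinned langchain version, retriever.get_relevant_documents(...) may emit a deprecation warning; newer releases expose the same behavior through invoke, and the Chroma class has moved to the langchain_community package:

search_results = retriever.invoke(user_input)  # replacement for get_relevant_documents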