Update app.py
app.py CHANGED
@@ -2,19 +2,23 @@ import streamlit as st
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 from sentence_transformers import SentenceTransformer
 from langchain.vectorstores import Chroma
-import os
-import psutil
 import gc
+import psutil
 
-#
+# Model ID (must be a publicly available model)
 model_id = "hewoo/hehehehe"
 
-#
+# Memory monitoring function
+def monitor_memory():
+    memory_info = psutil.virtual_memory()
+    st.write(f"현재 메모리 사용량: {memory_info.percent}%")
+
+# Load the model and pipeline using the cache
 @st.cache_resource
 def load_model():
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     model = AutoModelForCausalLM.from_pretrained(model_id)
-    return pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=150)
+    return pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=150, temperature=0.5, top_p=0.85, top_k=40, repetition_penalty=1.2)
 
 # Custom embedding class
 class CustomEmbedding:
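Note on the expanded `pipeline(...)` call: in `transformers`, `temperature`, `top_p`, and `top_k` only influence generation when sampling is enabled; under the default greedy decoding they are ignored (newer versions warn about this). A minimal variant with sampling switched on explicitly, offered as a suggestion rather than as part of this commit:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

def load_model(model_id="hewoo/hehehehe"):
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)
    # do_sample=True is the one assumption added here: without it,
    # transformers decodes greedily and temperature/top_p/top_k are ignored.
    return pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=150,
        do_sample=True,
        temperature=0.5,
        top_p=0.85,
        top_k=40,
        repetition_penalty=1.2,
    )
```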
@@ -22,18 +26,18 @@ class CustomEmbedding:
         self.model = model
 
     def embed_query(self, text):
-        return self.model.encode(text, convert_to_tensor=
+        return self.model.encode(text, convert_to_tensor=False).tolist()
 
     def embed_documents(self, texts):
-        return [self.model.encode(text, convert_to_tensor=
+        return [self.model.encode(text, convert_to_tensor=False).tolist() for text in texts]
 
-# Embedding model and vector store setup
+# Korean embedding model and vector store setup
 @st.cache_resource
 def load_embedding_model():
     return SentenceTransformer("jhgan/ko-sroberta-multitask")
 
 @st.cache_resource
-def load_vectorstore(_embedding_model):
+def load_vectorstore(_embedding_model):  # renamed to _embedding_model
     embedding_function = CustomEmbedding(_embedding_model)
     return Chroma(persist_directory="./chroma_batch_vectors", embedding_function=embedding_function)
 
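Two details in this hunk are easy to miss. The leading underscore in `load_vectorstore(_embedding_model)` is Streamlit convention: `@st.cache_resource` hashes a function's arguments to key its cache, and an underscore-prefixed parameter is excluded from hashing, which matters here because a `SentenceTransformer` instance is not hashable. And `CustomEmbedding` exists to satisfy the embedding interface LangChain's `Chroma` expects: `embed_query` returns a flat list of floats, `embed_documents` a list of such lists. A standalone sketch of that contract, using the same model as the app:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("jhgan/ko-sroberta-multitask")

# embed_query -> list[float]
query_vec = model.encode("sample query", convert_to_tensor=False).tolist()
# embed_documents -> list[list[float]]
doc_vecs = [model.encode(t, convert_to_tensor=False).tolist()
            for t in ["first document", "second document"]]

print(len(query_vec))                   # embedding dimension, 768 for this model
print(len(doc_vecs), len(doc_vecs[0]))  # 2 768
```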
@@ -56,8 +60,7 @@ def generate_response(user_input):
     답변:"""
 
     response = pipe(prompt)[0]["generated_text"]
-
-    return response, context
+    return response
 
 # Load the model and embedding model
 pipe = load_model()
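One caveat on the simplified `return response`: a `text-generation` pipeline returns the prompt plus the completion in `generated_text` by default, so the UI below will echo the whole RAG prompt back to the user. `transformers` supports `return_full_text=False` to return only the completion; a sketch of that variant inside `generate_response` (a suggestion, not part of this commit):

```python
# Inside generate_response(), after the prompt has been built:
# return_full_text=False strips the prompt from the pipeline output,
# leaving only the model's answer.
response = pipe(prompt, return_full_text=False)[0]["generated_text"]
return response
```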
@@ -66,20 +69,19 @@ vectorstore = load_vectorstore(embedding_model)
 
 # Streamlit app UI
 st.title("챗봇 데모")
-st.write("
+st.write("Llama 3.2-3B 모델을 사용한 챗봇입니다. 질문을 입력해 주세요.")
+
+monitor_memory()  # check memory usage
 
 # Get user input
 user_input = st.text_input("질문")
 if user_input:
-    response, context = generate_response(user_input)
+    response = generate_response(user_input)
     st.write("챗봇 응답:", response)
-
-
-# Memory status monitoring
-memory_usage = psutil.virtual_memory().used / (1024 ** 3)
-st.write(f"현재 메모리 사용량: {memory_usage:.2f} GB")
+    monitor_memory()  # update memory status
 
 # Free memory
 del response
 gc.collect()
 
+
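A final caveat on the cleanup block: `del response` runs at module level on every Streamlit rerun, while `response` is only bound inside `if user_input:`. On any rerun without input, including the first page load, this raises `NameError`. One way to keep the explicit cleanup without the crash is to scope it to the branch that creates the variable (a suggestion, not part of this commit):

```python
if user_input:
    response = generate_response(user_input)
    st.write("챗봇 응답:", response)
    monitor_memory()  # update memory status
    # Delete the reference in the same branch that created it, so the
    # cleanup never runs against an unbound name.
    del response
    gc.collect()
```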