Update app.py
app.py CHANGED
@@ -3,9 +3,13 @@ from transformers import GPT2Tokenizer, GPT2LMHeadModel, AutoModelForSeq2SeqLM,
 import torch
 from langchain.memory import ConversationBufferMemory
 
+# Move model to device (GPU if available)
+device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
+
 # Load the tokenizer and model for DistilGPT-2
 tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2")
 model = GPT2LMHeadModel.from_pretrained("distilgpt2")
+model.to(device)
 
 # Load summarization model (e.g., T5-small)
 summarizer_tokenizer = AutoTokenizer.from_pretrained("t5-small")
@@ -20,10 +24,6 @@ def summarize_history(history):
     summary = summarizer_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
     return summary
 
-# Move model to device (GPU if available)
-device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
-model.to(device)
-
 # Set up conversational memory using LangChain's ConversationBufferMemory
 memory = ConversationBufferMemory()
 
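In effect, the commit hoists the device selection above the model load and calls model.to(device) immediately after from_pretrained, so the DistilGPT-2 model sits on the GPU (when one is available) before any generation runs. Below is a minimal sketch of how this section of app.py reads after the change. The diff only shows the last two lines of summarize_history, so the encoding and generation steps inside it, and the summarizer_model variable name, are assumptions for illustration, not the Space's actual code.

import torch
from transformers import (
    GPT2Tokenizer,
    GPT2LMHeadModel,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
)
from langchain.memory import ConversationBufferMemory

# Pick the device first so it exists before any model is loaded.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

# Load the tokenizer and model for DistilGPT-2, moving the model to the
# chosen device immediately after loading (the change this commit makes).
tokenizer = GPT2Tokenizer.from_pretrained("distilgpt2")
model = GPT2LMHeadModel.from_pretrained("distilgpt2")
model.to(device)

# Load summarization model (T5-small). The model variable name here is an
# assumption; the diff only shows the tokenizer being loaded.
summarizer_tokenizer = AutoTokenizer.from_pretrained("t5-small")
summarizer_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

def summarize_history(history):
    # Assumed body: T5 expects a task prefix, and long histories are
    # truncated to the model's input limit before generation.
    inputs = summarizer_tokenizer(
        "summarize: " + history, return_tensors="pt", truncation=True, max_length=512
    )
    summary_ids = summarizer_model.generate(inputs["input_ids"], max_length=60)
    # These last two lines are the ones visible in the diff.
    summary = summarizer_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    return summary

# Set up conversational memory using LangChain's ConversationBufferMemory
memory = ConversationBufferMemory()

Defining device before any from_pretrained call keeps it in scope for later additions (for example, moving the summarizer to the same device), though per the diff only the DistilGPT-2 model is moved here.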