barathm111 committed
Commit 7eaa6f3
Parent(s): 8c4195c
Upload app.py
app.py CHANGED
@@ -1,14 +1,23 @@
 import transformers
 import torch
 import gradio as gr
+import os
 
-#
+# Retrieve Hugging Face API token from environment variable
+hf_token = os.getenv("HF_TOKEN")
+
+# Ensure the token is available
+if not hf_token:
+    raise ValueError("Hugging Face token not found. Please add it to the secrets in Hugging Face Spaces.")
+
+# Load the chatbot model with the token (for private models or usage limits)
 model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
 pipeline = transformers.pipeline(
     "text-generation",
     model=model_id,
     model_kwargs={"torch_dtype": torch.bfloat16},
     device_map="auto",
+    use_auth_token=hf_token  # Use the Hugging Face token here
 )
 
 # Function to calculate scores and rankings
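For reference, a minimal sketch of how the pipeline configured above can be invoked. This is not part of the commit: it assumes `pipeline` is the text-generation pipeline defined in app.py above, and the example messages and sampling parameters are illustrative, following the usage pattern from the Meta-Llama-3-8B-Instruct model card.

# Sketch: build a chat prompt with the model's chat template and generate a reply.
# Assumes `pipeline` is the transformers text-generation pipeline defined above.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = pipeline.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
outputs = pipeline(prompt, max_new_tokens=128, do_sample=True, temperature=0.6, top_p=0.9)
# The pipeline returns the prompt plus the completion; slice off the prompt.
print(outputs[0]["generated_text"][len(prompt):])

Note that newer transformers releases deprecate the use_auth_token argument in favor of token=hf_token; the older name used in this commit still works on those versions but emits a deprecation warning.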