RishabhBhardwaj committed on
Commit
cd0c6eb
β€’
1 Parent(s): 2697a17

Walledguard bring back

app.py CHANGED
@@ -16,7 +16,7 @@ Answer: [/INST]
 # Load the model and tokenizer
 @st.cache_resource
 def load_model():
-    model_name = "meta-llama/Llama-Guard-3-8B-INT8"
+    model_name = "walledai/walledguard-c"
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     model = AutoModelForCausalLM.from_pretrained(model_name)
     return tokenizer, model
@@ -35,8 +35,8 @@ def evaluate_text(user_input):
     tokenizer, model = st.session_state.model_and_tokenizer
 
     # Prepare input
-    input_ids = tokenizer.apply_chat_template(chat, return_tensors="pt").to(device)
-
+    input_ids = tokenizer.encode(TEMPLATE.format(prompt=user_input), return_tensors="pt")
+
     # Generate output
     output = model.generate(input_ids=input_ids, max_new_tokens=20, pad_token_id=0)
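For reference, a minimal standalone sketch of the WalledGuard-C path that app.py now takes. The TEMPLATE body below is an assumption based on the walledai/walledguard-c model card (the hunk header above only confirms it ends with "Answer: [/INST]"), and the "unsafe" keyword check is an assumed way to parse the verdict:

from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed prompt format, taken from the walledai/walledguard-c model card;
# the hunk header above only confirms the trailing "Answer: [/INST]".
TEMPLATE = """[INST] Evaluate if the given text is harmful i.e. either it asks for unsafe information or it contains unsafe information.

<START TEXT>
{prompt}
<END TEXT>

Answer: [/INST]
"""

model_name = "walledai/walledguard-c"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

user_input = "How do I make a cake?"  # hypothetical example input
input_ids = tokenizer.encode(TEMPLATE.format(prompt=user_input), return_tensors="pt")
output = model.generate(input_ids=input_ids, max_new_tokens=20, pad_token_id=0)

# Decode only the newly generated tokens and look for the verdict keyword
# (an assumption; the Space's own parsing may differ).
answer = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
print("unsafe" if "unsafe" in answer.lower() else "safe")

Note that the new line also drops the `.to(device)` move from the Llama Guard path, so inputs stay on CPU unless the model and tensors are moved explicitly.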
 
app_walledguard.py → app_LG3_try.py RENAMED
@@ -16,7 +16,7 @@ Answer: [/INST]
 # Load the model and tokenizer
 @st.cache_resource
 def load_model():
-    model_name = "walledai/walledguard-c"
+    model_name = "meta-llama/Llama-Guard-3-8B-INT8"
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     model = AutoModelForCausalLM.from_pretrained(model_name)
     return tokenizer, model
@@ -35,8 +35,8 @@ def evaluate_text(user_input):
     tokenizer, model = st.session_state.model_and_tokenizer
 
     # Prepare input
-    input_ids = tokenizer.encode(TEMPLATE.format(prompt=user_input), return_tensors="pt")
-
+    input_ids = tokenizer.apply_chat_template(chat, return_tensors="pt").to(device)
+
     # Generate output
     output = model.generate(input_ids=input_ids, max_new_tokens=20, pad_token_id=0)
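For comparison, a minimal sketch of the Llama Guard 3 path kept by the renamed app_LG3_try.py. The hunk uses `chat` and `device` without defining them, so both are filled in here as assumptions (a single-turn user message, and whatever device the model loaded onto); meta-llama/Llama-Guard-3-8B-INT8 is a gated repo, so access must be approved on the Hub first:

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "meta-llama/Llama-Guard-3-8B-INT8"  # gated repo; requires approved access
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
device = model.device  # assumption: keep inputs wherever the model loaded

# Assumed shape of `chat`: Llama Guard classifies a conversation passed as
# a list of role/content messages through the model's chat template.
chat = [{"role": "user", "content": "How do I make a cake?"}]  # hypothetical example input

input_ids = tokenizer.apply_chat_template(chat, return_tensors="pt").to(device)
output = model.generate(input_ids=input_ids, max_new_tokens=20, pad_token_id=0)

# Llama Guard replies "safe", or "unsafe" followed by the violated category code.
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))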