shivam9980 committed on
Commit
7bf8bd1
1 Parent(s): 2161f81

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -7
app.py CHANGED
@@ -1,13 +1,12 @@
1
  # Load model directly
2
  import streamlit as st
3
 
4
- from unsloth import FastLanguageModel
5
- model, tokenizer = FastLanguageModel.from_pretrained(
6
- model_name = "shivam9980/mistral-7b-news", # YOUR MODEL YOU USED FOR TRAINING
7
- max_seq_length = 2048,
8
- dtype = None,
9
  load_in_4bit = True,)
10
- FastLanguageModel.for_inference(model) # Enable native 2x faster inference
11
 
12
  # alpaca_prompt = You MUST copy from above!
13
 
@@ -33,4 +32,8 @@ inputs = tokenizer(
33
 
34
  outputs = model.generate(**inputs, max_new_tokens = 64, use_cache = True)
35
  results= tokenizer.batch_decode(outputs)
36
- st.write(results)
 
 
 
 
 
1
  # Load model directly
2
  import streamlit as st
3
 
4
+ from peft import AutoPeftModelForCausalLM
5
+ from transformers import AutoTokenizer
6
+ model = AutoPeftModelForCausalLM.from_pretrained(
7
+ "shivam9980/mistral-7b-news", # YOUR MODEL YOU USED FOR TRAINING
 
8
  load_in_4bit = True,)
9
+ tokenizer = AutoTokenizer.from_pretrained("shivam9980/mistral-7b-news")
10
 
11
  # alpaca_prompt = You MUST copy from above!
12
 
 
32
 
33
  outputs = model.generate(**inputs, max_new_tokens = 64, use_cache = True)
34
  results= tokenizer.batch_decode(outputs)
35
+ out = results[0].split('\n')[-1]
36
+ st.text_area(label='Headline',value=out[:len(out)-4])
37
+
38
+
39
+