Komal-patra committed on
Commit dea4866
1 Parent(s): 64ba630
Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -7,7 +7,7 @@ from peft import PeftModel, PeftConfig
 token = os.environ.get("token")
 login(token)
 print("login is succesful")
-max_length=512
+max_length=150
 
 MODEL_NAME = "google/flan-t5-base"
 tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME, token=token)
@@ -17,7 +17,7 @@ model = PeftModel.from_pretrained(base_model, "Komal-patra/results")
 
 #gr.Interface.from_pipeline(pipe).launch()
 
-def generate_text(prompt, max_length=512):
+def generate_text(prompt, max_length=150):
     """Generates text using the PEFT model.
     Args:
         prompt (str): The user-provided prompt to start the generation.
@@ -35,7 +35,7 @@ def generate_text(prompt, max_length=512):
         input_ids = inputs["input_ids"],
         max_length=max_length,
         num_beams=1,
-        repetition_penalty=1.0
+        repetition_penalty=2.2
     )
 
     print(outputs)
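
Net effect of the commit, as a minimal sketch of generate_text after this change. Only the parameter values visible in the diff come from the commit; the tokenizer call and the decode/return step fall outside the changed hunks and are assumed here.

# Sketch of generate_text as app.py would read after this commit.
# The tokenizer call and the decode step are reconstructions, not part of the diff.
def generate_text(prompt, max_length=150):
    """Generates text using the PEFT model."""
    inputs = tokenizer(prompt, return_tensors="pt")  # assumed: standard T5 tokenization
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        max_length=max_length,        # lowered from 512 to 150 in this commit
        num_beams=1,
        repetition_penalty=2.2,       # raised from 1.0 in this commit
    )
    print(outputs)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)  # assumed decode step

A repetition_penalty above 1.0 down-weights tokens that have already appeared, so raising it from 1.0 to 2.2 alongside the shorter max_length should curb the repetitive output that greedy decoding (num_beams=1) tends to produce.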