coffeeee committed on
Commit
1239a24
1 Parent(s): adafa1e
Files changed (1) hide show
  1. app.py +7 -4
app.py CHANGED
@@ -24,19 +24,22 @@ outputs = []
24
 
25
 
26
def generate_response(new_prompt):
    """Generate the next paragraph of the story and return the full text so far.

    Joins previously generated paragraphs into a context string, feeds it
    (plus ``new_prompt``) to the language model, stores the cleaned new
    paragraph in the module-level ``outputs`` list, and returns the whole
    story with doubled newlines between paragraphs.

    Side effect: appends to the global ``outputs`` list.
    """
    global outputs
    # NOTE(review): this keeps the FIRST int(1024 / response_length) + 1
    # paragraphs, not the most recent ones — confirm that is intended
    # (outputs[-n:] would keep the latest context instead).
    story_so_far = "\n".join(outputs[:int(1024 / response_length + 1)])
    # Re-seed the generator so repeated calls with the same prompt can differ.
    set_seed(random.randint(0, 4000000000))
    # Reserve response_length tokens of the 1024-token window for generation;
    # truncation drops the overflow from the encoded context.
    inputs = tokenizer.encode(story_so_far + '\n' + new_prompt if story_so_far else new_prompt,
                              return_tensors='pt', truncation=True,
                              max_length=1024 - response_length)
    output = model.generate(inputs, do_sample=True, generation_config=generation_config)
    # Slice off the echoed context (story_so_far plus the joining '\n')
    # from the decoded output so only the newly generated text remains.
    response = clean_paragraph(tokenizer.batch_decode(output)[0][((len(story_so_far) + 1) if story_so_far else 0):])
    outputs.append(response)
    # '\n' -> '\n\n' presumably renders paragraph breaks in the UI — confirm.
    return ((story_so_far + '\n' if story_so_far else '') + response).replace('\n', '\n\n')
41
 
42
  def undo():
 
24
 
25
 
26
def generate_response(new_prompt):
    """Generate the next paragraph of the story and return the full text so far.

    Joins previously generated paragraphs into a context string, feeds it
    (plus ``new_prompt``) to the language model, stores the cleaned new
    paragraph in the module-level ``outputs`` list, and returns the whole
    story with doubled newlines between paragraphs.

    Side effect: appends to the global ``outputs`` list.
    """
    # Fix: removed leftover debug statements print('a') ... print('g'),
    # which wrote noise to stdout on every generation call.
    global outputs
    # NOTE(review): this keeps the FIRST int(1024 / response_length) + 1
    # paragraphs, not the most recent ones — confirm that is intended
    # (outputs[-n:] would keep the latest context instead).
    story_so_far = "\n".join(outputs[:int(1024 / response_length + 1)])
    # Re-seed the generator so repeated calls with the same prompt can differ.
    set_seed(random.randint(0, 4000000000))
    # Reserve response_length tokens of the 1024-token window for generation;
    # truncation drops the overflow from the encoded context.
    inputs = tokenizer.encode(story_so_far + '\n' + new_prompt if story_so_far else new_prompt,
                              return_tensors='pt', truncation=True,
                              max_length=1024 - response_length)
    output = model.generate(inputs, do_sample=True, generation_config=generation_config)
    # Slice off the echoed context (story_so_far plus the joining '\n')
    # from the decoded output so only the newly generated text remains.
    response = clean_paragraph(tokenizer.batch_decode(output)[0][((len(story_so_far) + 1) if story_so_far else 0):])
    outputs.append(response)
    # '\n' -> '\n\n' presumably renders paragraph breaks in the UI — confirm.
    return ((story_so_far + '\n' if story_so_far else '') + response).replace('\n', '\n\n')
44
 
45
  def undo():