Arturo Jiménez de los Galanes Reguillos committed on
Commit 47728bf
1 Parent(s): efce22a

Add streaming again

Files changed (1)
  1. app.py +3 -15
app.py CHANGED
@@ -30,8 +30,7 @@ decode_kwargs = dict(skip_special_tokens=True)
 streamer = TextIteratorStreamer(tokenizer, decode_kwargs=decode_kwargs)
 
 cplusplus = None
-def translate(python, progress=gr.Progress()):
-    progress(0, desc="Starting")
+def translate(python):
     formatted_prompt = tokenizer.apply_chat_template(
         messages_for(python),
         tokenize=False,
@@ -41,7 +40,7 @@ def translate(python, progress=gr.Progress()):
     attention_mask = inputs.attention_mask
     input_ids = inputs.input_ids
 
-    outputs = model.generate(
+    generation_kwargs = dict(
         input_ids,
         attention_mask=attention_mask,
         max_new_tokens=1024,
@@ -49,24 +48,13 @@ def translate(python, progress=gr.Progress()):
         pad_token_id=tokenizer.eos_token_id,
         eos_token_id=tokenizer.eos_token_id,
     )
-    progress(1, desc="Finished")
-    return tokenizer.decode(outputs[0][len(input_ids[0]):], skip_special_tokens=True)
-    '''
-    generation_kwargs = dict(
-        inputs,
-        streamer=streamer,
-        max_new_tokens=1024,
-        do_sample=False,
-        pad_token_id=tokenizer.eos_token_id,
-        eos_token_id=tokenizer.eos_token_id
-    )
+
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
     cplusplus = ""
     for chunk in streamer:
         cplusplus += chunk
         yield cplusplus
-    '''
 
 demo = gr.Interface(fn=translate, inputs="code", outputs="markdown")
 demo.launch()
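
For reference, a minimal self-contained sketch of the streaming pattern this commit restores: model.generate runs in a background Thread while TextIteratorStreamer yields decoded chunks that a Gradio generator function re-emits incrementally. The model id, the inline message construction (app.py uses its own messages_for helper), and skip_prompt=True are illustrative assumptions, not the app's actual values.

# Sketch only; assumes transformers, torch and gradio are installed.
from threading import Thread

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

MODEL_ID = "Qwen/Qwen2.5-Coder-0.5B-Instruct"  # placeholder model, not necessarily the one app.py loads

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)


def translate(python):
    # app.py builds the chat with messages_for(python); this inline prompt stands in for it.
    messages = [{"role": "user", "content": f"Port this Python code to C++:\n{python}"}]
    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(prompt, return_tensors="pt")

    # skip_prompt=True keeps the echoed prompt out of the stream;
    # skip_special_tokens is forwarded to tokenizer.decode for each chunk.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    generation_kwargs = dict(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        streamer=streamer,  # generate() pushes decoded text into the streamer
        max_new_tokens=1024,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )

    # generate() blocks, so it runs in a worker thread while this generator yields.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    cplusplus = ""
    for chunk in streamer:
        cplusplus += chunk
        yield cplusplus  # Gradio re-renders the markdown output on every yield


demo = gr.Interface(fn=translate, inputs="code", outputs="markdown")
demo.launch()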