Arturo Jiménez de los Galanes Reguillos committed
Commit e73ae0d
1 Parent(s): 41caafb

Update model to NTQAI/Nxcode-CQ-7B-orpo

Files changed (1)
  1. app.py +9 -16
app.py CHANGED
@@ -2,10 +2,8 @@ import gradio as gr
 from huggingface_hub import login
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 from threading import Thread
-import torch
 
-MODEL = "m-a-p/OpenCodeInterpreter-DS-33B"
-CHAT_TEMPLATE = "{%- set found_item = false -%}\n{%- for message in messages -%}\n {%- if message['role'] == 'system' -%}\n {%- set found_item = true -%}\n {%- endif -%}\n{%- endfor -%}\n{%- if not found_item -%}\n{{'You are an AI programming assistant, utilizing the Deepseek Coder model, developed by Deepseek Company, and you only answer questions related to computer science. For politically sensitive questions, security and privacy issues, and other non-computer science questions, you will refuse to answer.\\n'}}\n{%- endif %}\n{%- for message in messages %}\n {%- if message['role'] == 'system' %}\n{{ message['content'] }}\n {%- else %}\n {%- if message['role'] == 'user' %}\n{{'### Instruction:\\n' + message['content'] + '\\n'}}\n {%- else %}\n{{'### Response:\\n' + message['content'] + '\\n<|EOT|>\\n'}}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{{'### Response:\\n'}}\n"
+MODEL = "NTQAI/Nxcode-CQ-7B-orpo"
 
 system_message = "You are a computer programmer that can translate python code to C++ in order to improve performance"
 
@@ -22,31 +20,26 @@ def messages_for(python):
     ]
 
 tokenizer = AutoTokenizer.from_pretrained(MODEL)
-tokenizer.chat_template = CHAT_TEMPLATE
-model = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype=torch.bfloat16, device_map="auto")
-model.eval()
+model = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype="auto", device_map="auto")
 
 decode_kwargs = dict(skip_special_tokens=True)
 streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, decode_kwargs=decode_kwargs)
 
 cplusplus = None
 def translate(python):
-    formatted_prompt = tokenizer.apply_chat_template(
+    inputs = tokenizer.apply_chat_template(
         messages_for(python),
-        tokenize=False,
         add_generation_prompt=True,
-        return_tensors="pt")
-    inputs = tokenizer(formatted_prompt, return_tensors="pt", padding=True).to(model.device)
-    attention_mask = inputs.attention_mask
-    input_ids = inputs.input_ids
+        return_tensors="pt").to(model.device)
 
     generation_kwargs = dict(
-        input_ids=input_ids,
+        inputs,
         streamer=streamer,
-        attention_mask=attention_mask,
-        max_new_tokens=1024,
+        max_new_tokens=512,
         do_sample=False,
-        pad_token_id=tokenizer.eos_token_id,
+        top_k=50,
+        top_p=0.95,
+        num_sequences=1,
         eos_token_id=tokenizer.eos_token_id,
     )
 
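For context, here is a minimal sketch of how the updated translate() flow could fit together end to end. The thread launch and the streaming yield loop are not part of the hunks above; they are assumptions based on the Thread import and the TextIteratorStreamer already present in app.py. The sketch also passes the prompt tensor as input_ids= and sets skip_special_tokens directly on the streamer rather than mirroring the commit line for line.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

MODEL = "NTQAI/Nxcode-CQ-7B-orpo"

tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(MODEL, torch_dtype="auto", device_map="auto")
# Extra keyword arguments to TextIteratorStreamer are forwarded to tokenizer.decode().
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

system_message = "You are a computer programmer that can translate python code to C++ in order to improve performance"

def messages_for(python):
    # Chat layout used by app.py: a system prompt plus the user's Python source.
    return [
        {"role": "system", "content": system_message},
        {"role": "user", "content": python},
    ]

def translate(python):
    # apply_chat_template with return_tensors="pt" returns the prompt token ids directly.
    input_ids = tokenizer.apply_chat_template(
        messages_for(python),
        add_generation_prompt=True,
        return_tensors="pt").to(model.device)

    generation_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=512,
        do_sample=False,
        eos_token_id=tokenizer.eos_token_id,
    )

    # generate() blocks until completion, so it runs in a background thread
    # while the streamer is drained incrementally for the Gradio output box.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    cplusplus = ""
    for chunk in streamer:
        cplusplus += chunk
        yield cplusplus

Wired into Gradio, translate would be used as a generator callback, e.g. gr.Interface(fn=translate, inputs="text", outputs="text").launch(), so the C++ output appears incrementally as it is generated.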