Spaces:
Running
on
Zero
Running
on
Zero
Update voice_chat.py
Browse files — voice_chat.py (+2 −2)
voice_chat.py
CHANGED
@@ -21,7 +21,7 @@ tokenizer = spm.SentencePieceProcessor(hf_hub_download(model_name, "tokenizer.sp
|
|
21 |
|
22 |
# Mistral Model Configuration
|
23 |
client1 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
24 |
-
system_instructions1 = "
|
25 |
|
26 |
def resample(audio_fp32, sr):
|
27 |
return soxr.resample(audio_fp32, sr, sample_rate)
|
@@ -50,7 +50,7 @@ def transcribe(audio_path):
|
|
50 |
return text
|
51 |
|
52 |
def model(text):
|
53 |
-
formatted_prompt = system_instructions1 + text + "[
|
54 |
stream = client1.text_generation(formatted_prompt, max_new_tokens=512, stream=True, details=True, return_full_text=False)
|
55 |
return "".join([response.token.text for response in stream if response.token.text != "</s>"])
|
56 |
|
|
|
21 |
|
22 |
# Mistral Model Configuration
|
23 |
client1 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
24 |
+
system_instructions1 = "[SYSTEM] Answer as Real OpenGPT 4o, Made by 'KingNish', Keep conversation very short, clear, friendly and concise. The text provided is a request for a specific type of response from you, the virtual assistant. You will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, your task is to generate responses that appear as if they are created by the user and not an AI-powered assistant. [USER]"
|
25 |
|
26 |
def resample(audio_fp32, sr):
|
27 |
return soxr.resample(audio_fp32, sr, sample_rate)
|
|
|
50 |
return text
|
51 |
|
52 |
def model(text):
|
53 |
+
formatted_prompt = system_instructions1 + text + "[OpenGPT 4o]"
|
54 |
stream = client1.text_generation(formatted_prompt, max_new_tokens=512, stream=True, details=True, return_full_text=False)
|
55 |
return "".join([response.token.text for response in stream if response.token.text != "</s>"])
|
56 |
|