mobinln committed on
Commit
68eded2
1 Parent(s): eee4fbc

change to mistral v0.3

Browse files
Files changed (1) hide show
  1. app.py +4 -5
app.py CHANGED
@@ -1,16 +1,16 @@
1
  import gradio as gr
2
  from llama_cpp import Llama
3
 
4
- model = "lmstudio-community/gemma-2-2b-it-GGUF"
5
  llm = Llama.from_pretrained(
6
  repo_id=model,
7
- filename="gemma-2-2b-it-Q8_0.gguf",
8
  verbose=True,
9
  use_mmap=False,
10
  use_mlock=True,
11
  n_threads=2,
12
  n_threads_batch=2,
13
- n_ctx=8000,
14
  )
15
 
16
 
@@ -22,8 +22,7 @@ def respond(
22
  temperature,
23
  top_p,
24
  ):
25
- # messages = [{"role": "system", "content": system_message}]
26
- messages = []
27
 
28
  for val in history:
29
  if val[0]:
 
1
  import gradio as gr
2
  from llama_cpp import Llama
3
 
4
+ model = "MaziyarPanahi/Mistral-7B-Instruct-v0.3-GGUF"
5
  llm = Llama.from_pretrained(
6
  repo_id=model,
7
+ filename="Mistral-7B-Instruct-v0.3.Q4_K_M.gguf",
8
  verbose=True,
9
  use_mmap=False,
10
  use_mlock=True,
11
  n_threads=2,
12
  n_threads_batch=2,
13
+ n_ctx=32000,
14
  )
15
 
16
 
 
22
  temperature,
23
  top_p,
24
  ):
25
+ messages = [{"role": "system", "content": system_message}]
 
26
 
27
  for val in history:
28
  if val[0]: