pcuenq (HF staff) committed
Commit 39d9780
Parent: d06b261

pip transformers

Files changed (2):
  1. app.py +2 -8
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,15 +1,7 @@
  import os
- import shlex
- import subprocess
  from threading import Thread
  from typing import Iterator
 
- from huggingface_hub import hf_hub_download
-
- whl_path = hf_hub_download("google/gemma-2-9b-it", "transformers/transformers-4.42.0.dev0-py3-none-any.whl")
- subprocess.run(shlex.split(f"pip install {whl_path}"))
-
-
  import gradio as gr
  import spaces
  import torch
@@ -26,6 +18,8 @@ DESCRIPTION = """\
  Gemma 2 is Google's latest iteration of open LLMs.
  This is a demo of [`google/gemma-2-9b-it`](https://huggingface.co/google/gemma-2-9b-it), fine-tuned for instruction following.
  For more details, please check [our post](https://huggingface.co/blog/gemma2).
+
+ 👉 Looking for a larger and more powerful version? Try the 27B version in [HuggingChat](https://huggingface.co/chat/models/google/gemma-2-27b-it).
  """
 
  MAX_MAX_NEW_TOKENS = 2048
requirements.txt CHANGED
@@ -3,4 +3,4 @@ bitsandbytes==0.43.1
  gradio==4.37.1
  spaces==0.28.3
  torch==2.2.0
- #transformers==4.42.0
+ transformers==4.42.1
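
With the startup-time wheel install removed from app.py, Gemma 2 support comes directly from the pinned transformers==4.42.1 release installed via requirements.txt. Below is a minimal sketch of loading `google/gemma-2-9b-it` against that release; it is not the Space's actual inference code (which this commit does not touch), and the bfloat16 dtype and `device_map="auto"` settings are illustrative assumptions.

```python
# Minimal sketch: load google/gemma-2-9b-it with the pinned transformers release.
# Not the Space's app.py; the dtype/device settings below are assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "google/gemma-2-9b-it"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumed dtype for the demo
    device_map="auto",           # place weights on the available GPU(s)
)

# Build a chat-formatted prompt and generate a short reply.
messages = [{"role": "user", "content": "Hello, Gemma!"}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
output_ids = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```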