KingNish committed
Commit 3dff02c
1 Parent(s): 297327b

Update chatbot.py

Files changed (1)
  1. chatbot.py +4 -25
chatbot.py CHANGED
@@ -19,7 +19,6 @@ from huggingface_hub import InferenceClient
 from PIL import Image
 import spaces
 from functools import lru_cache
-import cv2
 import re
 import io
 import json
@@ -27,33 +26,13 @@ from gradio_client import Client, file
 from groq import Groq
 
 # Model and Processor Loading (Done once at startup)
-MODEL_ID = "Qwen/Qwen2-VL-2B-Instruct"
+MODEL_ID = "Qwen/Qwen2-VL-7B-Instruct"
 model = Qwen2VLForConditionalGeneration.from_pretrained(MODEL_ID, trust_remote_code=True, torch_dtype=torch.float16).to("cuda").eval()
 processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
 
 GROQ_API_KEY = os.environ.get("GROQ_API_KEY", None)
 
 client_groq = Groq(api_key=GROQ_API_KEY)
-
-def sample_frames(video_file):
-    try:
-        video = cv2.VideoCapture(video_file)
-        total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
-        num_frames = 12
-        interval = total_frames // num_frames
-        frames = []
-        for i in range(total_frames):
-            ret, frame = video.read()
-            pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
-            if not ret:
-                continue
-            if i % interval == 0:
-                frames.append(pil_img)
-        video.release()
-        return frames
-    except:
-        frames = []
-        return frames
 
 
 # Path to example images
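Note on the removed helper: sample_frames (the only use of the cv2 import dropped in the first hunk) sampled 12 evenly spaced frames from a video, but it converted each frame before checking the read result, and its integer division made interval zero for videos shorter than 12 frames, so the bare except often silently returned an empty list. For reference, a corrected standalone sketch of the same sampler, assuming OpenCV is still available:

import cv2
from PIL import Image

def sample_frames(video_file, num_frames=12):
    """Sample up to num_frames evenly spaced frames from a video as PIL images."""
    frames = []
    video = cv2.VideoCapture(video_file)
    try:
        total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        if total_frames <= 0:
            return frames
        # max(..., 1) avoids the zero interval the original hit on short videos
        interval = max(total_frames // num_frames, 1)
        for i in range(total_frames):
            ret, frame = video.read()
            if not ret:
                break  # check the read result before converting the frame
            if i % interval == 0:
                frames.append(Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)))
    finally:
        video.release()
    return frames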
@@ -109,7 +88,7 @@ EXAMPLES = [
     ],
     [
         {
-            "text": "Who are they? Tell me about both of them",
+            "text": "Who are they? Tell me about both of them.",
             "files": [f"{examples_path}/example_images/elon_smoking.jpg",
                       f"{examples_path}/example_images/steve_jobs.jpg", ]
         }
@@ -333,9 +312,9 @@ def model_inference( user_prompt, chat_history):
             image = image_gen(f"{str(query)}")
             yield gr.Image(image[1])
         except:
-            client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
+            client_flux = InferenceClient("black-forest-labs/FLUX.1-schnell")
             seed = random.randint(0,999999)
-            image = client_sd3.text_to_image(query, negative_prompt=f"{seed}")
+            image = client_flux.text_to_image(query, negative_prompt=f"{seed}")
             yield gr.Image(image)
 
 
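Note on the new fallback: negative_prompt=f"{seed}" hands the random seed to the endpoint as negative-prompt text rather than as a sampling seed, a quirk carried over unchanged from the SD3 version. If reproducible generations are the intent, a sketch of the fallback using the dedicated seed argument (available on InferenceClient.text_to_image in recent huggingface_hub releases) might look like:

import random
from huggingface_hub import InferenceClient

client_flux = InferenceClient("black-forest-labs/FLUX.1-schnell")
query = "a red fox in watercolor"  # placeholder; in the app this comes from model_inference
seed = random.randint(0, 999999)
# Pass the seed through the seed parameter, not as negative-prompt text.
image = client_flux.text_to_image(query, seed=seed)
image.save("out.png")  # text_to_image returns a PIL.Image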
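For context on the headline change (MODEL_ID moving from Qwen2-VL-2B-Instruct to Qwen2-VL-7B-Instruct): the two checkpoints share the same processor and generation flow, so the swap is a one-line change. A minimal single-image inference sketch in the style of the Qwen2-VL model card, reusing the model and processor globals loaded at startup (image is assumed to be a PIL.Image):

# One chat turn: template the message, run generate, decode only the reply.
messages = [
    {"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": "Describe this image."},
    ]}
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to("cuda")
output_ids = model.generate(**inputs, max_new_tokens=128)
# Drop the prompt tokens so only the newly generated text is decoded.
reply = processor.batch_decode(
    output_ids[:, inputs.input_ids.shape[1]:], skip_special_tokens=True
)[0]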