luisarizmendi committed on
Commit 9dea5a0
1 Parent(s): 03cd781

Update README.md

Files changed (1)
  1. README.md +23 -15
README.md CHANGED
@@ -153,43 +153,51 @@ import gradio as gr
 from ultralytics import YOLO
 from PIL import Image
 import os
-import cv2
-import torch
+import cv2
+import torch
 
-def detect_objects_in_files(files):
+DEFAULT_MODEL_URL = "https://github.com/luisarizmendi/ai-apps/raw/refs/heads/main/models/luisarizmendi/object-detector-hardhat-or-hat/object-detector-hardhat-or-hat.pt"
+
+def detect_objects_in_files(model_input, files):
     """
     Processes uploaded images for object detection.
     """
     if not files:
         return "No files uploaded.", []
 
-    device = "cuda" if torch.cuda.is_available() else "cpu"
-    model = YOLO("https://github.com/luisarizmendi/ai-apps/raw/refs/heads/main/models/luisarizmendi/object-detector-hardhat-or-hat/object-detector-hardhat-or-hat.pt")
-    model.to(device)
-
+    model = YOLO(str(model_input))
+    if torch.cuda.is_available():
+        model.to('cuda')
+        print("Using GPU for inference")
+    else:
+        print("Using CPU for inference")
+
     results_images = []
     for file in files:
         try:
             image = Image.open(file).convert("RGB")
-            results = model(image)
+            results = model(image)
             result_img_bgr = results[0].plot()
             result_img_rgb = cv2.cvtColor(result_img_bgr, cv2.COLOR_BGR2RGB)
-            results_images.append(result_img_rgb)
-
+            results_images.append(result_img_rgb)
+
             # If you want that images appear one by one (slower)
-            #yield "Processing image...", results_images
-
+            #yield "Processing image...", results_images
+
         except Exception as e:
             return f"Error processing file: {file}. Exception: {str(e)}", []
 
-    del model
+    del model
     torch.cuda.empty_cache()
-
+
     return "Processing completed.", results_images
 
 interface = gr.Interface(
     fn=detect_objects_in_files,
-    inputs=gr.Files(file_types=["image"], label="Select Images"),
+    inputs=[
+        gr.Textbox(value=DEFAULT_MODEL_URL, label="Model URL", placeholder="Enter the model URL"),
+        gr.Files(file_types=["image"], label="Select Images"),
+    ],
     outputs=[
         gr.Textbox(label="Status"),
         gr.Gallery(label="Results")
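
The hunk ends inside the gr.Interface(...) call, so the rest of the README is not shown in this diff. As a minimal sketch of what the change means in practice, the updated function now takes the model URL as its first argument instead of hard-coding it, so it can also be called directly outside the Gradio UI. The module name and image paths below are illustrative assumptions, not taken from the commit.

# A minimal usage sketch, assuming the README snippet above is saved as app.py.
# The module name and the image paths are placeholders, not part of this commit.
from app import detect_objects_in_files, DEFAULT_MODEL_URL

# First argument is the model URL (the new input added by this commit);
# second is a list of image paths or file objects, as gr.Files would pass.
status, annotated_images = detect_objects_in_files(
    DEFAULT_MODEL_URL,                  # or any other YOLO .pt weights URL/path
    ["example1.jpg", "example2.jpg"],   # placeholder local image files
)

print(status)                           # "Processing completed." on success
print(len(annotated_images))            # annotated results as RGB numpy arrays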