GRATITUD3 committed on
Commit
b413d1c
1 Parent(s): 92dbca0

edit errors

Browse files
Files changed (1) hide show
  1. app.py +30 -36
app.py CHANGED
@@ -5,64 +5,58 @@ from autodistill_grounding_dino import GroundingDINO
5
  from autodistill.utils import plot
6
  import tempfile
7
  import cv2
8
-
9
  from autodistill.core.custom_detection_model import CustomDetectionModel
10
 
11
- # Hardcoded values - ensure to protect the API key
12
- api_key = "sk-REDACTED"  # SECURITY: a live OpenAI API key was committed here; it has been redacted from this record and must be revoked.
13
- dino_prompt = "buildings. parks."
14
  gpt_prompt = "carbon sinks"
15
 
16
  MARKDOWN = """
17
- # NESGPT-AutoAnnotator
18
- Grounding DINO for Zero-Shot Carbon Sink Identification and Object Segmentation
19
- ."""
 
20
 
21
  def respond(input_image):
22
  input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
23
  with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
24
  cv2.imwrite(temp_file.name, input_image)
25
 
26
- try:
27
- DINOGPT = CustomDetectionModel(
28
- detection_model=GroundingDINO(
29
- CaptionOntology({dino_prompt: dino_prompt})
30
- ),
31
- classification_model=GPT4V(
32
- CaptionOntology({k: k for k in gpt_prompt.split(", ")}),
33
- api_key=api_key # Use the hardcoded API key
34
- )
35
  )
 
36
 
37
- results = DINOGPT.predict(temp_file.name)
38
 
39
- # Assuming the results are a list of dictionaries with 'class' keys
40
- # Extracting class names from results
41
- predicted_classes = [result['class'] for result in results]
42
- # Filtering the ontology prompts to include only those that match the predicted classes
43
- filtered_ontology = {k: v for k, v in DINOGPT.classification_model.ontology.prompts().items() if k in predicted_classes}
44
 
45
- result_image = plot(
46
- image=cv2.imread(temp_file.name),
47
- detections=results,
48
- classes=list(filtered_ontology.keys()), # Use the filtered ontology
49
- raw=True
50
- )
51
- except ValueError as e:
52
- print("An error occurred:", e)
53
- # Handle the error appropriately, perhaps by returning an error message or default image
54
- result_image = None # Replace with your error handling logic
55
 
56
- return result_image
57
 
58
  with gr.Blocks() as demo:
59
  gr.Markdown(MARKDOWN)
60
  with gr.Row():
61
  with gr.Column():
62
- input_image = gr.Image(type="numpy", label="Input Location")
63
  with gr.Column():
64
- output_image = gr.Image(type="numpy", label="Carbon Sink Identifier")
65
- submit_button = gr.Button("Identify Carbon Sinks")
66
 
67
  submit_button.click(
68
  fn=respond,
 
5
  from autodistill.utils import plot
6
  import tempfile
7
  import cv2
 
8
  from autodistill.core.custom_detection_model import CustomDetectionModel
9
 
10
import os

# Configuration constants.
# Read the OpenAI API key from the environment rather than hard-coding a
# secret in source control; the placeholder remains only as a local fallback.
api_key = os.environ.get("OPENAI_API_KEY", "your-hardcoded-api-key")
# Text prompt handed to Grounding DINO for zero-shot detection.
dino_prompt = "buildings, parks."
# Comma-separated class labels handed to GPT-4V for classification.
gpt_prompt = "carbon sinks"

# Markdown banner rendered at the top of the Gradio demo.
MARKDOWN = """
# DINO-GPT4V
Use Grounding DINO and GPT-4V to label specific objects.
Visit [awesome-openai-vision-api-experiments](https://github.com/roboflow/awesome-openai-vision-api-experiments)
repository to find more OpenAI Vision API experiments or contribute your own."""
20
 
21
def respond(input_image):
    """Detect and label objects in *input_image* with Grounding DINO + GPT-4V.

    Args:
        input_image: numpy array supplied by the Gradio ``gr.Image``
            component (``type="numpy"``); converted with COLOR_BGR2RGB
            before being written to disk.

    Returns:
        The annotated image produced by ``plot(..., raw=True)``, suitable
        for display in the output ``gr.Image`` component.
    """
    import os

    # Convert channel order before persisting, matching what the
    # downstream models expect to read back from the file.
    input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
        cv2.imwrite(temp_file.name, input_image)

    try:
        # Compose detection (Grounding DINO) with per-box classification
        # (GPT-4V) over the configured prompts.
        DINOGPT = CustomDetectionModel(
            detection_model=GroundingDINO(
                CaptionOntology({dino_prompt: dino_prompt})
            ),
            classification_model=GPT4V(
                CaptionOntology({k: k for k in gpt_prompt.split(", ")}),
                api_key=api_key
            )
        )

        results = DINOGPT.predict(temp_file.name)

        # Some versions return a (detections, extras) tuple; keep only the
        # detections component for plotting.
        if isinstance(results, tuple):
            results = results[0]

        result = plot(
            image=cv2.imread(temp_file.name),
            detections=results,
            classes=gpt_prompt.split(", "),
            raw=True
        )
    finally:
        # NamedTemporaryFile(delete=False) leaves the file behind; remove it
        # so repeated requests don't accumulate temp images on disk.
        os.unlink(temp_file.name)

    return result
51
 
52
  with gr.Blocks() as demo:
53
  gr.Markdown(MARKDOWN)
54
  with gr.Row():
55
  with gr.Column():
56
+ input_image = gr.Image(type="numpy", label="Input Image")
57
  with gr.Column():
58
+ output_image = gr.Image(type="numpy", label="Output Image")
59
+ submit_button = gr.Button("Submit")
60
 
61
  submit_button.click(
62
  fn=respond,