Spaces:
Runtime error
Runtime error
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import base64
|
3 |
+
import requests
|
4 |
+
import random
|
5 |
+
import os
|
6 |
+
from openai import OpenAI
|
7 |
+
from PIL import Image
|
8 |
+
import json
|
9 |
+
import cohere
|
10 |
+
|
11 |
+
# API credentials are read from environment variables (Space secrets);
# each value is None when the corresponding variable is not set.
iucn_api_token = os.environ.get('IUCN_API')
cohere_api_token = os.environ.get('COHERE_API')
openai_api_token = os.environ.get('OPENAI_API')

# Module-level API clients, constructed once at import time.
# NOTE(review): `client` is never used below — get_taxonomy calls the OpenAI
# HTTP endpoint directly via requests; confirm whether it can be removed.
client = OpenAI(api_key=openai_api_token)
co = cohere.Client(cohere_api_token)
17 |
+
|
18 |
+
def encode_image(image_path):
    """Return the file at *image_path* as a base64 string (UTF-8 decoded)."""
    with open(image_path, "rb") as handle:
        raw = handle.read()
    return base64.b64encode(raw).decode('utf-8')
|
21 |
+
|
22 |
+
def summarize_with_llm(text, prompt, max_token=210):
    """Condense *text* with the Cohere `command` model, steered by *prompt*.

    Returns the first generation's raw text; *max_token* caps the reply length.
    """
    query = f'This is a piece of information about an animal: "{text}". {prompt}'
    reply = co.generate(
        model='command',
        prompt=query,
        max_tokens=max_token,
        temperature=0.5,
        k=0,
        stop_sequences=[],
        return_likelihoods='NONE',
    )
    return reply.generations[0].text
|
32 |
+
|
33 |
+
def get_iucn_data(genus, species):
    """Fetch IUCN Red List details for the binomial *genus* *species*.

    Queries the narrative, assessment-history, common-name and weblink
    endpoints of the IUCN API v3. Returns a dict with keys
    status_category, status_code, common_name, web_link, threats,
    population and habitat — or an empty dict when the species is unknown
    or any endpoint fails.

    Fixes over the original: every response is checked for HTTP 200 (the
    old code only checked the narrative one and then crashed with
    KeyError/IndexError on error payloads), `dict.get` guards missing
    'result' keys, and a species with no registered common name falls
    back to its scientific name instead of raising IndexError.
    """
    iucn_narrative = requests.get(f"https://apiv3.iucnredlist.org/api/v3/species/narrative/{genus}%20{species}?token={iucn_api_token}")
    iucn_status = requests.get(f"https://apiv3.iucnredlist.org/api/v3/species/history/name/{genus}%20{species}?token={iucn_api_token}")
    iucn_common_name = requests.get(f"https://apiv3.iucnredlist.org/api/v3/species/common_names/{genus}%20{species}?token={iucn_api_token}")
    iucn_web_link = requests.get(f"https://apiv3.iucnredlist.org/api/v3/weblink/{genus}%20{species}")

    responses = (iucn_narrative, iucn_status, iucn_common_name, iucn_web_link)
    if any(r.status_code != 200 for r in responses):
        return dict()

    narratives = iucn_narrative.json().get('result') or []
    history = iucn_status.json().get('result') or []
    common_names = iucn_common_name.json().get('result') or []
    if not narratives or not history:
        # Species not present in the Red List (or an empty error payload).
        return dict()

    status_category = history[0].get('category')
    status_code = history[0].get('code')
    # Not every species has a common name; fall back to the scientific name.
    common_name = common_names[0]['taxonname'] if common_names else f"{genus} {species}"
    web_link = iucn_web_link.json().get('rlurl', '')

    narrative = narratives[0]
    # Narrative fields can be null; pass an empty string rather than None
    # into the summarizer.
    threats = summarize_with_llm(narrative.get('threats') or '', 'In one sentence, the threats posing this species are', max_token=210)
    population = summarize_with_llm(narrative.get('population') or '', 'In one sentence, estimation of the population of this species is', max_token=210)
    habitat = summarize_with_llm(narrative.get('habitat') or '', 'Description of the habitat of this species is')

    # Keep only the first sentence of each LLM summary.
    return {
        "status_category": status_category,
        "status_code": status_code,
        "common_name": common_name,
        "web_link": web_link,
        "threats": threats.strip().split('.')[0],
        "population": population.strip().split('.')[0],
        "habitat": habitat.strip().split('.')[0],
    }
|
63 |
+
|
64 |
+
def _extract_result(content):
    """Parse the vision model's reply and return the value under "result".

    The model may wrap its JSON in markdown code fences; strip them only
    when present. (The original code unconditionally dropped the first and
    last line of the reply, which broke on un-fenced JSON.)
    """
    text = content.strip()
    if text.startswith("```"):
        lines = text.split("\n")
        text = "\n".join(lines[1:-1])
    return json.loads(text)["result"]


def get_taxonomy(image):
    """Read the scientific name from the zoo sign in *image* (a PIL image).

    Saves the image to a unique temp file, sends it base64-encoded to the
    GPT-4 Vision chat endpoint, and returns either a scientific name or one
    of the sentinel replies requested in the prompt ('low quality image',
    'no sign found', 'more than one sign').
    """
    import uuid

    # Unique name per request: the original randint(0, 1000) suffix could
    # collide under concurrent uploads, letting one request delete or read
    # another request's file. Also avoids shadowing the builtin `id`.
    image_path = f"upload_{uuid.uuid4().hex}.png"
    image.save(image_path)
    try:
        # Getting the base64 string
        base64_image = encode_image(image_path)
    finally:
        # Always remove the temp file, even if encoding fails.
        os.remove(image_path)

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {openai_api_token}"
    }

    payload = {
        "model": "gpt-4-vision-preview",
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": """
Your role is to identify scientific names of species from zoo signs in images, focusing strictly on extracting the scientific name.
If the image is low quality or unreadable, the response in the JSON will be 'low quality image'.
If no informational sign is detected, it will respond with 'no sign found'.
When multiple signs are present, the response will be 'more than one sign'.
The GPT interacts minimally, responding in a dictionary format with the key "result" and the value being the scientific name or the specific response based on the image analysis.
"""
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": f"data:image/jpeg;base64,{base64_image}",
                            "detail": "low"
                        }
                    }
                ]
            }
        ],
        "max_tokens": 300
    }

    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)

    result = response.json()['choices'][0]['message']['content']

    return _extract_result(result)
|
117 |
+
|
118 |
+
def get_information(image):
    """Gradio handler: identify the species on the sign in *image* and
    return a Markdown summary, enriched with IUCN Red List data when the
    species is found.

    Fix over the original: the vision model can return sentinel messages
    ('low quality image', 'no sign found', 'more than one sign') or a
    single word instead of a binomial name; `taxonomy.split()[1]` then
    raised IndexError (crashing the Space) or fed nonsense words into the
    IUCN lookup. Those replies are now shown to the user as-is.
    """
    taxonomy = get_taxonomy(image)
    parts = taxonomy.split()

    sentinels = {'low quality image', 'no sign found', 'more than one sign'}
    if taxonomy.strip().lower() in sentinels or len(parts) < 2:
        # Not a "Genus species" pair — nothing to look up.
        return f"## {taxonomy}"

    genus, species = parts[0], parts[1]
    iucn_data = get_iucn_data(genus, species)
    information = f"## {taxonomy}"

    if len(list(iucn_data.keys())) > 0:
        information += f"""
## {iucn_data['common_name']}

**Conservation status**: {iucn_data['status_category']} ({iucn_data['status_code']}).

**Threats**: {iucn_data['threats']}.

**Population**: {iucn_data['population']}.

**Habitat**: {iucn_data['habitat']}.

*For more information, please visit this species page*: {iucn_data['web_link']}
"""

    return information
|
140 |
+
|
141 |
+
# --- Gradio UI -------------------------------------------------------------
# Single image input (type='pil' so get_information receives a PIL image it
# can .save()) rendered alongside a Markdown output pane.
image = gr.Image(label="Image", type='pil')
output = gr.Markdown()

# NOTE(review): 'example_1.png' / 'example_2.jpeg' must exist in the Space
# repository root, or the examples gallery will fail to load — confirm.
demo = gr.Interface(
    fn=get_information,
    inputs=[image],
    outputs=output,
    title="ZooSign Reader",
    examples=['example_1.png', 'example_2.jpeg'],
    description="""
Introducing **ZooSign Reader**, an innovative application designed to enhance your zoo experience! **ZooSign Reader** allows users to effortlessly upload images of zoo informational signs and receive detailed information about the species mentioned on those signs.

With **ZooSign Reader**, you no longer need to spend time searching for information about a particular animal or bird species while visiting the zoo. Simply capture an image of the sign using your smartphone camera, or choose an existing image from your gallery, and let **ZooSign Reader** do the rest.

Using cutting-edge image recognition and natural language processing technologies, **ZooSign Reader** quickly analyzes the uploaded image and extracts the text containing the scientific name. The app then searches the IUCN Redlist's extensive database, which includes a wide range of animals, birds, and reptiles found in zoos worldwide.
"""
)

# Start the web server (blocking call; Spaces runs this at import).
demo.launch()
|