Spaces:
Build error
Build error
luxmorocco
committed on
Commit
•
c23289b
0
Parent(s):
initial commit
Browse files- .gitattributes +35 -0
- README.md +13 -0
- app.py +131 -0
- requirements.txt +0 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Ocr Aimlapi Pyzerox
|
3 |
+
emoji: π’
|
4 |
+
colorFrom: gray
|
5 |
+
colorTo: gray
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 5.1.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
short_description: π OCR Reader, π Analyzer, and π¬ Chat Assistant
|
11 |
+
---
|
12 |
+
|
13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
import gradio as gr
|
3 |
+
from pyzerox import zerox
|
4 |
+
import asyncio
|
5 |
+
import openai
|
6 |
+
import markdown2
|
7 |
+
from gradio_pdf import PDF
|
8 |
+
|
9 |
+
# Markdown banner rendered at the top of the Gradio UI.
# NOTE(review): the emoji characters below are mojibake from a lossy
# encoding conversion; original glyphs are unrecoverable, kept as-is.
header = """
# π OCR Reader, π Analyzer, and π¬ Chat Assistant using π Zerox, π§ GPT-4o, powered by π AI/ML API

Author: Jad Tounsi El Azzoiani
GitHub: [https://github.com/jadouse5](https://github.com/jadouse5)
LinkedIn: [Jad Tounsi El Azzoiani](https://www.linkedin.com/in/jad-tounsi-el-azzoiani-87499a21a/)

This project uses:
- [AI/ML API](https://api.aimlapi.com)
- [Gradio](https://www.gradio.app)
- [pyzerox](https://github.com/getomni-ai/zerox?tab=readme-ov-file#python-zerox)
"""

# Model name passed both to zerox() and to the chat-completion calls below.
model = "gpt-4o"  # GPT-4o model from AI/ML API

# Configure credentials for the AI/ML API (OpenAI-compatible endpoint).
# FIX: use setdefault so a real OPENAI_API_KEY / OPENAI_API_BASE already
# present in the environment is not clobbered by the placeholder value.
# (The UI also lets the user supply a key at runtime; see process_and_display.)
os.environ.setdefault("OPENAI_API_KEY", "your_api_key")
os.environ.setdefault("OPENAI_API_BASE", "https://api.aimlapi.com/v1")

# Shared OpenAI client pointed at the AI/ML API base URL.
client = openai.OpenAI(
    api_key=os.environ["OPENAI_API_KEY"],
    base_url=os.environ["OPENAI_API_BASE"]
)
|
35 |
+
|
36 |
+
# Async function to process the file using Zerox OCR and GPT-4o
|
37 |
+
async def process_file(file):
    """OCR *file* with Zerox, then ask GPT-4o for a markdown analysis.

    Returns a ``(extracted_text, analysis_html, source_path)`` triple:
    the concatenated per-page OCR text, the model's analysis rendered to
    HTML via markdown2, and the path of the uploaded file.
    """
    source_path = file.name

    # Run Zerox OCR over the document, up to 5 pages at a time.
    ocr_result = await zerox(
        file_path=source_path,
        model=model,
        cleanup=True,
        concurrency=5,
        maintain_format=True,
    )

    # Stitch the per-page OCR output into one text blob.
    page_texts = [page.content for page in ocr_result.pages]
    extracted_text = "\n\n".join(page_texts)

    # Ask the chat model for a formatted summary of the OCR output.
    analysis_prompt = [
        {"role": "system", "content": "You are an AI assistant that analyzes OCR output. Provide your analysis in markdown format, using bold text, tables, and other formatting as appropriate to make the information clear and easy to read."},
        {"role": "user", "content": f"Analyze the following OCR output and provide a summary:\n\n{extracted_text}"},
    ]
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=analysis_prompt,
        temperature=0.7,
        max_tokens=500
    )

    # Convert the markdown analysis to HTML for the gr.HTML widget.
    analysis_markdown = completion.choices[0].message.content
    analysis_html = markdown2.markdown(analysis_markdown)

    return extracted_text, analysis_html, source_path
|
62 |
+
|
63 |
+
# Function to handle chat with AI
|
64 |
+
def chat_with_ai(message, chat_history, document_content):
    """Answer *message* about the previously extracted document text.

    Appends the ``(question, answer)`` pair to *chat_history* in place and
    returns ``("", chat_history)`` so Gradio clears the input textbox and
    refreshes the chatbot widget.
    """
    # Give the model the document first, then the user's question.
    prompt = [
        {"role": "system", "content": "You are an AI assistant that can answer questions about a document. Use the document content to provide accurate answers."},
        {"role": "user", "content": f"Document content: {document_content}"},
        {"role": "user", "content": message},
    ]
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=prompt,
        temperature=0.7,
        max_tokens=150
    )

    answer = completion.choices[0].message.content
    chat_history.append((message, answer))
    return "", chat_history
|
78 |
+
|
79 |
+
# Build the Gradio interface
|
80 |
+
# Build the Gradio interface. Component construction order inside the
# Blocks context determines on-screen layout, so statement order matters.
with gr.Blocks() as demo:
    gr.Markdown(header)

    # User-supplied key overrides the placeholder set at import time.
    api_key_input = gr.Textbox(
        label="Enter your AI/ML API Key",
        type="password",
        placeholder="Enter your API key here"
    )

    # Row 1: upload + run controls on the left, original document on the right.
    with gr.Row():
        with gr.Column(scale=1):
            file_input = gr.File(label="Upload Document", file_types=[".pdf", ".docx", ".jpg", ".png", ".jpeg"])
            run_button = gr.Button("Run OCR and Analysis")
        with gr.Column(scale=1):
            pdf_viewer = PDF(label="Original Document", interactive=False)

    # Row 2: raw OCR text beside the AI analysis (HTML).
    with gr.Row():
        with gr.Column(scale=1):
            ocr_output = gr.Textbox(label="Extracted Text", lines=20)
        with gr.Column(scale=1):
            ai_analysis_output = gr.HTML(label="AI Analysis")

    # Chat section for follow-up questions about the document.
    gr.Markdown("## Chat with AI about the document")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Ask a question about the document")
    clear = gr.Button("Clear")

    # Holds the extracted document text between the OCR step and the chat.
    document_content = gr.State()

    def process_and_display(file, api_key):
        # Apply the user's key to both the env (read by zerox) and the client.
        os.environ["OPENAI_API_KEY"] = api_key
        client.api_key = api_key
        # process_file is async; drive it to completion synchronously.
        content, analysis, file_path = asyncio.run(process_file(file))
        # file_path feeds both the PDF viewer and the file widget; content is
        # returned twice: once for display, once into document_content state.
        return file_path, file_path, content, analysis, content

    run_button.click(
        process_and_display,
        inputs=[file_input, api_key_input],
        outputs=[pdf_viewer, file_input, ocr_output, ai_analysis_output, document_content]
    )

    # Enter in the textbox sends the question; Clear resets the chatbot.
    msg.submit(chat_with_ai, [msg, chatbot, document_content], [msg, chatbot])
    clear.click(lambda: None, None, chatbot, queue=False)

    footer = gr.Markdown("""
---
Created by Jad Tounsi El Azzoiani | [GitHub](https://github.com/jadouse5) | [LinkedIn](https://www.linkedin.com/in/jad-tounsi-el-azzoiani-87499a21a/)

Powered by [AI/ML API](https://aimlapi.com/?via=jad), [Gradio](https://www.gradio.app), and [pyzerox](https://github.com/getomni-ai/zerox?tab=readme-ov-file#python-zerox)
""")

demo.launch()
|
requirements.txt
ADDED
File without changes
|