Update app.py
app.py CHANGED
@@ -34,7 +34,7 @@ def extract_assistant_reply(input_string):
 
 
 @spaces.GPU
-def diagnose_router(image):
+def med_interpreter(image):
     messages = [
         {"role": "user", "content": [
             {"type": "image"},
@@ -45,7 +45,7 @@ def diagnose_router(image):
     inputs = processor(image, input_text, return_tensors="pt").to(model.device)
 
     # Generate the output from the model
-    output = model.generate(**inputs, max_new_tokens=…
+    output = model.generate(**inputs, max_new_tokens=4000)
     print(output)
     markdown_text = processor.decode(output[0])
     print(markdown_text)
@@ -56,11 +56,11 @@ def diagnose_router(image):
 
 # Gradio UI
 interface = gr.Interface(
-    fn=…
-    inputs=gr.Image(type="pil", label="Upload an image of the …
+    fn=med_interpreter,
+    inputs=gr.Image(type="pil", label="Upload an image of the medical report"),
     outputs=gr.HTML(),
-    title="…
-    description="Upload …
+    title="Medical Report Insights",
+    description="Upload an image of your medical report to get the interperation of it"
 )
 
 # Launch the UI
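
For context, below is a minimal sketch of what the updated app.py plausibly looks like end to end, reconstructed from the fragments visible in this diff. Only the lines shown in the diff above are confirmed; the model checkpoint, the prompt text, the apply_chat_template call, and the return handling are assumptions for illustration.

# Minimal sketch of the updated app, assuming a Llama 3.2 Vision-style
# checkpoint served through transformers on a ZeroGPU Space.
import gradio as gr
import spaces
import torch
from transformers import AutoProcessor, MllamaForConditionalGeneration

MODEL_ID = "meta-llama/Llama-3.2-11B-Vision-Instruct"  # assumed checkpoint, not shown in the diff

model = MllamaForConditionalGeneration.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto"
)
processor = AutoProcessor.from_pretrained(MODEL_ID)


@spaces.GPU
def med_interpreter(image):
    # Chat-style prompt with an image placeholder, as in the diff
    messages = [
        {"role": "user", "content": [
            {"type": "image"},
            {"type": "text", "text": "Explain this medical report in plain language."},  # assumed prompt
        ]}
    ]
    # Assumed: the chat template produces the input_text used below
    input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(image, input_text, return_tensors="pt").to(model.device)

    # Generate the output from the model
    output = model.generate(**inputs, max_new_tokens=4000)
    markdown_text = processor.decode(output[0])
    # The real app also has an extract_assistant_reply helper (see the first
    # hunk header); how the reply is post-processed into HTML is not shown.
    return markdown_text


# Gradio UI
interface = gr.Interface(
    fn=med_interpreter,
    inputs=gr.Image(type="pil", label="Upload an image of the medical report"),
    outputs=gr.HTML(),
    title="Medical Report Insights",
    description="Upload an image of your medical report to get an interpretation of it",
)

# Launch the UI
interface.launch()

Raising max_new_tokens to 4000 gives the model room to return a full report interpretation rather than a truncated reply, at the cost of longer generation time per request.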
|