import json
import os

import gradio as gr
from PIL import Image
from transformers import pipeline
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_google_genai import ChatGoogleGenerativeAI

# Retrieve the API keys and other secrets from the environment
api_key = os.environ.get('GOOGLE_API_KEY')
if api_key is None:
    raise ValueError("No API key found. Please set the 'GOOGLE_API_KEY' environment variable.")

tracking_id = os.environ.get('TRACKING_ID')
if tracking_id is None:
    raise ValueError("No tracking ID found. Please set the 'TRACKING_ID' environment variable.")

initial_prompt = os.environ.get('initial_prompt')
if initial_prompt is None:
    raise ValueError("No initial prompt found. Please set the 'initial_prompt' environment variable.")

description_json = os.environ.get('description')
if description_json is None:
    raise ValueError("No description found. Please set the 'description' environment variable.")

# Convert the description JSON string to a dictionary
description = json.loads(description_json)

# Make sure the key is exposed as GOOGLE_API_KEY so langchain_google_genai can read it from the environment
os.environ['GOOGLE_API_KEY'] = api_key

# Initialize the OCR pipeline
ocr_pipe = pipeline("image-to-text", model="jinhybr/OCR-Donut-CORD")

# Initialize the LLM
llm_model = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0.4, top_p=0.85)

# Define the prompt template; the initial_prompt secret is expected to contain placeholders for each of these input variables
prompt = PromptTemplate(input_variables=['task_type', 'task_number', 'question', 'content', 'description'], template=initial_prompt)

# Define the LLM chain
chain = LLMChain(llm=llm_model, prompt=prompt)

def evaluate(task_type, task_number, question, input_type, image=None, text=None):
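    """Evaluate a writing answer provided either as an image (extracted via OCR) or as plain text."""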
    if input_type == "Image" and image is not None:
        # Ensure the image is in the correct format
        if isinstance(image, str):
            # Load the image if it was provided as a file path
            image = Image.open(image)
        
        # Process the image to extract text
        text_content = ocr_pipe(image)
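        # The image-to-text pipeline returns a list of dicts; the recognized text is under 'generated_text'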
        content = text_content[0]['generated_text']
    elif input_type == "Text" and text is not None:
        content = text
    else:
        return "Please provide the required input based on your selection."

    # Retrieve the description for the given task type and number, or fall back to a default.
    # json.loads produces string keys, so the lookup key is built as a string; the exact key
    # format is an assumption about how the 'description' secret is structured.
    task_description = description.get(f"{task_type} {task_number}", "No description available for this task.")

    # Run the chain
    result = chain.run({
        'task_type': task_type,
        'task_number': task_number,
        'question': question,
        'content': content,
        'description': task_description
    })

    return result

# Input components for the Gradio evaluation interface
inputs = [
    gr.Dropdown(choices=["Academic", "General"], label="Test Type", value="Academic"),
    gr.Dropdown(choices=["Task 1", "Task 2"], label="Task Number", value="Task 1"),
    gr.Textbox(label="Question", value=""),
    gr.Radio(choices=["Image", "Text"], label="Input Type", value="Image"),
    gr.Image(type="pil", label="Upload Image", visible=True),
    gr.Textbox(label="Enter Text", visible=False)
]

def toggle_input(input_type):
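    """Show the image input or the text input depending on the selected input type."""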
    if input_type == "Image":
        return gr.update(visible=True), gr.update(visible=False)
    else:
        return gr.update(visible=False), gr.update(visible=True)

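# Footer HTML: Google Analytics snippet, Font Awesome icons, and contact links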
footer_html_with_analytics = f"""
<script async src="https://www.googletagmanager.com/gtag/js?id={tracking_id}"></script>
<script>
  window.dataLayer = window.dataLayer || [];
  function gtag(){{dataLayer.push(arguments);}}
  gtag('js', new Date());
  gtag('config', '{tracking_id}');
</script>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0-beta3/css/all.min.css">
<div style='text-align: center; margin-top: 20px;'>
    <p>Developed by Hossein Mohseni</p>
    <p>Contact Information:</p>
    <p>
        <a href='mailto:mohseni.h1999@gmail.com' style='margin-right: 10px;'>
            <i class='fas fa-envelope'></i>
        </a>
        <a href='https://www.linkedin.com/in/mohsenihossein/' target='_blank' style='margin-right: 10px;'>
            <i class='fab fa-linkedin'></i>
        </a>
        <a href='https://t.me/mohsenih1999' target='_blank'>
            <i class='fab fa-telegram'></i>
        </a>
    </p>
    <p>This application is a demonstration. Your feedback to help enhance and improve it is highly appreciated.</p>
</div>
"""

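# Markdown component for displaying the evaluation result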
outputs = gr.Markdown(label="Result")

# Define the Gradio Blocks and Interface
with gr.Blocks() as demo:
    gr.Markdown("# IELTS Writing Evaluation")
    with gr.Row():
        with gr.Column():
            input_type_radio = gr.Radio(choices=["Image", "Text"], label="Input Type", value="Image")
            image_input = gr.Image(type="pil", label="Upload Image", visible=True)
            text_input = gr.Textbox(label="Enter Text", visible=False)
        input_type_radio.change(toggle_input, input_type_radio, [image_input, text_input])
    gr.Interface(fn=evaluate, inputs=inputs, outputs=outputs)
    gr.HTML(footer_html_with_analytics)

# Launch the interface
demo.launch(share=True, debug=True)