import gradio as gr

# VARIABLES: will eventually be loaded with JSON from a dataset (sketch below)
question_text = """
### Bar Question
What is the answer to this question?"""

answers_text = ["A", "B", "C", "D"]
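
# A possible shape for that JSON load (sketch only; the file name
# "questions.json" and its keys are hypothetical, not part of this app yet):
#
#   import json
#
#   with open("questions.json") as f:
#       data = json.load(f)
#   question_text = data["question"]  # markdown-formatted question
#   answers_text = data["answers"]    # list of four answer strings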

# BLOCKS: main user interface
with gr.Blocks() as user_eval:
    # Title text introducing study
    gr.Markdown("""
    # Legal Retriever Evaluation Study
    Thank you for your participation! Here are some basic instructions on how to complete the legal study.
    """)

    # Passages and user evaluations thereof
    with gr.Row(equal_height=False, visible=False) as evals:
        # Passage text
        with gr.Column(scale=2) as passages:
            passage_display = gr.Markdown("""
            ### Relevant Passages
            - Dataset 1
            - Dataset 2
            - More text
            - More text
            - More text
            - More text

            ### Auto-Generated Summary
            This is a summary of the above legal passages, which imitates how a RAG system might \
            incorporate retrieved data into its context to give a better response to a certain query.
            """)

        # Scoring box
        with gr.Column(scale=1) as scores:
            desc_1 = gr.Markdown("How **relevant** are these passages to our query?")
            eval_1 = gr.Slider(1, 5, step=0.5)
            desc_2 = gr.Markdown("How **novel** are these passages compared to the previous passages?")
            eval_2 = gr.Slider(1, 5, step=0.5)
            btn = gr.Button("Next")

            # Placeholder handler: logs the two slider scores for now
            def submit_scores(relevance, novelty):
                print(relevance + novelty)

            btn.click(fn=submit_scores, inputs=[eval_1, eval_2])
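
            # One possible way to keep scores across clicks (sketch only,
            # assuming a gr.State list; not wired into the app yet):
            #
            #   score_log = gr.State([])
            #
            #   def record_scores(relevance, novelty, log):
            #       log.append((relevance, novelty))
            #       return log
            #
            #   btn.click(fn=record_scores,
            #             inputs=[eval_1, eval_2, score_log],
            #             outputs=score_log)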

    # Question and answering dynamics
    with gr.Row() as question:
        with gr.Column():
            gr.Markdown(question_text)
            a = gr.Button(answers_text[0])
            b = gr.Button(answers_text[1])
            c = gr.Button(answers_text[2])
            d = gr.Button(answers_text[3])

            # Any answer hides the question row and reveals the evaluation row
            def answer():
                return {
                    question: gr.Row(visible=False),
                    evals: gr.Row(visible=True),
                }

            a.click(fn=answer, outputs=[question, evals])
            b.click(fn=answer, outputs=[question, evals])
            c.click(fn=answer, outputs=[question, evals])
            d.click(fn=answer, outputs=[question, evals])
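
            # The dict return above updates components by identity; an
            # equivalent list-style return (order must match `outputs`) would be:
            #
            #   def answer():
            #       return [gr.Row(visible=False), gr.Row(visible=True)]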

# Starts on question, switches to evaluation after the user answers
user_eval.launch()