import json
import os
from dataclasses import dataclass, field
from typing import List, Optional, Dict

from PIL import Image
import numpy as np
import pandas as pd
import streamlit as st
from fsspec.implementations.local import LocalFileSystem
from huggingface_hub import HfFileSystem
import streamlit.components.v1 as components


@dataclass
class Field:
    type: str
    title: str
    name: Optional[str] = None
    mandatory: bool = True
    # if the value of this field is in the list below, the following sibling fields become mandatory
    following_mandatory_values: Optional[list] = None
    help: Optional[str] = None
    children: Optional[List['Field']] = None
    other_params: Optional[Dict[str, object]] = field(default_factory=lambda: {})


# Function to get a parameter (e.g. the user ID) from the URL
def get_param_from_url(param):
    user_id = st.query_params.get(param, "")
    return user_id


########################################################################################
# CHANGE THE FOLLOWING VARIABLES ACCORDING TO YOUR NEEDS

# 'local' or 'hf'. 'hf' uses the Hugging Face file system but limits the number of accesses per hour.
filesystem = 'hf'
# path to repo or local file system  TODO rename
input_repo_path = 'datasets/emvecchi/annotation'
output_repo_path = 'datasets/emvecchi/annotation'

# filesystem = 'local'
# # path to repo or local file system
# input_repo_path = '/data/mod-gen-eval-pref'
# output_repo_path = '/data/mod-gen-eval-pref'

to_annotate_file_name = 'to_annotate.csv'  # CSV file to annotate

COLS_TO_SAVE = ['comment_id', 'comment', 'confidence_score']

agreement_labels = ['strongly disagree', 'disagree', 'neither agree nor disagree', 'agree', 'strongly agree']
quality_labels = ['very poor', 'poor', 'acceptable', 'good', 'very good']
priority_labels = ['not a priority', 'low priority', 'neutral', 'moderate priority', 'high priority']
yes_no_labels = ['no', 'yes']
yes_no_other_labels = ['no', 'yes', 'other']
default_labels = agreement_labels

function_choices = ['Broadening Discussion', 'Improving Comment Quality', 'Content Correction',
                    'Keeping Discussion on Topic', 'Organizing Discussion', 'Policing',
                    'Resolving Site Use Issues', 'Social Functions', 'Other (please specify)']
property_choices = ['appropriateness', 'clarity', 'constructiveness', 'common good', 'effectiveness',
                    'emotion', 'impact', 'overall quality', 'proposal', 'Q for justification',
                    'storytelling', 'rationality', 'reasonableness', 'reciprocity', 'reference',
                    'respect', 'moderation behavior', 'Other (please specify)']
assistance_choices = ['Expand the breadth of moderator role', 'Reduce my own bias',
                      # 'Assist with recall',
                      'Avoids me missing relevant instances',
                      'Improve speed of moderation tasks',
                      'Manage prioritization of comments to consider',
                      'Visualization of properties narrows down moderator contribution',
                      'Other (please specify)']
default_choices = function_choices
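# Illustrative note: the likert-style widgets defined further down store the *index* of the
# selected option (st.radio with options=[0, 1, 2, 3, 4]); the label lists above only supply
# the display strings. A minimal sketch of mapping a stored value back to its label follows;
# the index 3 is a hypothetical example and this assignment is not used by the app.
_example_priority_index = 3
_example_priority_label = priority_labels[_example_priority_index]  # 'moderate priority'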
consent_text = '''
## Consent Form

You will be asked to take part in a research study. Before you decide to take part in this study, it is important that you understand why the study is being done and what it involves. Please read the following information carefully.

________________________________________________________________________________________

Project title: Moderator Intervention Prediction\\
Researchers: E.M. Vecchi, N. Falk, I. Jundi, G. Lapesa\\
Institute: Institute for Machine Speech Processing (IMS)\\
University: University of Stuttgart\\
Contact: eva-maria.vecchi@ims.uni-stuttgart.de

_________________________________________________________________________________________

### Description of the research study

In this study, we investigate an approach to assist expert moderators in online discussion platforms by automatically identifying comments in need of moderation. The annotators' task is to evaluate whether a comment returned by our system indeed requires moderator intervention, and to assess the impact such a system would have on the task of moderation. The intended use of the results of this study includes an analysis as well as processed versions of the collected data in the context of a publicly available scientific publication.

**Time required:** Your participation will take up to an estimated 8 hours. The time required may vary on an individual basis.

**Risks and benefits:** The risks of your participation in this online survey are those associated with basic computer tasks, including boredom, fatigue, mild stress, or breach of confidentiality. Some of the topics discussed in the online posts to be annotated may include violence, suicide or rape. The only benefit to you is the learning experience from participating in a research study. The benefit to society is the contribution to scientific knowledge.

**Compensation:** You will be compensated for participating in this study. If you are interested, we will also be more than happy to share more information about our research with you.

**Voluntary participation:** Your participation in this study is voluntary. It is your decision whether or not to participate in this study. If you decide to participate in this study, you will be asked to confirm this consent form ("I agree."). Even after signing the consent form, you can withdraw from participation at any time and without giving any reason. Partial data will not be analysed.

**Confidentiality:** Your responses to this experiment will be anonymous. Please do not share any information that can be used to identify you. The researcher(s) will make every effort to maintain your confidentiality.

**Contact:** If at any time you have questions about this study or would like to report any adverse effects due to this study, please contact the researcher(s).

**Trigger Warning:** The texts included in this study were produced in an online debate forum, and some of the topics discussed, how they are discussed, and the user perspectives expressed may be uncomfortable or sensitive. First, none of the texts included here represent the views of the researchers conducting the study. Second, we provide the option [described in detail in the guidelines provided in the next step] to skip any instance that is problematic or uncomfortable for the annotator, without any penalty to compensation.

### Consent:

Please indicate, in the box below, that you are at least 18 years old, have read and understood this consent form, are comfortable using the English language to complete the survey, and agree to participate in this online research survey.

- *I am age 18 or older.*
- *I have read this consent form or had it read to me.*
- *I am comfortable using the English language to participate in this survey.*
- *I agree to participate in this research and I want to continue with the survey.*
'''

guidelines_text = 'Please read the guidelines'

study_code = 'CE552C7F'
failed_sanity_check_code = 'C17NFPW5'
redirect_url = f'https://app.prolific.com/submissions/complete?cfc={study_code}'
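# Descriptive note: the guidelines question below is set up as a screening gate. Its
# other_params['accepted_values'] = [0] means only the first option is accepted; if it is
# rendered and the annotator picks any other answer, `show_field` sets
# `st.session_state.unacceptable_response`, after which the top-level flow shows the
# ineligibility notice and stops the app.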
annotation_guidelines_fields: List[Field] = [
    Field(name="annotation_guidelines", type="radio", title="Did you read the guidelines?", mandatory=True,
          other_params={'labels': ['Yes, in detail, and I understand the study',
                                   'Yes, in detail, but still confused',
                                   'Yes, I skimmed it',
                                   'I will read it later',
                                   'No, not interested in reading them',
                                   'I can not open the link',
                                   ],
                        'accepted_values': [0]}),
]

intro_fields: List[Field] = [
    Field(type="container", title="**Introductory Questions**", children=[
        Field(name="intro_moderation_goals", type="textarea",
              title="As a moderator, what are your goals/objectives for the comment section?"),
        Field(name="intro_experience", type="textarea",
              title="What do you feel contributes to a good experience for the users/discussion?"),
        Field(name="intro_valuable_comment", type="textarea",
              title="What makes a comment or contribution valuable?"),
        Field(name="intro_bad_comment", type="textarea",
              title="What makes a comment or contribution of poor quality, unconstructive or detrimental to the discussion?"),
    ]),
]

end_fields: List[Field] = [
    Field(type="container", title="**Thank you for taking part in this study!**", children=[
        Field(name="email", type="textarea",
              title="Enter an email address where we can send you the voucher. "
                    "**Without this, we cannot compensate you for your contribution to our research.**"),
    ]),
]
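# Minimal sketch (illustrative only, never rendered; the field names are hypothetical) of how
# `following_mandatory_values` composes: once the gate field takes one of the listed values,
# `show_field` treats the sibling fields that follow it as mandatory during validation, until
# the next container resets the flag.
_example_gate_fields: List[Field] = [
    Field(type="container", title="Example", children=[
        Field(name="example_gate", type="y_n_radio", title="Gate question",
              mandatory=True, following_mandatory_values=[1]),
        Field(name="example_detail", type="text", title="Only required once the gate matches",
              mandatory=False),
    ]),
]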
fields: List[Field] = [
    Field(name="topic", type="input_col", title="**Topic:**"),
    Field(type="expander", title="**Preceding Comment:** *(expand)*", children=[
        Field(name="parent_comment", type="input_col", title=""),
    ]),
    Field(name="comment", type="input_col", title="**Comment:**"),
    Field(name="image_name", type="input_col", title=""),  # "**Visualization of high contributing properties:**"),
    Field(type="container", title="**Need for Moderation**", children=[
        Field(name="to_moderate", type="y_n_radio",
              title="Do you feel this comment/discussion would benefit from moderator intervention?",
              mandatory=True),
        Field(name="priority_level", type="likert_radio",
              title="With what level of **priority** would you need to interact with this comment?",
              other_params={'labels': priority_labels}, mandatory=True),
    ]),
    Field(type="container", title="**Moderation Function**", children=[
        Field(name="mod_function", type="multiselect",
              title="What type of moderation function is needed here? *(Multiple selection possible)*",
              mandatory=True),
        Field(name="mod_function_other", type="text", title="*If Other, please specify:*", mandatory=False),
    ]),
    Field(type="container", title="**Contributing properties**", children=[
        Field(name="relevant_properties", type="multiselect",
              title="Which property(ies) is most impactful in your assessment? *(Multiple selection possible)*",
              other_params={'choices': property_choices}, mandatory=True),
        Field(name="relevant_properties_other", type="text", title="*If Other, please specify:*", mandatory=False),
    ]),
    Field(type="container", title="**Moderator Assistance**", children=[
        Field(name="helpful", type="y_n_radio",
              title="If this comment/discussion was flagged to you, would it be helpful in your task of moderation?",
              mandatory=True, following_mandatory_values=[0]),
        Field(name="mod_assistance", type="multiselect",
              title="If yes, please motivate the benefit it would contribute to the task. *(Multiple selection possible)*",
              other_params={'choices': assistance_choices}),
        Field(name="mod_assistance_other", type="text", title="*If Other, please specify:*", mandatory=False),
    ]),
    Field(type="container", title="**Other**", children=[
        Field(name="other_comments", type="text",
              title="Please provide any additional details or information: *(optional)*", mandatory=False),
    ]),
]

INPUT_FIELD_DEFAULT_VALUES = {'slider': 0, 'text': '', 'textarea': '', 'checkbox': False, 'radio': None,
                              'select_slider': 0, 'multiselect': None, 'likert_radio': None, 'y_n_radio': None}

SHOW_HELP_ICON = False
SHOW_VALIDATION_ERROR_MESSAGE = True

########################################################################################

if filesystem == 'hf':
    HF_TOKEN = os.environ.get("HF_TOKEN_WRITE")
    print("HF_TOKEN_WRITE is missing?", HF_TOKEN is None)
    hf_fs = HfFileSystem(token=HF_TOKEN)
else:
    hf_fs = LocalFileSystem()


def get_start_index():
    if hf_fs.exists(output_repo_path + '/' + get_base_path()):
        files = hf_fs.ls(output_repo_path + '/' + get_base_path())
        return len(files) - 2
    else:
        return -3


def read_data():
    with hf_fs.open(input_repo_path + '/' + to_annotate_file_name) as f:
        return pd.read_csv(f)


def read_saved_data():
    _path = get_path()
    if hf_fs.exists(output_repo_path + '/' + _path):
        with hf_fs.open(output_repo_path + '/' + _path) as f:
            try:
                return json.load(f)
            except json.JSONDecodeError as e:
                print(e)
    return None


# Write a remote file
def save_data(data):
    if not hf_fs.exists(f"{output_repo_path}/{get_base_path()}"):
        hf_fs.mkdir(f"{output_repo_path}/{get_base_path()}")
    with hf_fs.open(f"{output_repo_path}/{get_path()}", "w") as f:
        f.write(json.dumps(data))


def get_base_path():
    return f"{st.session_state.user_id}"


def get_path():
    return f"{get_base_path()}/{st.session_state.current_index}.json"


def display_image(image_path):
    with hf_fs.open(image_path) as f:
        img = Image.open(f)
        st.image(img, caption='8 most contributing properties', use_column_width=True)
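# Illustrative sketch of the persistence layout (this dict is not used by the app):
# `save_data` writes one JSON file per sample to
# "{output_repo_path}/{user_id}/{current_index}.json", and `prep_and_save_data` (defined
# further down) assembles a record roughly like the one below. All concrete values here
# are hypothetical.
_example_saved_record = {
    'user_id': 'annotator-42',
    'index': 0,
    'comment_id': 'c-123',        # columns listed in COLS_TO_SAVE, copied from the CSV row
    'comment': '...',
    'confidence_score': 0.87,
    'to_moderate': 1,             # one entry per input field rendered for the sample
    'skip': False,
}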
#################################### Streamlit App ####################################

# Function to navigate between rows
def navigate(index_change):
    st.session_state.current_index += index_change  # only works consistently if done before rerun
    js = '''
    '''
    st.components.v1.html(js, height=0)
    # https://discuss.streamlit.io/t/click-twice-on-button-for-changing-state/45633/2

    # disable "press enter to submit" for text inputs
    # https://discuss.streamlit.io/t/text-input-how-to-disable-press-enter-to-apply/14457/6
    components.html(
        """
        """,
        height=0
    )
    st.rerun()


def show_field(f: Field, index: int, data_collected):
    if f.type not in INPUT_FIELD_DEFAULT_VALUES.keys():
        st.session_state.following_mandatory = False
        match f.type:
            case 'input_col':
                value = st.session_state.data.iloc[index][f.name]
                if value and value is not np.nan:
                    st.write(f.title)
                    if f.name == 'image_name':
                        display_image(os.path.join(input_repo_path, 'images', value))
                    else:
                        st.write(value)
            case 'markdown':
                st.markdown(f.title)
            case 'expander' | 'container':
                with (st.expander(f.title) if f.type == 'expander' else st.container(border=True)):
                    if f.type == 'container':
                        st.markdown(f.title)
                    for child in f.children:
                        show_field(child, index, data_collected)
            case 'skip_checkbox':
                st.checkbox(f.title, key=f.name, value=False)
    else:
        key = f.name + str(index)
        st.session_state.data_inputs_keys.append(f.name)
        value = st.session_state[key] if key in st.session_state else \
            (data_collected[f.name] if data_collected else INPUT_FIELD_DEFAULT_VALUES[f.type])
        if not SHOW_HELP_ICON:
            f.title = f'**{f.title}**\n\n{f.help}' if f.help else f.title

        validation_error = False
        # validation only runs once the form has been displayed for the current index
        if st.session_state.form_displayed == st.session_state.current_index:
            if f.mandatory or st.session_state.following_mandatory:
                if st.session_state[key] == INPUT_FIELD_DEFAULT_VALUES[f.type]:
                    st.session_state.valid = False
                    validation_error = True
                elif f.following_mandatory_values and st.session_state[key] in f.following_mandatory_values:
                    st.session_state.following_mandatory = True

        # check for any unaccepted values
        if ((f.other_params.get('accepted_values')
             and value not in f.other_params.get('accepted_values'))
                or (f.other_params.get('accepted_values_per_sample')
                    and index in f.other_params.get('accepted_values_per_sample')
                    and value not in f.other_params.get('accepted_values_per_sample').get(index))):
            st.session_state.unacceptable_response = True

        if f.mandatory or st.session_state.following_mandatory:
            f.title += " :red[* required!]" if (validation_error and not SHOW_VALIDATION_ERROR_MESSAGE) else ' :red[*]'
        f.help = None

        match f.type:
            case 'checkbox':
                st.checkbox(f.title, key=key, value=value, help=f.help)
            case 'radio':
                st.radio(f.title, ["yes", "no"], key=key, help=f.help)
            case 'slider':
                st.slider(f.title, min_value=0, max_value=6, step=1, key=key, value=value, help=f.help)
            case 'select_slider':
                labels = default_labels if not f.other_params.get('labels') else f.other_params.get('labels')
                st.select_slider(f.title, options=[0, 20, 40, 60, 80, 100],
                                 format_func=lambda x: labels[x // 20], key=key, value=value, help=f.help)
            case 'multiselect':
                choices = default_choices if not f.other_params.get('choices') else f.other_params.get('choices')
                st.multiselect(f.title, options=choices, format_func=lambda x: x, key=key,
                               max_selections=3, default=value, help=f.help)
            case 'likert_radio':
                labels = default_labels if not f.other_params.get('labels') else f.other_params.get('labels')
                st.radio(f.title, options=[0, 1, 2, 3, 4], format_func=lambda x: labels[x],
                         key=key, index=value, help=f.help, horizontal=True)
            case 'y_n_radio':
                labels = yes_no_labels if not f.other_params.get('labels') else f.other_params.get('labels')
                st.radio(f.title, options=[0, 1], format_func=lambda x: labels[x],
                         key=key, index=value, help=f.help, horizontal=True)
            case 'text':
                st.text_input(f.title, key=key, value=value, max_chars=None)
            case 'textarea':
                st.text_area(f.title, key=key, value=value, max_chars=None)

        if validation_error:
            st.session_state.unacceptable_response = False
            st.error("Mandatory field")


def show_fields(fields: List[Field]):
    st.session_state.valid = True
    index = st.session_state.current_index
    data_collected = read_saved_data()
    st.session_state.data_inputs_keys = []
    st.session_state.following_mandatory = False
    for field in fields:
        show_field(field, index, data_collected)

    submitted = st.form_submit_button("Submit")
    if submitted:
        if 'unacceptable_response' in st.session_state and st.session_state.unacceptable_response:
            prep_and_save_data(index, ('skip' in st.session_state and st.session_state['skip']))
            st.rerun()
        skip_sample = ('skip' in st.session_state and st.session_state['skip'])
        if not skip_sample and not st.session_state.valid:
            st.error("Please fill in all mandatory fields")
            # st.rerun()  # filled-out values are not shown otherwise
        else:
            with st.spinner(text="saving"):
                prep_and_save_data(index, skip_sample)
            st.success("Feedback submitted successfully!")
            navigate(1)
    st.session_state.form_displayed = st.session_state.current_index
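# How submission and validation interact (descriptive note):
# - `show_fields` renders every field and adds the submit button; the enclosing st.form is
#   opened by the caller.
# - `show_field` only validates once `st.session_state.form_displayed` equals the current
#   index, i.e. from the first submit onwards, so the initial render shows no errors.
# - Missing mandatory inputs set `st.session_state.valid = False`; answers outside a field's
#   `accepted_values` set `st.session_state.unacceptable_response = True`.
# - On submit, the responses are saved via `prep_and_save_data` and `navigate(1)` advances to
#   the next sample, unless a mandatory field is still empty and the sample was not skipped.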
def prep_and_save_data(index, skip_sample):
    save_data({
        'user_id': st.session_state.user_id,
        'index': st.session_state.current_index,
        **(st.session_state.data.iloc[index][COLS_TO_SAVE].to_dict() if index >= 0 else {}),
        **{k: st.session_state[k + str(index)] for k in st.session_state.data_inputs_keys},
        'skip': skip_sample
    })


# st.set_page_config(layout='wide')

# Title of the app
st.title("Moderator Intervention Prediction")
st.markdown(
    """
    """,
    unsafe_allow_html=True)


def add_annotation_guidelines():
    st.write(f"username is {st.session_state.user_id}")
    st.markdown(
        "Annotation Guidelines " + guidelines_text + " ",
        unsafe_allow_html=True)
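# Top-level flow (descriptive note): `st.session_state.current_index` doubles as the app
# stage. Negative values are onboarding steps, non-negative values index the samples:
#   -3: consent form     -2: user ID entry     -1: guidelines + introductory questions
#    0 .. len(data)-1: one annotation form per sample     >= len(data): completion page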
if 'unacceptable_response' in st.session_state and st.session_state.unacceptable_response:
    add_annotation_guidelines()
    st.error("You are not eligible for this study. Thank you for your time!" + (
        "" if st.session_state.current_index < 0 else
        # " You will receive a small compensation as explained in the guidelines. "
        " Please email eva-maria.vecchi@ims.uni-stuttgart.de for issues or questions."))
    st.stop()

# Load the data to annotate
if 'data' not in st.session_state:
    st.session_state.data = read_data()

# user id
user_id_from_url = get_param_from_url("user_id")
if user_id_from_url:
    st.session_state.user_id = user_id_from_url

# current index
if 'current_index' not in st.session_state:
    start_index = get_start_index()
    st.session_state.current_index = start_index
    st.session_state.form_displayed = -3

if get_param_from_url('show_extra_fields'):
    fields += url_conditional_fields


def add_validated_submit(fields, message):
    st.session_state.form_displayed = st.session_state.current_index
    if st.form_submit_button("Submit"):
        if all(not x for x in fields):
            st.error(message)
        else:
            navigate(1)


def add_checked_submit():
    check = st.checkbox('I agree', key='consent')
    add_validated_submit([check], "Please agree to give your consent to proceed")


if st.session_state.current_index == -3:
    with st.form("data_form"):
        st.markdown(consent_text)
        add_checked_submit()
elif st.session_state.current_index == -2:
    if st.session_state.get('user_id'):
        navigate(1)
    else:
        with st.form("data_form"):
            st.session_state.user_id = st.text_input('User ID', value=user_id_from_url)
            add_validated_submit([st.session_state.user_id], "Please enter a valid user ID")
elif st.session_state.current_index == -1:
    add_annotation_guidelines()
    with st.form("intro_form"):
        show_fields(intro_fields)
elif st.session_state.current_index < len(st.session_state.data):
    add_annotation_guidelines()
    with st.form("data_form" + str(st.session_state.current_index)):
        show_fields(fields)
else:
    st.write(f"Thank you for taking part in this study! [Click here]({redirect_url}) to complete the study "
             f"or copy and paste this code back to finish the study: {study_code}")

# Navigation buttons
if st.session_state.current_index > 0:
    if st.button("Previous"):
        navigate(-1)

if 0 <= st.session_state.current_index < len(st.session_state.data):
    st.write(f"Page {st.session_state.current_index + 1} out of {len(st.session_state.data)}")

st.markdown(
    """
    """,
    unsafe_allow_html=True
)