"""Streamlit app that summarises the configs of lion-ai/pl_med_data and previews example rows."""
import os
from typing import Dict

import plotly.express as px
import streamlit as st
from datasets import Dataset, DatasetInfo, SplitInfo, get_dataset_infos, load_dataset

BASE_DATASET: str = "lion-ai/pl_med_data"
read_key = os.environ.get("HF_TOKEN", None)
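# Note (assumption): the token, if set, is forwarded to load_dataset so that private or
# gated configs of BASE_DATASET can be read; with HF_TOKEN unset, only public data loads.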
# Maps config names in the Hub repo to human-readable (Polish) display labels.
dataset_names_map: Dict[str, str] = {
    "znany_lekarz": "Porady - pytania i odpowiedzi",  # advice - questions and answers
    "kor_epikryzy_qa": "Dokumentacja medyczna - pytania i odpowiedzi",  # medical records - questions and answers
    "wikipedia": "Ogólna wiedza medyczna - pytania i odpowiedzi",  # general medical knowledge - questions and answers
}
reverse_dataset_names_map: Dict[str, str] = {v: k for k, v in dataset_names_map.items()}
@st.cache_resource
def list_datasets() -> Dict[str, DatasetInfo]:
    """
    Retrieve metadata for every config of the base dataset.

    Returns:
        Dict[str, DatasetInfo]: A mapping of config name to its DatasetInfo.
    """
    return get_dataset_infos(BASE_DATASET)
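# Sketch of the returned structure (hypothetical values):
#   {"wikipedia": DatasetInfo(splits={"raw": SplitInfo(name="raw", num_examples=1234), ...}), ...}
# i.e. per-config metadata only; no dataset rows are downloaded at this point.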
def show_examples(dataset_name: str, split: str) -> None:
    """Map the display name back to its config, load the first 10 rows of the split, and show them."""
    dataset_name = reverse_dataset_names_map.get(dataset_name, dataset_name)
    dataset: Dataset = load_dataset(BASE_DATASET, dataset_name, split=f"{split}[:10]", use_auth_token=read_key)
    st.data_editor(dataset.to_pandas(), use_container_width=True)
def count_all_examples(datasets: Dict[str, SplitInfo]) -> None:
    """Show the total number of examples across all filtered splits as a Streamlit metric."""
    count: int = sum(split_info.num_examples for split_info in datasets.values())
    st.metric(label="Total no. of instructions", value=f"{count:,}")
def filter_splits(dataset: Dict[str, DatasetInfo], split: str) -> Dict[str, SplitInfo]:
    """
    Filter the dataset configs, keeping only those that contain the specified split.

    Args:
        dataset (Dict[str, DatasetInfo]): A mapping of config name to dataset info.
        split (str): The split to filter the configs by.

    Returns:
        Dict[str, SplitInfo]: A mapping of display name to the info of the matching split.
    """
    dataset_splits: Dict[str, SplitInfo] = {}
    for dataset_name, dataset_info in dataset.items():
        if split in dataset_info.splits:
            dataset_name = dataset_names_map.get(dataset_name, dataset_name)
            dataset_splits[dataset_name] = dataset_info.splits[split]
    return dataset_splits
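# Sketch of expected behaviour (hypothetical values):
#   infos = {"wikipedia": DatasetInfo(splits={"raw": SplitInfo(name="raw", num_examples=42)})}
#   filter_splits(infos, "raw")
#   -> {"Ogólna wiedza medyczna - pytania i odpowiedzi": SplitInfo(name="raw", num_examples=42)}
# Configs that lack the requested split are silently dropped.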
split: str = st.selectbox("splits", ["raw", "processed"])
datasets: Dict[str, Dataset] = list_datasets()
# st.write(datasets)
filtered_datasets: Dict[str, Dataset] = filter_splits(datasets, split)
# st.write(filtered_datasets)
count_all_examples(filtered_datasets)
# Create a pie chart showing the number of examples per dataset
fig = px.pie(
    values=[split_info.num_examples for split_info in filtered_datasets.values()],
    names=list(filtered_datasets.keys()),
    # title=f"Number of Examples per Dataset ({split} split)",
    labels={"label": "Dataset", "value": "Number of Examples"},
)
# Update layout for better readability
fig.update_traces(textposition="inside", textinfo="value+label")
fig.update_layout(legend_title_text="Datasets", uniformtext_minsize=12, uniformtext_mode="hide")
st.plotly_chart(fig, use_container_width=True)
dataset_name = st.selectbox("Select a dataset", list(filtered_datasets.keys()))
show_examples(dataset_name, split)