|
--- |
|
configs: |
|
- config_name: default |
|
data_files: |
|
- split: train |
|
path: data/train-* |
|
- split: validation |
|
path: data/validation-* |
|
dataset_info: |
|
features: |
|
- name: query |
|
dtype: string |
|
- name: question |
|
dtype: string |
|
- name: table_names |
|
sequence: string |
|
- name: tables |
|
sequence: string |
|
- name: answer |
|
dtype: string |
|
- name: source |
|
dtype: string |
|
- name: target |
|
dtype: string |
|
splits: |
|
- name: train |
|
num_bytes: 2203191673 |
|
num_examples: 6715 |
|
- name: validation |
|
num_bytes: 434370435 |
|
num_examples: 985 |
|
download_size: 535322409 |
|
dataset_size: 2637562108 |
|
--- |
|
# Dataset Card for "spider-tableQA" |

 |

A table question-answering dataset derived from Spider: each example pairs a natural-language question and its corresponding SQL query with the input tables (serialized as JSON) and the resulting answer table, plus flattened `source`/`target` strings for sequence-to-sequence training. |
|
|
|
# Usage |
|
```python |
|
import pandas as pd |
|
from datasets import load_dataset |
|
|
|
spider_tableQA = load_dataset("vaishali/spider-tableQA") |
|
|
|
for sample in spider_tableQA['train']: |
|
question = sample['question'] |
|
sql_query = sample['query'] |
|
input_table_names = sample["table_names"] |
|
input_tables = [pd.read_json(table, orient='split') for table in sample['tables']] |
|
answer = pd.read_json(sample['answer'], orient='split') |
|
|
|
# flattened input/output |
|
input_to_model = sample["source"] |
|
target = sample["target"] |
|
``` |
|
|
|
[More Information Needed](https://github.com/huggingface/datasets/blob/main/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards) |