Still getting errors when loading the GAIR dataset

#3
by brando - opened

My error:

Resolving data files: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 22/22 [00:00<00:00, 161037.85it/s]
Setting num_proc from 255 to 22 for the train split as it only contains 22 shards.
Generating train split: 727874 examples [00:35, 20536.47 examples/s]
multiprocess.pool.RemoteTraceback:
"""
Traceback (most recent call last):
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/builder.py", line 1869, in _prepare_split_single
    writer.write_table(table)
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/arrow_writer.py", line 580, in write_table
    pa_table = table_cast(pa_table, self._schema)
               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/table.py", line 2283, in table_cast
    return cast_table_to_schema(table, schema)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/table.py", line 2242, in cast_table_to_schema
    arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/table.py", line 2242, in <listcomp>
    arrays = [cast_array_to_feature(table[name], feature) for name, feature in features.items()]
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/table.py", line 1795, in wrapper
    return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
                            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/table.py", line 1795, in <listcomp>
    return pa.chunked_array([func(chunk, *args, **kwargs) for chunk in array.chunks])
                             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/table.py", line 2006, in cast_array_to_feature
    arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()]
             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/table.py", line 2006, in <listcomp>
    arrays = [_c(array.field(name), subfeature) for name, subfeature in feature.items()]
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/table.py", line 1797, in wrapper
    return func(array, *args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/table.py", line 2100, in cast_array_to_feature
    return array_cast(
           ^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/table.py", line 1797, in wrapper
    return func(array, *args, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/table.py", line 1948, in array_cast
    raise TypeError(f"Couldn't cast array of type {_short_str(array.type)} to {_short_str(pa_type)}")
TypeError: Couldn't cast array of type string to null

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/multiprocess/pool.py", line 125, in worker
    result = (True, func(*args, **kwds))
                    ^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/utils/py_utils.py", line 678, in _write_generator_to_queue
    for i, result in enumerate(func(**kwargs)):
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/builder.py", line 1896, in _prepare_split_single
    raise DatasetGenerationError("An error occurred while generating the dataset") from e
datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset
"""

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/lfs/hyperturing1/0/brando9/beyond-scale-2-alignment-coeff/py_src/alignment/synth_data_gen/af/math_nl_2_lean4.py", line 101, in <module>
    fire.Fire(main)
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/fire/core.py", line 143, in Fire
    component_trace = _Fire(component, args, parsed_flag_args, context, name)
                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/fire/core.py", line 477, in _Fire
    component, remaining_args = _CallAndUpdateTrace(
                                ^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/fire/core.py", line 693, in _CallAndUpdateTrace
    component = fn(*varargs, **kwargs)
                ^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/beyond-scale-2-alignment-coeff/py_src/alignment/synth_data_gen/af/math_nl_2_lean4.py", line 83, in main
    dataset: Dataset = get_hf_dataset_gair()
                       ^^^^^^^^^^^^^^^^^^^^^
  File "/afs/cs.stanford.edu/u/brando9/beyond-scale-2-alignment-coeff/py_src/alignment/synth_data_gen/af/get_nl_math_data.py", line 125, in get_hf_dataset_gair
    dataset = load_dataset(path, split='train', num_proc=os.cpu_count())
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/load.py", line 2096, in load_dataset
    builder_instance.download_and_prepare(
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/builder.py", line 924, in download_and_prepare
    self._download_and_prepare(
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/builder.py", line 999, in _download_and_prepare
    self._prepare_split(split_generator, **prepare_split_kwargs)
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/builder.py", line 1769, in _prepare_split
    for job_id, done, content in iflatmap_unordered(
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/utils/py_utils.py", line 718, in iflatmap_unordered
    [async_result.get(timeout=0.05) for async_result in async_results]
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/datasets/utils/py_utils.py", line 718, in <listcomp>
    [async_result.get(timeout=0.05) for async_result in async_results]
     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/lfs/hyperturing1/0/brando9/miniconda/envs/beyond_scale_2/lib/python3.11/site-packages/multiprocess/pool.py", line 774, in get
    raise self._value
datasets.exceptions.DatasetGenerationError: An error occurred while generating the dataset
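For context, "Couldn't cast array of type string to null" usually means pyarrow inferred a column as type null in one JSON shard (every value of that field in the shard is null, or the field is absent), while the same column came out as string in another shard, so the per-shard tables cannot be cast to a single common schema. A minimal diagnostic sketch, assuming the shards are JSON-lines files under ~/data/GAIR/MathPile/train/ (the glob pattern is a guess; adjust it to the actual layout):

# Hypothetical diagnostic: load each shard on its own and print the features
# datasets infers for it, so a shard whose column types disagree stands out.
import glob
import os

from datasets import load_dataset

pattern = os.path.expanduser('~/data/GAIR/MathPile/train/**/*.jsonl')  # assumed layout
for shard in sorted(glob.glob(pattern, recursive=True)):
    ds = load_dataset('json', data_files=shard, split='train')
    print(shard, ds.features)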

I do have the dataset downloaded locally. My loading code:

# GAIR/MathPile: https://huggingface.co/datasets/GAIR/MathPile
import os

from datasets import Dataset, load_dataset

def get_hf_dataset_gair(
        path: str = '~/data/GAIR/MathPile/train/',
        # split: str = 'train',
        ) -> Dataset:
    path: str = os.path.expanduser(path)
    dataset = load_dataset(path, split='train', num_proc=os.cpu_count())
    print(dataset[0])  # Preview a single example from the dataset
    # Drop every column except 'text'
    all_columns = dataset.column_names  # Get all the column names
    all_columns.remove('text')  # Keep 'text' out of the list of columns to drop
    dataset = dataset.remove_columns(all_columns)
    # Shuffle and select 10k examples (select takes an iterable of indices, not an int)
    dataset = dataset.shuffle(seed=42)
    dataset = dataset.select(range(10_000))
    return dataset
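
Since the function only keeps the text column anyway, one workaround that sidesteps schema unification entirely is to read each shard with pandas and build the Dataset from the text field alone. A rough sketch, under the same assumption about the file layout; the function name is hypothetical:

# Hypothetical workaround: ignore every field except 'text' while reading,
# so a null-typed metadata column in one shard can no longer break the cast.
import glob
import os

import pandas as pd
from datasets import Dataset

def get_hf_dataset_gair_text_only(path: str = '~/data/GAIR/MathPile/train/') -> Dataset:
    pattern = os.path.join(os.path.expanduser(path), '**/*.jsonl')  # assumed layout
    texts: list[str] = []
    for shard in sorted(glob.glob(pattern, recursive=True)):
        df = pd.read_json(shard, lines=True)  # each line is one JSON record
        texts.extend(df['text'].astype(str).tolist())
    dataset = Dataset.from_dict({'text': texts})
    return dataset.shuffle(seed=42).select(range(10_000))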

e.g., the steps I used to download GAIR:

source $AFS/.bashrc
conda activate beyond_scale_2

mkdir -p ~/data/GAIR/MathPile
huggingface-cli download --resume-download --repo-type dataset GAIR/MathPile --local-dir ~/data/GAIR/MathPile --local-dir-use-symlinks False

cd ~/data/GAIR/MathPile/
find . -type f -name "*.gz" -exec gzip -d {} \;
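
Before re-running the full load with num_proc, it may be worth sanity-checking that decompression worked and that every shard carries a text field. A quick sketch, assuming the decompressed files end in .jsonl:

# Hypothetical sanity check: count the decompressed shards and confirm the
# first record of each one has a 'text' key.
import glob
import json
import os

shards = sorted(glob.glob(os.path.expanduser('~/data/GAIR/MathPile/train/**/*.jsonl'), recursive=True))
print(f'{len(shards)} shards found')
for shard in shards:
    with open(shard) as f:
        record = json.loads(f.readline())
    assert 'text' in record, f"missing 'text' in {shard}"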
