Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, Dask
Commit 06bfeb9 (1 parent: 02e4c56), committed by Qingyun

Upload dataset

CC-MAIN-2017-17/train-00000-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af5fcb9738c11180203322d7fd09996d52b48436aa25f64b128c259100afe58a
+size 430270627
CC-MAIN-2017-17/train-00001-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d74d015b9605af77814e2182d3d97e55779eb696a34b2395ed850a0cde1cbd6e
+size 429004223
CC-MAIN-2017-17/train-00002-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58cac1eb3582b990ed49971d0390b4344d5d4b40d364cf33bcc52f905887cebd
+size 433658033
CC-MAIN-2017-17/train-00003-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3b6ec1f003a664562e39ff3995d7c42009354d51fc86370866885a743e1af47
+size 430473389
CC-MAIN-2017-17/train-00004-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d49b63c37bead39fa8dc2697ce518eeedb2c970e54591ca897c6c5294bd60178
+size 430873762
CC-MAIN-2017-17/train-00005-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97b01d3b1126936537c0beca6d03df2d0bf4ed2396c300c64726faf342c2c22c
+size 432841530
CC-MAIN-2017-17/train-00006-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0ec1c37e8302d00924cdad39378f7f1919a35942ac260d056bc620a4834e186
+size 425980373
CC-MAIN-2017-17/train-00007-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d022b6648b7695a7a3d42b38e2e9aa9ecf307bc6751b6d1749df50a8805d255
+size 430475264
CC-MAIN-2017-17/train-00008-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:13d8c1ea9ef491135dc42404cb5a54f30c6e2caa16b377d314afbe58edb84078
+size 430821554
CC-MAIN-2017-17/train-00009-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:865493064870966c5cbf95edd3245678104491501e801e640a614cb94a0a68fd
+size 430556517
CC-MAIN-2017-17/train-00010-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33e61bae6aa4e381f491c59f013fc30e5034e3cf69738f42486fef1560bc0a3b
+size 430787864
CC-MAIN-2017-17/train-00011-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ba5ccd64406d185f62cb88042c8abf315eda0e02f2dfed2a2fa809f638ad1d9
+size 429132484
CC-MAIN-2017-17/train-00012-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f794c4b35dd048a7a2e6711d9a3c1c8acd2e0d4359cfba7756215da1e46937f
+size 428320717
CC-MAIN-2017-17/train-00013-of-00014.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78fe45b8cb52130a919ed24ccf9e88b940d444bc8975b66c9f41efb6602051f6
+size 431910219
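
Each of the 14 parquet shards above is committed as a Git LFS pointer rather than the raw data: the pointer records the LFS spec version, the SHA-256 digest of the actual shard, and its size in bytes (roughly 430 MB per shard). A minimal sketch of reading those fields back from a checked-out pointer file, assuming Python; the helper name is illustrative and not part of this commit:

from pathlib import Path

def parse_lfs_pointer(path: str) -> dict:
    # Parse the three-line Git LFS pointer layout shown in the hunks above:
    #   version <spec-url>
    #   oid sha256:<64-hex digest>
    #   size <byte count of the real parquet shard>
    fields = {}
    for line in Path(path).read_text().splitlines():
        if not line.strip():
            continue
        key, _, value = line.partition(" ")
        fields[key] = value
    fields["size"] = int(fields["size"])
    return fields

# Example (path relative to a checkout made without LFS smudging, so the
# pointer text is what is on disk):
# parse_lfs_pointer("CC-MAIN-2017-17/train-00000-of-00014.parquet")
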
README.md CHANGED
@@ -1672,6 +1672,58 @@ dataset_info:
     num_examples: 2458486
   download_size: 5422393873
   dataset_size: 12209904783
+- config_name: CC-MAIN-2017-17
+  features:
+  - name: general_metadata
+    struct:
+    - name: domain
+      sequence: string
+    - name: fluency_prob
+      dtype: float64
+    - name: id
+      dtype: string
+    - name: non_advertisement_prob
+      dtype: float64
+    - name: politics_prob
+      dtype: float64
+    - name: porn_prob
+      dtype: float64
+    - name: toxic_prob
+      dtype: float64
+    - name: url
+      dtype: string
+  - name: images
+    sequence: string
+  - name: texts
+    sequence: string
+  - name: metadata
+    list:
+    - name: aesthetic_prob
+      dtype: float64
+    - name: bytes
+      dtype: int64
+    - name: d_hash
+      dtype: string
+    - name: d_hash_dup_count
+      dtype: int64
+    - name: height
+      dtype: int64
+    - name: img_url_sha
+      dtype: string
+    - name: p_hash
+      dtype: string
+    - name: p_hash_dup_count
+      dtype: int64
+    - name: unsafe_prob
+      dtype: float64
+    - name: width
+      dtype: int64
+  splits:
+  - name: train
+    num_bytes: 13763109013
+    num_examples: 2615558
+  download_size: 6025106556
+  dataset_size: 13763109013
 configs:
 - config_name: CC-MAIN-2013-20
   data_files:
@@ -1801,6 +1853,10 @@ configs:
   data_files:
   - split: train
     path: CC-MAIN-2017-13/train-*
+- config_name: CC-MAIN-2017-17
+  data_files:
+  - split: train
+    path: CC-MAIN-2017-17/train-*
 ---
 
 We are uploading the dataset files ~
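
With the dataset card updated, the new snapshot should be loadable by config name through the datasets library. A minimal sketch, assuming Python and a placeholder repository id (the actual repo id is not shown in this diff); streaming avoids downloading all 14 shards (about 6 GB) up front:

from datasets import load_dataset

# "org/dataset-repo" is a placeholder -- replace it with the repository this commit belongs to.
ds = load_dataset(
    "org/dataset-repo",
    name="CC-MAIN-2017-17",   # config added by this commit
    split="train",
    streaming=True,           # iterate without fetching every parquet shard first
)

sample = next(iter(ds))
print(sample["general_metadata"]["url"])             # per-document metadata struct
print(len(sample["texts"]), len(sample["images"]))   # the texts and images sequence features
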