Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, Dask
Commit 02e4c56 (1 parent: 75224a7)
Qingyun committed: Upload dataset
CC-MAIN-2017-13/train-00000-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b866b3a1c6b5654efaa82a728207d042eed812a507b830aeb4226a3aa6abc7ac
+size 415140749

CC-MAIN-2017-13/train-00001-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da88f0bc3ddfbbc549846cd87dd46376447b2343f7b072f8173c63514b0ce706
+size 417259380

CC-MAIN-2017-13/train-00002-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a19824ad502e8e9b71111cf5f164a5a55ca734bb76010eca6b1d1269365be7a8
+size 417261176

CC-MAIN-2017-13/train-00003-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11ea6b3d243c0fe6a2cd82b966730dd9c96912991ba7fbbe0b6486444f57bff3
+size 417825643

CC-MAIN-2017-13/train-00004-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afc6c32d06a2f11bff561e53ae6c9d8d72e1ec28866dadcd92567b344e692efc
+size 416585386

CC-MAIN-2017-13/train-00005-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5baf8d3278367234a9d25cc8e3cf1e11c8ab51405d48b726b8f7d5ce632b183
+size 417217915

CC-MAIN-2017-13/train-00006-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78f4a70137b501f4041fe71c74852815d67b46687c7362ed447b4c6c27445a5a
+size 416590779

CC-MAIN-2017-13/train-00007-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81e61a3419d49d3cc476809c48efb2f22f999951429fc7054c7e1404760f1a9e
+size 416237263

CC-MAIN-2017-13/train-00008-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fc1f140f933d23a073179219470bfbe00f95d31e2a7ba440a56e2390038a5a45
+size 416626654

CC-MAIN-2017-13/train-00009-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:124c74998747002ff5ed6f20cbfd0ca39c5189ce1dc07e7ad0483956f8676d5e
+size 416970867

CC-MAIN-2017-13/train-00010-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93137a5fe6502aa13cabe00ea18fa70fb17ffcd1055f490dbf1e11841e6975ae
+size 419734556

CC-MAIN-2017-13/train-00011-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5fa5808c009ab914cfed1cb4cc6727fd2b88d49821456d933d0efd430e7772c3
+size 417004854

CC-MAIN-2017-13/train-00012-of-00013.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89529939f4f5b5c674fc1413529615663ad1d59b3c16ede21a57d8292b4fc67b
+size 417938651
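Each parquet shard above is committed as a Git LFS pointer: the repository stores only the pointer spec version, a sha256 object id, and the byte size, while the ~400 MB file itself lives in LFS storage. Below is a minimal sketch (not part of the commit) of checking a locally downloaded shard against its pointer; the local path is an assumption, and the oid and size are copied from the train-00000 pointer above.

import hashlib
import os

EXPECTED_OID = "b866b3a1c6b5654efaa82a728207d042eed812a507b830aeb4226a3aa6abc7ac"  # oid from the pointer
EXPECTED_SIZE = 415140749  # bytes, from the pointer
LOCAL_PATH = "CC-MAIN-2017-13/train-00000-of-00013.parquet"  # assumed local copy

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream in 1 MiB chunks so a ~400 MB shard never sits fully in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

assert os.path.getsize(LOCAL_PATH) == EXPECTED_SIZE, "size does not match LFS pointer"
assert sha256_of(LOCAL_PATH) == EXPECTED_OID, "sha256 does not match LFS oid"
print("shard matches its LFS pointer")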
README.md CHANGED
@@ -1620,6 +1620,58 @@ dataset_info:
     num_examples: 2561539
   download_size: 5398993614
   dataset_size: 12473370126
+- config_name: CC-MAIN-2017-13
+  features:
+  - name: general_metadata
+    struct:
+    - name: domain
+      sequence: string
+    - name: fluency_prob
+      dtype: float64
+    - name: id
+      dtype: string
+    - name: non_advertisement_prob
+      dtype: float64
+    - name: politics_prob
+      dtype: float64
+    - name: porn_prob
+      dtype: float64
+    - name: toxic_prob
+      dtype: float64
+    - name: url
+      dtype: string
+  - name: images
+    sequence: string
+  - name: texts
+    sequence: string
+  - name: metadata
+    list:
+    - name: aesthetic_prob
+      dtype: float64
+    - name: bytes
+      dtype: int64
+    - name: d_hash
+      dtype: string
+    - name: d_hash_dup_count
+      dtype: int64
+    - name: height
+      dtype: int64
+    - name: img_url_sha
+      dtype: string
+    - name: p_hash
+      dtype: string
+    - name: p_hash_dup_count
+      dtype: int64
+    - name: unsafe_prob
+      dtype: float64
+    - name: width
+      dtype: int64
+  splits:
+  - name: train
+    num_bytes: 12209904783
+    num_examples: 2458486
+  download_size: 5422393873
+  dataset_size: 12209904783
 configs:
 - config_name: CC-MAIN-2013-20
   data_files:
@@ -1745,6 +1797,10 @@ configs:
   data_files:
   - split: train
     path: CC-MAIN-2017-09/train-*
+- config_name: CC-MAIN-2017-13
+  data_files:
+  - split: train
+    path: CC-MAIN-2017-13/train-*
 ---
 
 We are uploading the dataset files ~
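The README change above registers CC-MAIN-2017-13 as a new config whose train split resolves to the thirteen parquet shards added in this commit (path: CC-MAIN-2017-13/train-*). As a rough sketch, assuming the datasets library listed in the page header, the config could be loaded and its declared fields inspected as follows; the repository id is a placeholder, since this page does not name the repo.

from datasets import load_dataset

ds = load_dataset(
    "namespace/dataset-name",   # placeholder repo id (assumption)
    name="CC-MAIN-2017-13",     # config registered in README.md above
    split="train",
    streaming=True,             # shards are ~400 MB each; stream rather than download all 13
)

example = next(iter(ds))
# Field names follow the schema declared in the README diff:
print(example["general_metadata"]["url"])              # source URL plus quality scores live in this struct
print(len(example["images"]), len(example["texts"]))   # parallel image/text sequences
print(example["metadata"][:1])                         # per-image stats: d_hash, width, height, ...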