Modalities: Text
Formats: json
Languages: English
Libraries: Datasets, pandas
Commit 9f91866 (verified) by michaelmior · 1 parent: 7b8ecc4

Update data with commits
.gitattributes CHANGED
@@ -1 +1,2 @@
 *.jsonl.gz filter=lfs diff=lfs merge=lfs -text
+commits.json filter=lfs diff=lfs merge=lfs -text
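
The new `.gitattributes` rule routes `commits.json` through Git LFS, so a clone made without `git lfs` installed will contain only a small pointer stub instead of the data. A minimal sketch for detecting that situation (the helper name is ours, not part of the repo):

    # Sketch: check whether commits.json is still an LFS pointer stub rather
    # than the real JSON Lines data (the pointer format is the three-line
    # version/oid/size block visible in the commits.json diff below).
    def is_lfs_pointer(path):
        with open(path, "rb") as f:
            return f.readline().startswith(b"version https://git-lfs.github.com/spec/v1")

    print(is_lfs_pointer("commits.json"))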
README.md CHANGED
@@ -15,13 +15,21 @@ This is somewhat restrictive, but still manages to find a large number of schema
 
     pipenv run python slurp.py --outfile repos.csv
 
-# Step 2: Download the JSON Schema files
+# Step 2: Fetch the history information for each file
+
+We fetch every revision of each JSON Schema file.
+Before downloading the files, we use the GitHub API to get the list of commit hashes.
+The resulting data is saved to `commits.json`.
+
+    pipenv run python fetch_history.py
+
+# Step 3: Download the JSON Schema files
 
 This script will download each schema which comes from GitHub and save it into subfolders in the `data` directory.
 
     ./fetch_files.sh
 
-# Step 3: Validate each JSON Schema
+# Step 4: Validate each JSON Schema
 
 The following script will read each schema in the `data` directory and confirm that it is a valid JSON Schema.
 A copy of all valid schemas will be placed in the `valid_data` directory.
@@ -29,7 +37,7 @@ Note that schemas are parsed as [JSON5](https://json5.org/) to be more permissiv
 
     pipenv run python validate_schemas.py
 
-# Step 4: Split into train, test, and validation
+# Step 5: Split into train, test, and validation
 
 Finally data is split into training, test, and validation sets.
 Schemas are always grouped together in the same set based on the GitHub organization they are from.
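
With the new Step 2, `fetch_history.py` runs before any files are downloaded and its output becomes `commits.json`. As a rough sanity check, a sketch like the following (not part of the repo) could summarize what that step produced, using the per-line fields that `fetch_history.py` writes:

    import json

    # Count schema files and total revisions recorded in commits.json.
    # Field names ("commits", "sha") come from fetch_history.py below;
    # the file name matches the README's Step 2.
    files = 0
    revisions = 0
    with open("commits.json") as f:
        for line in f:
            record = json.loads(line)
            files += 1
            revisions += len(record["commits"])
    print(f"{files} schema files, {revisions} revisions")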
commits.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c08a61a38b65249b8f81d93321acf191cc71a7d1c7737d04114eddb4b470b83
+size 14605432
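
Because the committed `commits.json` is stored through LFS, only the pointer appears in the diff. Based on what `fetch_history.py` writes (see its diff below), each line of the actual file should look roughly like the following, shown pretty-printed; all values here are invented placeholders:

    {
      "repository": "example-org/example-repo",
      "path": "schemas/example.schema.json",
      "repoStars": 42,
      "repoLastFetched": "2024-01-01T00:00:00Z",
      "commits": [
        {"sha": "<40-character commit sha>", "date": "2023-12-31T00:00:00Z"}
      ]
    }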
fetch_files.sh CHANGED
@@ -1,12 +1,12 @@
 #!/bin/bash
 
-pv repos.csv | tail -n +2 | \
-while IFS=, read -r repo stars fetched commit path; do
-  path=$(echo "$path" | tr -d '\r')
-  source=$(echo "$repo" | cut -d/ -f1)
-  if [ "$source" == "github.com" ]; then
-    repo_name=$(echo "$repo" | cut -d/ -f2-)
-    curl "https://raw.githubusercontent.com/$repo_name/$commit/$path" --silent --create-dirs -o "data/$repo/$path"
+pv commits.json |
+jq -r '("https://raw.githubusercontent.com/" + .repository) as $url | .path as $path | .commits[] | $url + "/" + .sha + "/" + $path' |
+while read url; do
+  # Strip the url prefix to get the path
+  path=$(echo "$url" | cut -d/ -f4-)
+  if ! [ -f "data/$path" ]; then
+    curl "$url" --silent --create-dirs -o "data/$path"
     sleep 1
   fi
 done
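
The rewritten script no longer walks `repos.csv`; it expands every record in `commits.json` into one raw.githubusercontent.com URL per commit, skips files already present under `data/`, and downloads the rest. A hedged Python rendering of the same URL expansion (the function name is ours) may be easier to read than the jq one-liner:

    import json

    # Mirror of the jq filter in fetch_files.sh: one download URL per
    # (schema file, commit) pair recorded in commits.json.
    def revision_urls(commits_path="commits.json"):
        with open(commits_path) as f:
            for line in f:
                record = json.loads(line)
                base = "https://raw.githubusercontent.com/" + record["repository"]
                for commit in record["commits"]:
                    yield base + "/" + commit["sha"] + "/" + record["path"]

    for url in revision_urls():
        print(url)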
fetch_history.py CHANGED
@@ -28,10 +28,22 @@ def get_commits(session, repo, path):
         requests.exceptions.ReadTimeout,
     ):
         # Skip on request error
-        return set()
+        return None
     else:
         # Get the commit hashes
-        return set(c["sha"] for c in r.json())
+        obj = r.json()
+        if isinstance(obj, list):
+            commits = []
+            for c in obj:
+                try:
+                    commits.append(
+                        {"sha": c["sha"], "date": c["commit"]["committer"]["date"]}
+                    )
+                except KeyError:
+                    pass
+            return commits
+        else:
+            return None
 
 
 def main():
@@ -54,9 +66,16 @@ def main():
         commits = get_commits(session, repo, row["path"])
 
         # Write the collected commits
-        obj = {"repository": repo, "path": row["path"], "commits": list(commits)}
-        json.dump(obj, sys.stdout)
-        sys.stdout.write("\n")
+        if commits:
+            obj = {
+                "repository": repo,
+                "path": row["path"],
+                "repoStars": row["repoStars"],
+                "repoLastFetched": row["repoLastFetched"],
+                "commits": list(commits),
+            }
+            json.dump(obj, sys.stdout)
+            sys.stdout.write("\n")
 
 
 if __name__ == "__main__":
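
`get_commits` now returns a list of `{sha, date}` pairs (or `None` on failure) instead of a bare set of hashes, and `main` only emits a record when at least one commit was found. The fields it reads come from GitHub's list-commits response, a JSON array shaped roughly like this (values invented, only the fields the script reads are shown):

    example_response = [
        {
            "sha": "<40-character commit sha>",      # becomes commits[i]["sha"]
            "commit": {
                "committer": {
                    "date": "2024-01-01T00:00:00Z"   # becomes commits[i]["date"]
                }
            },
        }
    ]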
test.jsonl.gz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d5248fb1d00b834b10313bc391e7b0a6d22993b3c130c3dcf6335eb3819ede53
-size 771540
+oid sha256:9a60e9e12cc5c73a66b9c14b8fdd368b534dc7235bb12165043f658cccc7f5b1
+size 1074453
train.jsonl.gz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:293f05cf668011d6d58ffc68f2fa0ac05f04b47e7b2b2d778e506c0c30a01153
-size 23484213
+oid sha256:4cf2c9605a9cbd1cb8ecfb1bdaafd52c337ed47489588256c45a487c9058aff7
+size 38163445
train_split.py CHANGED
@@ -1,5 +1,5 @@
 import argparse
-import csv
+import copy
 import gzip
 import json
 import os
@@ -27,11 +27,19 @@ def write_schemas(filename, schema_list, schema_data):
         filename = str(os.path.join(*Path(schema).parts[1:]))
         data = schema_data[filename]
         schema = open(schema).read()
+
+        # Get stars or null if missing
+        try:
+            repoStars = int(data["repoStars"])
+        except (KeyError, ValueError):
+            repoStars = None
+
         obj = {
             "repository": data["repository"],
             "commit": data["commit"],
+            "commitDate": data["commitDate"],
             "path": data["path"],
-            "repoStars": int(data["repoStars"]),
+            "repoStars": repoStars,
             "repoLastFetched": data["repoLastFetched"],
             "content": schema,
         }
@@ -39,7 +47,7 @@ def write_schemas(filename, schema_list, schema_data):
         f.write("\n")
 
 
-def main(similarity, split, seed, repo_file):
+def main(similarity, split, seed, commits_file):
     files = files_list()
 
     # Prepare a BK Tree if we're doing similarity grouping
@@ -106,11 +114,15 @@ def main(similarity, split, seed, repo_file):
     (test_indexes, val_indexes) = next(gss.split(test_schemas, groups=test_groups))
 
     schema_data = {}
-    with open(repo_file) as csvfile:
-        reader = csv.DictReader(csvfile)
-        for row in reader:
-            filename = os.path.join(row["repository"], row["path"])
-            schema_data[filename] = row
+    with open(commits_file) as f:
+        for line in f:
+            obj = json.loads(line)
+            for commit in obj["commits"]:
+                obj = copy.deepcopy(obj)
+                filename = os.path.join(obj["repository"], commit["sha"], obj["path"])
+                obj["commit"] = commit["sha"]
+                obj["commitDate"] = commit["date"]
+                schema_data[filename] = obj
 
     # Write the train and test sets
     write_schemas("train.jsonl.gz", all_schemas[train_indexes], schema_data)
@@ -123,6 +135,6 @@ if __name__ == "__main__":
     parser.add_argument("--similarity", default=None, type=float)
    parser.add_argument("--seed", default=94, type=int)
     parser.add_argument("--split", default=0.8, type=float)
-    parser.add_argument("--repo_file", default="repos.csv")
+    parser.add_argument("--commits_file", default="commits.json")
     args = parser.parse_args()
-    main(args.similarity, args.split, args.seed, args.repo_file)
+    main(args.similarity, args.split, args.seed, args.commits_file)
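
The split script now builds `schema_data` keyed by `<repository>/<sha>/<path>`, one entry per revision, which matches the directory layout `fetch_files.sh` creates under `data/`. A small sketch of that lookup convention (the example path is invented):

    import os
    from pathlib import Path

    # write_schemas() strips the leading "data/" component and uses the rest
    # as the key into schema_data, i.e. <repository>/<sha>/<path>.
    schema_file = "data/example-org/example-repo/0123abcdef/example.schema.json"
    key = str(os.path.join(*Path(schema_file).parts[1:]))
    print(key)  # example-org/example-repo/0123abcdef/example.schema.json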
validation.jsonl.gz CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:99d35c67ed0c473c07571ab60360df1c932e8fa345ba6a0d2b937de404788d69
-size 560261
+oid sha256:ab8a6ee8067de1eae9582f7cff274b5aab9f9719460471fe45283832ae8e3abd
+size 5578087
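
All three split files remain gzipped JSON Lines; only their contents grew now that every commit of each schema is included. A minimal reading sketch, using the record fields written by `train_split.py`:

    import gzip
    import json

    # Each line is one schema revision with the fields emitted by
    # write_schemas(): repository, commit, commitDate, path, repoStars,
    # repoLastFetched, and content (the raw schema text).
    with gzip.open("train.jsonl.gz", "rt") as f:
        for line in f:
            record = json.loads(line)
            print(record["repository"], record["commit"], record["commitDate"])
            break  # just peek at the first record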