Datasets:
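The script below builds the dataset splits. It walks the schema files under valid_data, groups files from the same repository (and, when a --similarity threshold is given, files whose contents are nearly identical) so near-duplicates never cross split boundaries, and writes train.jsonl.gz, test.jsonl.gz, and validation.jsonl.gz with one schema and its commit metadata per line.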
import argparse
import copy
import gzip
import json
import os
from pathlib import Path
import sys
import numpy as np
import pybktree
from sklearn.model_selection import GroupShuffleSplit
import tqdm
import unionfind
import Levenshtein
def files_list():
    data_path = Path("valid_data")
    files = [f for f in data_path.rglob("*.json") if f.is_file()]
    return files

def write_schemas(filename, schema_list, schema_data):
    sys.stderr.write(f"Writing {filename}…\n")
    with gzip.open(filename, "wt") as f:
        for schema in tqdm.tqdm(list(schema_list)):
            # Look up commit metadata by the path relative to the data directory
            key = str(os.path.join(*Path(schema).parts[1:]))
            data = schema_data[key]
            content = Path(schema).read_text()

            # Get stars or null if missing
            try:
                repoStars = int(data["repoStars"])
            except (KeyError, ValueError):
                repoStars = None

            obj = {
                "repository": data["repository"],
                "commit": data["commit"],
                "commitDate": data["commitDate"],
                "path": data["path"],
                "repoStars": repoStars,
                "repoLastFetched": data["repoLastFetched"],
                "content": content,
            }
            json.dump(obj, f)
            f.write("\n")

def main(similarity, split, seed, commits_file):
    files = files_list()

    # Prepare a BK-tree if we're doing similarity grouping; items are
    # (path, content) tuples, so the distance compares the content fields
    # using a length-normalized Levenshtein distance
    if similarity:
        tree = pybktree.BKTree(
            lambda a, b: Levenshtein.distance(a[1], b[1]) / max(len(a[1]), len(b[1]))
        )
    # Initialize a union-find data structure
    uf = unionfind.UnionFind()

    # Track the first schema added to each repository so we can group them
    org_map = {}
    sys.stderr.write("Grouping by repository…\n")
    for schema_file in tqdm.tqdm(files):
        path_str = str(schema_file)

        # Get the organization and repository name from the path
        org = schema_file.parts[1:3]
        uf.add(path_str)
        if org not in org_map:
            # Track the first schema for this repository
            org_map[org] = path_str
        else:
            # Merge with the previous group if this
            # repository has been seen before
            uf.union(org_map[org], path_str)

        # Add to the BK-tree so similar schemas can be found later
        if similarity:
            tree.add((path_str, schema_file.read_text().strip()))
    del org_map
    # Optionally group together similar files
    if similarity:
        sys.stderr.write("Grouping similar files…\n")
        for schema_file in tqdm.tqdm(files):
            path_str = str(schema_file)
            data = schema_file.read_text().strip()

            # Find similar schemas for this schema and group them together
            # (BKTree.find returns (distance, item) pairs, where each item
            # is a (path, content) tuple stored above)
            for _, (other_path, _) in tree.find((path_str, data), similarity):
                uf.union(path_str, other_path)
    # Produce a list of schemas and their associated groups
    all_schemas = list()
    schema_groups = list()
    for group, schemas in enumerate(uf.components()):
        all_schemas.extend(schemas)
        schema_groups.extend([group] * len(schemas))

    # Split the schemas into training and test
    all_schemas = np.array(all_schemas)
    schema_groups = np.array(schema_groups)
    gss = GroupShuffleSplit(n_splits=1, train_size=split, random_state=seed)
    (train_indexes, test_indexes) = next(gss.split(all_schemas, groups=schema_groups))
    test_schemas = all_schemas[test_indexes]
    test_groups = schema_groups[test_indexes]

    # Split the held-out schemas evenly into test and validation
    gss = GroupShuffleSplit(n_splits=1, train_size=0.5, random_state=seed)
    (test_indexes, val_indexes) = next(gss.split(test_schemas, groups=test_groups))
    # Load commit metadata for each schema file, keyed by repository/sha/path
    schema_data = {}
    with open(commits_file) as f:
        for line in f:
            obj = json.loads(line)
            for commit in obj["commits"]:
                entry = copy.deepcopy(obj)
                filename = os.path.join(obj["repository"], commit["sha"], obj["path"])
                entry["commit"] = commit["sha"]
                entry["commitDate"] = commit["date"]
                schema_data[filename] = entry
    # Write the train, test, and validation sets
    write_schemas("train.jsonl.gz", all_schemas[train_indexes], schema_data)
    write_schemas("test.jsonl.gz", test_schemas[test_indexes], schema_data)
    write_schemas("validation.jsonl.gz", test_schemas[val_indexes], schema_data)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--similarity", default=None, type=float)
    parser.add_argument("--seed", default=94, type=int)
    parser.add_argument("--split", default=0.8, type=float)
    parser.add_argument("--commits_file", default="commits.json")
    args = parser.parse_args()
    main(args.similarity, args.split, args.seed, args.commits_file)
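Assuming the script above is saved as split.py (the filename is not given here), the splits can be produced with, for example:

    python split.py --commits_file commits.json --split 0.8 --seed 94

Passing a normalized edit-distance threshold such as --similarity 0.3 additionally groups near-identical schemas before splitting. The outputs train.jsonl.gz, test.jsonl.gz, and validation.jsonl.gz are written to the current directory.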
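The commits file is read as JSON Lines. Its full schema is not shown here, but from the fields the script accesses, each line appears to need at least the following (all values are placeholders; repoStars may be omitted, in which case null is written):

    {"repository": "example-org/example-repo", "path": "schema.json", "repoStars": 42, "repoLastFetched": "2024-01-01T00:00:00Z", "commits": [{"sha": "abc1234", "date": "2023-12-31T12:00:00Z"}]}

One metadata record per schema and commit is then keyed by repository/sha/path so it can be attached when the splits are written.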