import argparse
import csv
import gzip
import json
import os
import sys
from pathlib import Path

import Levenshtein
import numpy as np
import pybktree
import tqdm
import unionfind
from sklearn.model_selection import GroupShuffleSplit


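# Assumes schemas live under valid_data/<org>/<repo>/..., so a path's second
# and third components identify the repository, and parts[1:] mirrors the
# repository-relative path used as the key into the metadata CSV.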
def files_list():
    """Recursively collect all JSON files under the data directory."""
    data_path = Path("valid_data")
    files = [f for f in data_path.rglob("*.json") if f.is_file()]
    return files


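# Each output record pairs a schema's raw text with the repository metadata
# (repository, commit, path, repoStars, repoLastFetched) loaded from the
# repository CSV.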
def write_schemas(filename, schema_list, schema_data):
    """Write schemas and their repository metadata as gzipped JSON Lines."""
    sys.stderr.write(f"Writing {filename}…\n")
    with gzip.open(filename, "wt") as f:
        for schema in tqdm.tqdm(list(schema_list)):
            # Drop the leading data directory to recover the metadata key,
            # which is <repository>/<path>.
            key = str(os.path.join(*Path(schema).parts[1:]))
            data = schema_data[key]
            content = Path(schema).read_text()
            obj = {
                "repository": data["repository"],
                "commit": data["commit"],
                "path": data["path"],
                "repoStars": data["repoStars"],
                "repoLastFetched": data["repoLastFetched"],
                "content": content,
            }
            json.dump(obj, f)
            f.write("\n")


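# Build leakage-free splits: files are grouped by repository (and optionally
# by near-duplicate content), then GroupShuffleSplit assigns whole groups to
# train/test/validation so related schemas never straddle a split boundary.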
def main(similarity, split, seed, repo_file):
    files = files_list()

    if similarity:
        # Items in the tree are (path, content) tuples, so the metric must
        # compare the content element. Normalized edit distance lies in
        # [0, 1]; max(…, 1) guards against two empty documents.
        tree = pybktree.BKTree(
            lambda a, b: Levenshtein.distance(a[1], b[1])
            / max(len(a[1]), len(b[1]), 1)
        )
    uf = unionfind.UnionFind()

    # Remember one representative file per repository so every later file
    # from the same repository can be unioned with it.
    org_map = {}
    sys.stderr.write("Grouping by repository…\n")
    for schema_file in tqdm.tqdm(files):
        path_str = str(schema_file)

        # parts[1:3] is the (org, repo) pair under the valid_data directory
        org = schema_file.parts[1:3]

        uf.add(path_str)
        if org not in org_map:
            org_map[org] = path_str
        else:
            uf.union(org_map[org], path_str)

        if similarity:
            tree.add((path_str, schema_file.read_text().strip()))
    del org_map
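    # Optional second pass: query the BK-tree for documents within the
    # similarity threshold and merge their groups. pybktree's find() takes an
    # item of the same shape as those stored and returns a list of
    # (distance, item) pairs no farther away than the given radius.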
    if similarity:
        sys.stderr.write("Grouping similar files…\n")
        for schema_file in tqdm.tqdm(files):
            path_str = str(schema_file)
            data = schema_file.read_text().strip()

            for _, (other_path, _) in tree.find((path_str, data), similarity):
                uf.union(path_str, other_path)
    # Flatten the union-find components into parallel arrays of file paths
    # and group labels so GroupShuffleSplit can keep each group intact.
    all_schemas = []
    schema_groups = []
    for group, schemas in enumerate(uf.components()):
        all_schemas.extend(schemas)
        schema_groups.extend([group] * len(schemas))

    all_schemas = np.array(all_schemas)
    schema_groups = np.array(schema_groups)
    gss = GroupShuffleSplit(n_splits=1, train_size=split, random_state=seed)
    (train_indexes, test_indexes) = next(gss.split(all_schemas, groups=schema_groups))
    # Split the held-out remainder 50/50 into test and validation,
    # again keeping groups intact.
    test_schemas = all_schemas[test_indexes]
    test_groups = schema_groups[test_indexes]
    gss = GroupShuffleSplit(n_splits=1, train_size=0.5, random_state=seed)
    (test_indexes, val_indexes) = next(gss.split(test_schemas, groups=test_groups))
    # Load repository metadata, keyed by <repository>/<path> to match the
    # lookup in write_schemas.
    schema_data = {}
    with open(repo_file) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            key = os.path.join(row["repository"], row["path"])
            schema_data[key] = row
write_schemas("train.jsonl.gz", all_schemas[train_indexes], schema_data) |
|
write_schemas("test.jsonl.gz", test_schemas[test_indexes], schema_data) |
|
write_schemas("validation.jsonl.gz", test_schemas[val_indexes], schema_data) |
|
|
|
|
|
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--similarity",
        default=None,
        type=float,
        help="normalized edit distance threshold for grouping near-duplicates",
    )
    parser.add_argument("--seed", default=94, type=int, help="random seed for the splits")
    parser.add_argument("--split", default=0.8, type=float, help="fraction of groups used for training")
    parser.add_argument("--repo_file", default="repos.csv", help="CSV of repository metadata")
    args = parser.parse_args()
    main(args.similarity, args.split, args.seed, args.repo_file)