json-schema / train_split.py

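"""Split the collected JSON Schema files into train/validation/test sets.

Schemas under valid_data/ are grouped by repository (and optionally by textual
similarity using a BK-tree over normalized Levenshtein distance) so that
schemas from the same group never land in different splits. Repository
metadata is read from a CSV file and each split is written as gzipped
JSON Lines.
"""
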
import argparse
import csv
import gzip
import json
import os
from pathlib import Path
import sys

import numpy as np
import pybktree
from sklearn.model_selection import GroupShuffleSplit
import tqdm
import unionfind
import Levenshtein


def files_list():
    # Collect every JSON schema file under the valid_data directory
    data_path = Path("valid_data")
    files = [f for f in data_path.rglob("*.json") if f.is_file()]
    return files


def write_schemas(filename, schema_list, schema_data):
    sys.stderr.write(f"Writing {filename}…\n")
    with gzip.open(filename, "wt") as f:
        for schema in tqdm.tqdm(list(schema_list)):
            # Strip the leading data directory to get the key used in schema_data
            filename = str(os.path.join(*Path(schema).parts[1:]))
            data = schema_data[filename]
            schema = open(schema).read()
            obj = {
                "repository": data["repository"],
                "commit": data["commit"],
                "path": data["path"],
                "repoStars": int(data["repoStars"]),
                "repoLastFetched": data["repoLastFetched"],
                "content": schema,
            }
            json.dump(obj, f)
            f.write("\n")


def main(similarity, split, seed, repo_file):
    files = files_list()

    # Prepare a BK tree if we're doing similarity grouping.
    # Items are (path, content) tuples and the distance is the
    # Levenshtein distance normalized by the longer content.
    if similarity:
        tree = pybktree.BKTree(
            lambda a, b: Levenshtein.distance(a[1], b[1]) / max(len(a[1]), len(b[1]))
        )

    # Initialize a union-find data structure
    uf = unionfind.UnionFind()

    # Track the first schema added to each org so we can group them
    org_map = {}
    sys.stderr.write("Grouping by repository…\n")
    for schema_file in tqdm.tqdm(files):
        path_str = str(schema_file)

        # Get the organization name from the path
        org = schema_file.parts[1:3]

        uf.add(path_str)
        if org not in org_map:
            # Track the first schema for this organization
            org_map[org] = path_str
        else:
            # Merge with the previous group if this
            # organization has been seen before
            uf.union(org_map[org], path_str)

        # Add to the BK Tree
        if similarity:
            tree.add((path_str, open(schema_file).read().strip()))
    del org_map

    # Optionally group together similar files
    if similarity:
        sys.stderr.write("Grouping similar files…\n")
        for schema_file in tqdm.tqdm(files):
            path_str = str(schema_file)
            data = open(schema_file).read().strip()

            # Find similar schemas for this schema and group them together
            for _, (other_path, _) in tree.find((path_str, data), similarity):
                uf.union(path_str, other_path)

    # Produce a list of schemas and their associated groups
    all_schemas = list()
    schema_groups = list()
    for group, schemas in enumerate(uf.components()):
        all_schemas.extend(schemas)
        schema_groups.extend([group] * len(schemas))

    # Split the schemas into training and test
    all_schemas = np.array(all_schemas)
    schema_groups = np.array(schema_groups)
    gss = GroupShuffleSplit(n_splits=1, train_size=split, random_state=seed)
    (train_indexes, test_indexes) = next(gss.split(all_schemas, groups=schema_groups))

    # Split the held-out set in half to get separate test and validation sets
    test_schemas = all_schemas[test_indexes]
    test_groups = schema_groups[test_indexes]
    gss = GroupShuffleSplit(n_splits=1, train_size=0.5, random_state=seed)
    (test_indexes, val_indexes) = next(gss.split(test_schemas, groups=test_groups))

    # Load repository metadata keyed by repository and path
    schema_data = {}
    with open(repo_file) as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            filename = os.path.join(row["repository"], row["path"])
            schema_data[filename] = row

    # Write the train, test, and validation sets
    write_schemas("train.jsonl.gz", all_schemas[train_indexes], schema_data)
    write_schemas("test.jsonl.gz", test_schemas[test_indexes], schema_data)
    write_schemas("validation.jsonl.gz", test_schemas[val_indexes], schema_data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--similarity", default=None, type=float)
    parser.add_argument("--seed", default=94, type=int)
    parser.add_argument("--split", default=0.8, type=float)
    parser.add_argument("--repo_file", default="repos.csv")
    args = parser.parse_args()

    main(args.similarity, args.split, args.seed, args.repo_file)
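
Each line of the resulting train.jsonl.gz, test.jsonl.gz, and validation.jsonl.gz is a JSON object with the fields written above (repository, commit, path, repoStars, repoLastFetched, content). A minimal sketch of reading a split back, assuming the script has already been run in the current directory:

import gzip
import json

# Iterate over the records written by write_schemas above
with gzip.open("train.jsonl.gz", "rt") as f:
    for line in f:
        record = json.loads(line)
        # "content" holds the raw schema text; the other keys are repository metadata
        print(record["repository"], record["path"], record["repoStars"])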