michaelmior committed · Commit 7220d9f (verified) · 1 parent: d17161f

Finalize train/test split script

Files changed (1): train_split.py (+66, -13)
train_split.py CHANGED
@@ -1,7 +1,10 @@
 import argparse
+import csv
+import gzip
 import json
 import os
 from pathlib import Path
+import sys
 
 import numpy as np
 import pybktree
@@ -17,59 +20,109 @@ def files_list():
     return files
 
 
-def main(similarity, split, seed):
+def write_schemas(filename, schema_list, schema_data):
+    sys.stderr.write(f"Writing {filename}…\n")
+    with gzip.open(filename, "wt") as f:
+        for schema in tqdm.tqdm(list(schema_list)):
+            filename = str(os.path.join(*Path(schema).parts[1:]))
+            data = schema_data[filename]
+            schema = open(schema).read()
+            obj = {
+                "repository": data["repository"],
+                "commit": data["commit"],
+                "path": data["path"],
+                "repoStars": data["repoStars"],
+                "repoLastFetched": data["repoLastFetched"],
+                "content": schema,
+            }
+            json.dump(obj, f)
+            f.write("\n")
+
+
+def main(similarity, split, seed, repo_file):
     files = files_list()
 
+    # Prepare a BK Tree if we're doing similarity grouping
     if similarity:
         tree = pybktree.BKTree(
             lambda a, b: Levenshtein.distance(a, b) / max(len(a), len(b))
         )
 
+    # Initialize a union-find data structure
     uf = unionfind.UnionFind()
-    repo_map = {}
+
+    # Track the first schema added to each org so we can group them
+    org_map = {}
+
+    sys.stderr.write("Grouping by repository…\n")
     for schema_file in tqdm.tqdm(files):
         path_str = str(schema_file)
-        repo = schema_file.parts[1:4]
+
+        # Get the organization name from the path
+        org = schema_file.parts[1:3]
+
         uf.add(str(schema_file))
-        if repo not in repo_map:
-            repo_map[repo] = str(schema_file)
+        if org not in org_map:
+            # Track the first schema for this organization
+            org_map[org] = str(schema_file)
         else:
-            uf.union(repo_map[repo], str(schema_file))
+            # Merge with the previous group if this
+            # organization has been seen before
+            uf.union(org_map[org], str(schema_file))
 
+        # Add to the BK Tree
         if similarity:
             tree.add((str(schema_file), open(schema_file).read().strip()))
 
-    del repo_map
+    del org_map
 
     # Optionally group together similar files
     if similarity:
+        sys.stderr.write("Grouping similar files…\n")
        for schema_file in tqdm.tqdm(files):
            path_str = str(schema_file)
            data = open(schema_file).read().strip()
+
+            # Find similar schemas for this schema and group them together
            for other_path, _ in tree.find(data, similarity):
                uf.union(path_str, other_path)
 
+    # Produce a list of schemas and their associated groups
     all_schemas = list()
     schema_groups = list()
     for group, schemas in enumerate(uf.components()):
         all_schemas.extend(schemas)
         schema_groups.extend([group] * len(schemas))
 
+    # Split the schemas into training and test
     all_schemas = np.array(all_schemas)
     schema_groups = np.array(schema_groups)
     gss = GroupShuffleSplit(n_splits=1, train_size=split, random_state=seed)
     (train_indexes, test_indexes) = next(gss.split(all_schemas, groups=schema_groups))
 
-    open("train_schemas.json", "w").write(
-        json.dumps(all_schemas[train_indexes].tolist())
-    )
-    open("test_schemas.json", "w").write(json.dumps(all_schemas[test_indexes].tolist()))
+    test_schemas = all_schemas[test_indexes]
+    test_groups = schema_groups[test_indexes]
+    gss = GroupShuffleSplit(n_splits=1, train_size=0.5, random_state=seed)
+    (test_indexes, val_indexes) = next(gss.split(test_schemas, groups=test_groups))
+
+    schema_data = {}
+    with open(repo_file) as csvfile:
+        reader = csv.DictReader(csvfile)
+        for row in reader:
+            filename = os.path.join(row["repository"], row["path"])
+            schema_data[filename] = row
+
+    # Write the train and test sets
+    write_schemas("train.jsonl.gz", all_schemas[train_indexes], schema_data)
+    write_schemas("test.jsonl.gz", test_schemas[test_indexes], schema_data)
+    write_schemas("validation.jsonl.gz", test_schemas[val_indexes], schema_data)
 
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
     parser.add_argument("--similarity", default=None, type=float)
-    parser.add_argument("--seed", default=15, type=int)
+    parser.add_argument("--seed", default=94, type=int)
     parser.add_argument("--split", default=0.8, type=float)
+    parser.add_argument("--repo_file", default="repos.csv")
     args = parser.parse_args()
-    main(args.similarity, args.split, args.seed)
+    main(args.similarity, args.split, args.seed, args.repo_file)
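
For context on the `--similarity` option: the BK Tree above uses Levenshtein distance normalized by the longer string's length, so distances fall in [0, 1] and `--similarity` is the largest normalized distance at which two schemas are still grouped together. A minimal sketch of that metric (the example strings below are invented for illustration, not taken from the dataset):

# Sketch of the normalized edit distance used as the grouping metric.
# The example inputs are illustrative only.
import Levenshtein


def normalized_distance(a: str, b: str) -> float:
    # Levenshtein distance scaled by the longer string, giving a value in [0, 1]
    return Levenshtein.distance(a, b) / max(len(a), len(b))


print(normalized_distance('{"type": "object"}', '{"type": "array"}'))  # 6 / 18 ≈ 0.33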
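The reason for `GroupShuffleSplit` rather than a plain shuffle is that each union-find component becomes one group, so near-duplicate schemas from the same organization can never straddle the train/test boundary. A toy illustration with made-up paths and group labels:

# Toy illustration: groups stay intact across the split.
# Paths and group labels here are invented for demonstration.
import numpy as np
from sklearn.model_selection import GroupShuffleSplit

schemas = np.array(["o1/r1/a.json", "o1/r2/b.json", "o2/r1/c.json", "o3/r1/d.json"])
groups = np.array([0, 0, 1, 2])  # one group per union-find component

gss = GroupShuffleSplit(n_splits=1, train_size=0.8, random_state=94)
train_idx, test_idx = next(gss.split(schemas, groups=groups))

# No group appears on both sides of the split
assert not set(groups[train_idx]) & set(groups[test_idx])
print(schemas[train_idx], schemas[test_idx])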
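Each output file is gzipped JSON Lines: one object per schema, carrying the metadata fields joined in from `repos.csv` plus the raw schema text under `content`. A sketch of a consumer (not part of this commit):

# Sketch: reading an emitted split back; this reader is illustrative only.
import gzip
import json

with gzip.open("train.jsonl.gz", "rt") as f:
    for line in f:
        record = json.loads(line)
        print(record["repository"], record["path"], len(record["content"]))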
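Since the outputs are plain JSON Lines, they should also load directly with the Hugging Face `datasets` JSON builder, e.g. `load_dataset("json", data_files={"train": "train.jsonl.gz"})`, which decompresses gzip transparently.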