Update unify_predicate.py
unify_predicate.py CHANGED (+3 -18)
@@ -1,15 +1,3 @@
-# import json
-# import pandas as pd
-#
-# with open("data/t_rex.filter.jsonl") as f:
-#     data = pd.DataFrame([json.loads(i) for i in f.read().split('\n') if len(i) > 0])
-# freq = data.groupby("predicate").count()['title']
-# data['freq'] = [freq.loc[i] for i in data['predicate']]
-# data = data[data['freq'] >= 3]
-# tmp = data.groupby("predicate").sample(10, replace=True)
-# tmp = tmp.drop_duplicates()
-# tmp.to_csv("data/t_rex.filter.predicate_check_sample.csv", index=False)
-
 import json
 import pandas as pd
 
@@ -59,11 +47,8 @@ data_filter_join.pop("reverse")
 data_filter_join.pop("predicate")
 data_filter_join['predicate'] = data_filter_join.pop("pretty relation name")
 
-print(f"
-print(f"[entity]: {len(set(data_filter_join['object'].unique().tolist() + data_filter_join['subject'].unique().tolist()))}")
-print(f"[predicate]: {len(data_filter_join['predicate'].unique())}")
-
+print(f"{len(data_filter_join)} triples, {len(data_filter_join['predicate'].unique())} predicates")
 
-data = [i.to_dict() for _, i in data_filter_join.iterrows()]
 with open(f"data/t_rex.filter_unified.jsonl", 'w') as f:
-
+    for _, i in data_filter_join.iterrows():
+        f.write(json.dumps(i.to_dict()) + '\n')
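
For reference, a minimal sketch (not part of this commit) of loading the unified file back into a DataFrame, assuming only the one-JSON-object-per-line format written above:

    import json
    import pandas as pd

    # read data/t_rex.filter_unified.jsonl, one JSON object per non-empty line
    with open("data/t_rex.filter_unified.jsonl") as f:
        data = pd.DataFrame([json.loads(line) for line in f if line.strip()])

    # quick sanity check mirroring the print added in this change
    print(f"{len(data)} triples, {len(data['predicate'].unique())} predicates")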