asahi417 committed
Commit: e89b931
Parent(s): e291650

Rename filtering_purify.py to min_entity_filter.py

Files changed (2):
  1. filtering_purify.py +0 -133
  2. min_entity_filter.py +55 -0
filtering_purify.py DELETED
@@ -1,133 +0,0 @@
-import json
-from itertools import product
-
-import numpy as np
-import pandas as pd
-import seaborn as sns
-from matplotlib import pyplot as plt
-
-from datasets import Dataset
-
-parameters_min_e_freq = [1, 2, 3, 4]
-parameters_max_p_freq = [100, 50, 25, 10]
-assert len(parameters_min_e_freq) == 4
-assert len(parameters_max_p_freq) == 4
-sns.set_theme(style="whitegrid")
-
-
-def is_entity(token):
-    return any(i.isupper() for i in token)
-
-
-# load filtered data
-with open(f"data/t_rex.filter_unified.jsonl") as f:
-    data = Dataset.from_list([json.loads(i) for i in f.read().split('\n') if len(i) > 0])
-df_main = data.to_pandas()
-# entity frequency filter
-c_sub = df_main.groupby("subject")['title'].count()
-c_obj = df_main.groupby("object")['title'].count()
-key = set(list(c_sub.index) + list(c_obj.index))
-count_main = pd.DataFrame(
-    [{'entity': k, "subject": c_sub[k] if k in c_sub else 0, "object": c_obj[k] if k in c_obj else 0} for k in key])
-count_main.index = count_main.pop('entity')
-count_main['is_entity'] = [is_entity(i) for i in count_main.index]
-count_main['sum'] = count_main['subject'] + count_main['object']
-
-
-def filtering(row, min_freq: int = 3, target: str = "subject"):
-    if not row['is_entity']:
-        return True
-    return row[target] >= min_freq
-
-
-def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 3, random_sampling: bool = True):
-    df = df_main.copy()
-    count_filter_sub = count_main[count_main.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='subject'), axis=1)]['subject']
-    count_filter_obj = count_main[count_main.apply(lambda x: filtering(x, min_freq=min_entity_freq, target='object'), axis=1)]['object']
-    vocab_sub = set(count_filter_sub.index)
-    vocab_obj = set(count_filter_obj.index)
-    df['flag_subject'] = [i in vocab_sub for i in df['subject']]
-    df['flag_object'] = [i in vocab_obj for i in df['object']]
-    df['flag'] = df['flag_subject'] & df['flag_object']
-    df_filter = df[df['flag']]
-    df_filter.pop("flag")
-    df_filter.pop("flag_subject")
-    df_filter.pop("flag_object")
-    df_filter['count_subject'] = [count_filter_sub.loc[i] for i in df_filter['subject']]
-    df_filter['count_object'] = [count_filter_obj.loc[i] for i in df_filter['object']]
-    df_filter['count_sum'] = df_filter['count_subject'] + df_filter['count_object']
-
-    # predicate frequency filter
-    if random_sampling:
-        df_balanced = pd.concat(
-            [g if len(g) <= max_pairs_predicate else g.sample(max_pairs_predicate, random_state=0) for _, g in
-             df_filter.groupby("predicate") if len(g) >= min_pairs_predicate])
-    else:
-        df_balanced = pd.concat(
-            [g if len(g) <= max_pairs_predicate else g.sort_values(by='count_sum', ascending=False).head(max_pairs_predicate) for _, g in
-             df_filter.groupby("predicate") if len(g) >= min_pairs_predicate])
-
-    df_balanced.pop("count_subject")
-    df_balanced.pop("count_object")
-    df_balanced.pop("count_sum")
-    df_balanced = df_balanced.drop_duplicates(subset=['subject', 'object', 'predicate'], keep='last')
-    # return data
-    target_data = [i.to_dict() for _, i in df_balanced.iterrows()]
-    predicate_dist = df_balanced.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
-    entity, count = np.unique(df_balanced['object'].tolist() + df_balanced['subject'].tolist(), return_counts=True)
-    entity_dist = dict(list(zip(entity.tolist(), count.tolist())))
-    return predicate_dist, entity_dist, target_data
-
-
-if __name__ == '__main__':
-
-    p_dist_full = []
-    e_dist_full = []
-    config = []
-    candidates = list(product(parameters_min_e_freq, parameters_max_p_freq))
-
-    # run filtering with different configs
-    for min_e_freq, max_p_freq in candidates:
-        p_dist, e_dist, new_data = main(
-            min_entity_freq=min_e_freq, max_pairs_predicate=max_p_freq, random_sampling=False)
-        p_dist_full.append(p_dist)
-        e_dist_full.append(e_dist)
-        config.append([min_e_freq, max_p_freq])
-        # save data
-        with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.jsonl", 'w') as f:
-            f.write('\n'.join([json.dumps(i) for i in new_data]))
-
-    # plot predicate distribution
-    df_p = pd.DataFrame([dict(enumerate(sorted(p.values(), reverse=True))) for p in p_dist_full]).T
-    df_p.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates]
-    fig, axes = plt.subplots(2, 2, constrained_layout=True)
-    fig.suptitle('Predicate Distribution over Different Configurations')
-    for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], parameters_max_p_freq):
-        _df = df_p[[f"min entity: {mef}, max predicate: {mpf}" for mef in parameters_min_e_freq]]
-        _df.columns = [f"min entity: {mef}" for mef in parameters_min_e_freq]
-        ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
-        if mpf != 100:
-            ax.legend_.remove()
-        axes[x, y].set_title(f'max predicate: {mpf}')
-    fig.supxlabel('unique predicates sorted by frequency')
-    fig.supylabel('number of triples')
-    fig.savefig("data/stats.predicate_distribution.png", bbox_inches='tight')
-    fig.clf()
-
-    # plot entity distribution
-    df_e = pd.DataFrame([dict(enumerate(sorted(e.values(), reverse=True))) for e in e_dist_full]).T
-    df_e.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates]
-    fig, axes = plt.subplots(2, 2, constrained_layout=True)
-    fig.suptitle('Entity Distribution over Different Configurations')
-    for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], parameters_max_p_freq):
-        _df = df_e[[f"min entity: {mef}, max predicate: {mpf}" for mef in parameters_min_e_freq]]
-        _df.columns = [f"min entity: {mef}" for mef in parameters_min_e_freq]
-        ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
-        ax.set(xscale='log')
-        if mpf != 100:
-            ax.legend_.remove()
-        axes[x, y].set_title(f'max predicate: {mpf}')
-    fig.supxlabel('unique entities sorted by frequency')
-    fig.supylabel('number of triples')
-    fig.savefig("data/stats.entity_distribution.png", bbox_inches='tight')
-    fig.clf()
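The removed script swept a 4 x 4 grid of (minimum entity frequency, maximum triples per predicate) settings and wrote one JSONL file per configuration. A minimal sketch of that sweep, using the values and filename pattern from the code above (illustration only, not part of the commit):

from itertools import product

# grid of configurations swept by the removed filtering_purify.py
parameters_min_e_freq = [1, 2, 3, 4]       # minimum entity frequency
parameters_max_p_freq = [100, 50, 25, 10]  # maximum triples kept per predicate

for min_e_freq, max_p_freq in product(parameters_min_e_freq, parameters_max_p_freq):
    # one output file per configuration, 16 in total
    print(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.jsonl")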
min_entity_filter.py ADDED
@@ -0,0 +1,55 @@
+import json
+from itertools import product
+
+import numpy as np
+import pandas as pd
+import seaborn as sns
+from matplotlib import pyplot as plt
+
+from datasets import Dataset
+
+min_entity_freq = 5
+
+
+def is_entity(token):
+    return any(i.isupper() for i in token)
+
+
+# load filtered data
+with open(f"data/t_rex.filter_unified.jsonl") as f:
+    data = Dataset.from_list([json.loads(i) for i in f.read().split('\n') if len(i) > 0])
+
+df = data.to_pandas()
+
+# entity frequency filter
+c_sub = df.groupby("subject")['title'].count()
+c_obj = df.groupby("object")['title'].count()
+key = set(list(c_sub.index) + list(c_obj.index))
+count_main = pd.DataFrame([{'entity': k, "subject": c_sub[k] if k in c_sub else 0, "object": c_obj[k] if k in c_obj else 0} for k in key])
+count_main.index = count_main.pop('entity')
+count_main['is_entity'] = [is_entity(i) for i in count_main.index]
+count_main['sum'] = count_main['subject'] + count_main['object']
+
+
+def filtering(row, target: str = "subject"):
+    if not row['is_entity']:
+        return True
+    return row[target] >= min_entity_freq
+
+
+count_filter_sub = count_main[count_main.apply(lambda x: filtering(x, target='subject'), axis=1)]['subject']
+count_filter_obj = count_main[count_main.apply(lambda x: filtering(x, target='object'), axis=1)]['object']
+vocab_sub = set(count_filter_sub.index)
+vocab_obj = set(count_filter_obj.index)
+df['flag_subject'] = [i in vocab_sub for i in df['subject']]
+df['flag_object'] = [i in vocab_obj for i in df['object']]
+df['flag'] = df['flag_subject'] & df['flag_object']
+df_filter = df[df['flag']]
+df_filter.pop("flag")
+df_filter.pop("flag_subject")
+df_filter.pop("flag_object")
+df_filter = df_filter.drop_duplicates(subset=['subject', 'object', 'predicate'], keep='last')
+with open(f"data/t_rex.filter_unified.min_entity_{min_entity_freq}.jsonl", 'w') as f:
+    f.write('\n'.join([json.dumps(i.to_dict()) for _, i in df_filter.iterrows()]))
+
+print(f"{len(df_filter)} triples, {len(df['predicate'].unique())} unique predicates")