asahi417 committed
Commit 681de16
1 Parent(s): c41faeb

Update create_split.py

Files changed (1)
  1. create_split.py +14 -19
create_split.py CHANGED
@@ -6,10 +6,6 @@ import pandas as pd
 from random import shuffle, seed
 
 
-parameters_min_e_freq = [1, 2, 3, 4]
-parameters_max_p_freq = [100, 50, 25, 10]
-
-
 def get_test_predicate(_data):
     tmp_df = pd.DataFrame(_data)
     predicates_count = tmp_df.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
@@ -49,18 +45,17 @@ with open("data/t_rex.filter_unified.test.jsonl") as f:
 
 
 seed(42)
-for min_e_freq, max_p_freq in product(parameters_min_e_freq, parameters_max_p_freq):
-    with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.jsonl") as f:
-        data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
-    for i in data:
-        i['relation'] = i.pop('predicate')
-        i['head'] = i.pop('subject')
-        i['tail'] = i.pop('object')
-    data = [i for i in data if i['relation'] not in test_predicate]
-    shuffle(data)
-    data_train = data[:int(len(data) * 0.9)]
-    data_valid = data[int(len(data) * 0.9):]
-    with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.train.jsonl", "w") as f:
-        f.write('\n'.join([json.dumps(i) for i in data_train]))
-    with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.validation.jsonl", "w") as f:
-        f.write('\n'.join([json.dumps(i) for i in data_valid]))
+with open(f"data/t_rex.filter_unified.jsonl") as f:
+    data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
+for i in data:
+    i['relation'] = i.pop('predicate')
+    i['head'] = i.pop('subject')
+    i['tail'] = i.pop('object')
+data = [i for i in data if i['relation'] not in test_predicate]
+shuffle(data)
+data_train = data[:int(len(data) * 0.8)]
+data_valid = data[int(len(data) * 0.8):]
+with open(f"data/t_rex.filter_unified.train.jsonl", "w") as f:
+    f.write('\n'.join([json.dumps(i) for i in data_train]))
+with open(f"data/t_rex.filter_unified.validation.jsonl", "w") as f:
+    f.write('\n'.join([json.dumps(i) for i in data_valid]))
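
For reference, a minimal sketch (not part of the commit) of how the files written by the updated script could be sanity-checked after running it. It assumes the test file data/t_rex.filter_unified.test.jsonl still uses the original "predicate" field, while the train/validation files use the renamed "relation" field as in the diff above; the file paths are taken from the script itself.

import json


def read_jsonl(path):
    # Same line-by-line JSONL parsing as in create_split.py.
    with open(path) as f:
        return [json.loads(i) for i in f.read().split('\n') if len(i) > 0]


train = read_jsonl("data/t_rex.filter_unified.train.jsonl")
valid = read_jsonl("data/t_rex.filter_unified.validation.jsonl")
test = read_jsonl("data/t_rex.filter_unified.test.jsonl")

# Predicates held out for the test split should not appear in train/validation
# (assumes the test file keeps the original "predicate" key).
test_predicates = {i["predicate"] for i in test}
assert all(i["relation"] not in test_predicates for i in train + valid)

# The updated script uses an 80/20 train/validation split.
print(len(train), len(valid), round(len(train) / (len(train) + len(valid)), 2))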