Datasets:
lmqg
/

Modalities:
Tabular
Text
Languages:
English
ArXiv:
Libraries:
Datasets
License:
asahi417 committed on
Commit
aa8dd40
1 Parent(s): 346267c
Files changed (1) hide show
  1. process.py +0 -14
process.py CHANGED
@@ -88,15 +88,9 @@ if __name__ == '__main__':
88
  for _, _g in df.groupby('q_review_id'):
89
  if any(i == 'ANSWERNOTFOUND' for i in _g['human_ans_spans']):
90
  continue
91
- # if len(_g["human_ans_spans"].unique()) != 1:
92
- # continue
93
- # _df = _g.iloc[0]
94
  _len = [len(i) for i in _g["human_ans_spans"]]
95
  _df = _g.iloc[_len.index(max(_len))]
96
  start, end = eval(_df['human_ans_indices'])
97
- # if re.sub(r'[\s\W]', '', _df['review'][start:end]) != re.sub(r'[\s\W]', '', _df["human_ans_spans"]):
98
- # input(f"{_df['review'][start:end]} != {_df['human_ans_spans']}")
99
- # continue
100
  out = process_single_data(question=re.sub(r'\s+\?', '?', _df['question']),
101
  answer=_df['review'][start:end],
102
  paragraph=_df['review'])
@@ -107,11 +101,3 @@ if __name__ == '__main__':
107
  output.append(out)
108
  with open(f'./data/processed/{i}.{s.replace(".csv", ".jsonl")}', 'w') as f:
109
  f.write('\n'.join([json.dumps(i) for i in output]))
110
-
111
- # for s in ["dev", "test", "train"]:
112
- # output = []
113
- # for i in ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"]:
114
- # with open(f'./data/processed/{i}.{s}.jsonl', 'r') as f:
115
- # output += [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
116
- # with open(f'./data/processed/default.{s}.jsonl', 'w') as f:
117
- # f.write('\n'.join([json.dumps(i) for i in output]))
 
88
  for _, _g in df.groupby('q_review_id'):
89
  if any(i == 'ANSWERNOTFOUND' for i in _g['human_ans_spans']):
90
  continue
 
 
 
91
  _len = [len(i) for i in _g["human_ans_spans"]]
92
  _df = _g.iloc[_len.index(max(_len))]
93
  start, end = eval(_df['human_ans_indices'])
 
 
 
94
  out = process_single_data(question=re.sub(r'\s+\?', '?', _df['question']),
95
  answer=_df['review'][start:end],
96
  paragraph=_df['review'])
 
101
  output.append(out)
102
  with open(f'./data/processed/{i}.{s.replace(".csv", ".jsonl")}', 'w') as f:
103
  f.write('\n'.join([json.dumps(i) for i in output]))