LeeAfe commited on
Commit
7035677
1 Parent(s): 5a256ff

Upload 33 files

Browse files
Files changed (34) hide show
  1. .gitattributes +13 -0
  2. data/convert_to_hf.py +32 -0
  3. data/format_conversion/iter_gen.py +44 -0
  4. data/format_conversion/tapex_gen.py +51 -0
  5. data/readme.md +9 -0
  6. data/test_data/tests_seen.jsonl +0 -0
  7. data/test_data/tests_seen_iter.jsonl +0 -0
  8. data/test_data/tests_seen_table_seq.jsonl +0 -0
  9. data/test_data/tests_seen_table_seq_iter.jsonl +3 -0
  10. data/test_data/tests_seen_table_seq_tapex.jsonl +0 -0
  11. data/test_data/tests_unseen.jsonl +0 -0
  12. data/test_data/tests_unseen_iter.jsonl +0 -0
  13. data/test_data/tests_unseen_table_seq.jsonl +0 -0
  14. data/test_data/tests_unseen_table_seq_iter.jsonl +3 -0
  15. data/test_data/tests_unseen_table_seq_tapex.jsonl +0 -0
  16. data/train.jsonl +3 -0
  17. data/train_data/train.jsonl +3 -0
  18. data/train_data/train_iter.jsonl +3 -0
  19. data/train_data/train_table_seq_1.jsonl +3 -0
  20. data/train_data/train_table_seq_1_iter.jsonl +3 -0
  21. data/train_data/train_table_seq_1_tapex.jsonl +3 -0
  22. data/train_data/train_table_seq_2.jsonl +3 -0
  23. data/train_data/train_table_seq_2_iter.jsonl +3 -0
  24. data/train_data/train_table_seq_2_tapex.jsonl +3 -0
  25. data/valid_data/valid_seen.jsonl +0 -0
  26. data/valid_data/valid_seen_iter.jsonl +0 -0
  27. data/valid_data/valid_seen_table_seq.jsonl +0 -0
  28. data/valid_data/valid_seen_table_seq_iter.jsonl +3 -0
  29. data/valid_data/valid_seen_table_seq_tapex.jsonl +0 -0
  30. data/valid_data/valid_unseen.jsonl +0 -0
  31. data/valid_data/valid_unseen_iter.jsonl +0 -0
  32. data/valid_data/valid_unseen_table_seq.jsonl +0 -0
  33. data/valid_data/valid_unseen_table_seq_iter.jsonl +3 -0
  34. data/valid_data/valid_unseen_table_seq_tapex.jsonl +0 -0
.gitattributes ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ data/test_data/tests_seen_table_seq_iter.jsonl filter=lfs diff=lfs merge=lfs -text
2
+ data/test_data/tests_unseen_table_seq_iter.jsonl filter=lfs diff=lfs merge=lfs -text
3
+ data/train_data/train_iter.jsonl filter=lfs diff=lfs merge=lfs -text
4
+ data/train_data/train_table_seq_1_iter.jsonl filter=lfs diff=lfs merge=lfs -text
5
+ data/train_data/train_table_seq_1_tapex.jsonl filter=lfs diff=lfs merge=lfs -text
6
+ data/train_data/train_table_seq_1.jsonl filter=lfs diff=lfs merge=lfs -text
7
+ data/train_data/train_table_seq_2_iter.jsonl filter=lfs diff=lfs merge=lfs -text
8
+ data/train_data/train_table_seq_2_tapex.jsonl filter=lfs diff=lfs merge=lfs -text
9
+ data/train_data/train_table_seq_2.jsonl filter=lfs diff=lfs merge=lfs -text
10
+ data/train_data/train.jsonl filter=lfs diff=lfs merge=lfs -text
11
+ data/train.jsonl filter=lfs diff=lfs merge=lfs -text
12
+ data/valid_data/valid_seen_table_seq_iter.jsonl filter=lfs diff=lfs merge=lfs -text
13
+ data/valid_data/valid_unseen_table_seq_iter.jsonl filter=lfs diff=lfs merge=lfs -text
data/convert_to_hf.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from tabulate import tabulate
3
+
4
+
5
+
6
def load_data(filepath):
    """Parse a *_table_seq jsonl file into structured HF-ready instances.

    Each line must be a JSON object with "id", "input" and "output" keys.
    Instances whose input lacks a "SCENE DESCRIPTION:" section are dropped.

    Returns a list of dicts with the task text, the scene table (as rows,
    and rendered to markdown via tabulate), the raw output, the output
    split into step/argument lists, and a seen/unseen label derived from
    the file path.
    """
    data = []
    prefix = "SCENE DESCRIPTION:"
    with open(filepath) as f:
        # Stream line by line instead of f.read().splitlines() so large
        # shards are not held in memory twice.
        for line in f:
            item = json.loads(line)
            if prefix not in item["input"]:
                continue  # no scene table -> instance is not usable
            index = item["input"].index(prefix)
            table_text = item["input"][index + len(prefix):].strip()
            # Rows are "[SEP]"-separated; cells are comma-separated.
            # NOTE(review): cell values containing commas would be split
            # incorrectly here — preserved from the original logic.
            rows = [r.strip().split(",") for r in table_text.split("[SEP]")]
            instance = {
                "id": item["id"],
                "input_task": item["input"][:index].strip(),
                "input_table": rows,
                "input_table_md": tabulate(rows, headers='firstrow'),
                "output_text": item["output"],
                # NOTE(review): splits on "|" (no spaces), so step tokens
                # keep surrounding whitespace — preserved as-is.
                "output_list": [o.split("|") for o in item["output"]],
                "label": "unseen" if "unseen" in filepath else "seen",
            }
            data.append(instance)
    return data
26
+
27
# Merge both table-seq training shards and serialize them as a single
# jsonl file that downstream loaders can consume directly.
shards = [
    "data/train_data/train_table_seq_1.jsonl",
    "data/train_data/train_table_seq_2.jsonl",
]
data = []
for shard in shards:
    data += load_data(filepath=shard)
with open("data/train.jsonl", "w") as sink:
    sink.writelines(json.dumps(record) + "\n" for record in data)
data/format_conversion/iter_gen.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import jsonlines
2
+
3
+ import os
4
def file_name(file_dir):
    """Collect the base .jsonl files under *file_dir* (recursively).

    Derived files — anything whose stem contains "iter" or "tapex" —
    are excluded, so only original source files are returned.
    """
    matches = []
    for root, _dirs, files in os.walk(file_dir):
        for fname in files:
            stem, ext = os.path.splitext(fname)
            if ext == '.jsonl' and 'iter' not in stem and 'tapex' not in stem:
                matches.append(os.path.join(root, fname))
    return matches
11
+
12
def iter(path):
    """Explode each multi-step plan into per-step training instances.

    For every record in *path* the " | "-joined output is split into
    steps; step i becomes its own record whose input is the task text
    plus steps 0..i-1, with the scene description (when present)
    re-appended after the step prefix. Results are written next to the
    source file as ``*_iter.jsonl``.

    NOTE: the function name shadows the builtin ``iter`` — kept for
    compatibility with existing callers in this file.
    """
    import json  # local import: this file does not import json at top level

    marker = "SCENE DESCRIPTION"
    out_path = path.replace('.jsonl', '_iter.jsonl')
    with open(path, "r") as reader, open(out_path, "w") as writer:
        for line in reader:
            if not line.strip():
                continue  # tolerate stray blank lines
            item = json.loads(line)
            steps = item['output'][0].split(' | ')
            raw_input = item['input']
            # The marker test and split are invariant per record — hoist
            # them out of the per-step loop instead of redoing them.
            if marker in raw_input:
                parts = raw_input.split(marker)
                # Original behavior kept: only parts[0]/parts[1] are used,
                # so any text after a second marker occurrence is dropped.
                head, suffix = parts[0], marker + parts[1]
            else:
                head, suffix = raw_input, ""
            for i, step in enumerate(steps):
                # Prefix = task text plus all steps before this one.
                prefix = " | ".join([head] + steps[:i])
                record = {
                    'id': item['id'] + f'%{i}',
                    "input": prefix + suffix,
                    "output": [step],
                }
                writer.write(json.dumps(record) + "\n")
40
+
41
# Generate the iterative-format twin of every base .jsonl file under ./data.
L = file_name('./data')
for src in L:
    print(src)
    iter(src)
data/format_conversion/tapex_gen.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import jsonlines
2
+ import json
3
+ import os
4
def file_name(file_dir):
    """Collect the table-seq source .jsonl files under *file_dir*.

    Only files whose stem contains "table" qualify; derived files
    (stems containing "iter" or "tapex") are excluded.
    """
    matches = []
    for root, _dirs, files in os.walk(file_dir):
        for fname in files:
            stem, ext = os.path.splitext(fname)
            if (ext == ".jsonl" and "iter" not in stem
                    and "tapex" not in stem and "table" in stem):
                matches.append(os.path.join(root, fname))
    return matches
11
+
12
+
13
# Column names of the flattened scene table, in the order the values
# appear in each comma-separated object row.
attributes = ["id", "object_type", "position_x", "position_y", "position_z",
              "rotation_x", "rotation_y", "rotation_z", "parent_receptacle"]


def convert(item):
    """Convert one table-seq instance into TAPEX format.

    Splits the scene description off the task text and reshapes the
    "[SEP]"-separated object rows into a column-oriented table keyed by
    ``attributes``.

    Returns the converted dict, or ``False`` when *item* has no
    recognizable scene description (callers test for ``False``).
    """
    input_split = item["input"].split("SCENE DESCRIPTION")
    try:
        scene_info = input_split[1]
    except IndexError:
        # No marker: fall back to everything after the first ':'.
        # Narrowed from a bare except — only a missing split segment is
        # an expected failure here.
        try:
            # NOTE(review): in this fallback the full input (scene
            # included) is still emitted as "input" — preserved as-is.
            scene_info = item["input"].split(":")[1]
        except IndexError:
            return False
    table = {attribute: [] for attribute in attributes}
    rows = scene_info.split("[SEP]")
    # rows[0] is the header row of the sequence table; skip it by
    # slicing instead of the original first-iteration flag.
    for row in rows[1:]:
        # zip truncates to the shorter side, matching the original
        # tolerance for short/long rows.
        for value, attribute in zip(row.split(','), attributes):
            table[attribute].append(value.strip())
    return {"id": item["id"], "input": input_split[0],
            "output": item["output"][0], "input_table": table}
36
+
37
+
38
def convert_file(path):
    """Convert every instance in *path* to TAPEX format.

    Results are written next to the source file as ``*_tapex.jsonl``;
    instances that ``convert`` rejects (returns ``False``) are skipped.
    """
    out_path = path.replace(".jsonl", "_tapex.jsonl")
    # Plain line-oriented I/O with the already-imported stdlib json
    # module; the original also called convert() twice per record —
    # convert once and reuse the result.
    with open(path, "r") as src, open(out_path, "w") as dst:
        for line in src:
            if not line.strip():
                continue  # tolerate stray blank lines
            converted = convert(json.loads(line))
            if converted is False:
                continue  # no scene description in this instance
            dst.write(json.dumps(converted) + "\n")
44
+
45
+
46
+
47
# Locate every table-seq source file under ./data and emit its
# TAPEX-format twin next to it.
L = file_name("./data")
print(L)
for path in L:
    print(path)
    convert_file(path)
data/readme.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ This folder contains the G-PlanET dataset and the data scripts used to generate data for iterative and TAPEX training.
2
+
3
+ run
4
+ ```
5
+ iter_gen.py
6
+
7
+ tapex_gen.py
8
+ ```
9
+ in the folder `format_conversion`; this will generate the data for iterative training and for TAPEX training and testing.
data/test_data/tests_seen.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/test_data/tests_seen_iter.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/test_data/tests_seen_table_seq.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/test_data/tests_seen_table_seq_iter.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df9776e4146bd1e276e300603b42d021d3b3b0bdfa008e0ad15ae02ffa8c4d2a
3
+ size 25883297
data/test_data/tests_seen_table_seq_tapex.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/test_data/tests_unseen.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/test_data/tests_unseen_iter.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/test_data/tests_unseen_table_seq.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/test_data/tests_unseen_table_seq_iter.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6de7055d49ef98aa7271c91ae6d5b9e075fcf9410657948d881a1c25de499833
3
+ size 24273672
data/test_data/tests_unseen_table_seq_tapex.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:081b7669bb327ca9ff8e7f5ac72883136dd098c055ec9240cc49f70459807dcc
3
+ size 306598433
data/train_data/train.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e765613044dc9b749b24c95bd99722548fe7f5bc03f05277900367faf637f8f5
3
+ size 11746395
data/train_data/train_iter.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d0123279a9dfc668c484db9515cdf6876eaaae3691e65ee0eaffc6a65cce0ab2
3
+ size 67672122
data/train_data/train_table_seq_1.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20246be8a050f2f74e18dd4be521399b677099918e45f2448e0d0103370fe4f3
3
+ size 47746372
data/train_data/train_table_seq_1_iter.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6edc50f2949b7ff21e979dcbadc580799f97b9c182815f4acea1b157f734d68a
3
+ size 370402104
data/train_data/train_table_seq_1_tapex.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2216d2734fa9c44d68ebe15768f1840a756cd4d49364d31859a70e70b61b5fb4
3
+ size 58170797
data/train_data/train_table_seq_2.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5f5c24052b8167cae054498d73837c92faa797e1923434ceec2790797025df5
3
+ size 47288736
data/train_data/train_table_seq_2_iter.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:586121c816c020ad2d238e5591b2a9fdc8976627b6bfd401e7385d122f679d35
3
+ size 363377921
data/train_data/train_table_seq_2_tapex.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ecfe56df3310686dbbca101ebb2880094e3c7380c0f88a55e670f7c258a895c
3
+ size 57614866
data/valid_data/valid_seen.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/valid_data/valid_seen_iter.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/valid_data/valid_seen_table_seq.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/valid_data/valid_seen_table_seq_iter.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cc7ecfd6f62f499d27f1d5d679c54b459342707dc61955014a5dc8b1d8da85c
3
+ size 29341005
data/valid_data/valid_seen_table_seq_tapex.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/valid_data/valid_unseen.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/valid_data/valid_unseen_iter.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/valid_data/valid_unseen_table_seq.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/valid_data/valid_unseen_table_seq_iter.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b61cb2dcadc673c4d627e62fc83db38eef0075f566aea70388c942d65284b81e
3
+ size 28336600
data/valid_data/valid_unseen_table_seq_tapex.jsonl ADDED
The diff for this file is too large to render. See raw diff