wangyueqian commited on
Commit
f558090
1 Parent(s): 6e9a079

model ckpt upload

Browse files
README.md CHANGED
@@ -1,3 +1,19 @@
1
  ---
2
  license: mit
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: mit
3
  ---
4
+
5
+ This repository contains the baseline model of Conversation Speaker Identification as described in the paper **Friends-MMC: A Dataset for Multi-modal Multi-party Conversation Understanding**.
6
+
7
+ ## Related Resources:
8
+ - Paper: [Friends-MMC: A Dataset for Multi-modal Multi-party Conversation Understanding](to update)
9
+ - Dataset: [Friends-MMC](https://huggingface.co/datasets/wangyueqian/friends_mmc)
10
+
11
+ ## Citation
12
+ If you use this work in your research, please cite (to update):
13
+ ```
14
+ @inproceedings{wang2025friends-mmc,
15
+ title={Friends-MMC: A Dataset for Multi-modal Multi-party Conversation Understanding},
16
+ author={Yueqian Wang and Xiaojun Meng and Yuxuan Wang and Jianxin Liang and Qun Liu and Dongyan Zhao},
17
+ booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
18
+ year={2025}
19
+ }
cnn/ft-5_turns/best_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a3ae7027d8a30aad11ff12d287beecf6af1302fd598f2c52194af93cfee6dcc
3
+ size 94321627
cnn/ft-8_turns/best_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:160f55305eac9224b72322d5aed14ad47caf03587a802d3b8352a1a115523b21
3
+ size 94321627
deberta/ft-5_turns/checkpoint-valid/config.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "./snap/multiturn/dialog_roberta-contrastive/0918-ijcai2019-mse_sim-deberta_large/checkpoint-valid",
3
+ "architectures": [
4
+ "DebertaV2ForSpeakerLabeling"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 1,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 1024,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 4096,
13
+ "layer_norm_eps": 1e-07,
14
+ "max_position_embeddings": 512,
15
+ "max_relative_positions": -1,
16
+ "model_type": "deberta-v2",
17
+ "norm_rel_ebd": "layer_norm",
18
+ "num_attention_heads": 16,
19
+ "num_hidden_layers": 24,
20
+ "pad_token_id": 0,
21
+ "pooler_dropout": 0,
22
+ "pooler_hidden_act": "gelu",
23
+ "pooler_hidden_size": 1024,
24
+ "pos_att_type": [
25
+ "p2c",
26
+ "c2p"
27
+ ],
28
+ "position_biased_input": false,
29
+ "position_buckets": 256,
30
+ "relative_attention": true,
31
+ "share_att_key": true,
32
+ "sim_func": "linear",
33
+ "torch_dtype": "float32",
34
+ "transformers_version": "4.27.1",
35
+ "type_vocab_size": 0,
36
+ "vocab_size": 128100
37
+ }
deberta/ft-5_turns/checkpoint-valid/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:229e7ed6a445f7ed3db2e41fc7a18be91a3a14c6fcd62f317fab82b0265ce6a9
3
+ size 1753508351
deberta/ft-8_turns/checkpoint-valid/config.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "./snap/multiturn/dialog_roberta-contrastive/0918-ijcai2019-mse_sim-deberta_large/checkpoint-valid",
3
+ "architectures": [
4
+ "DebertaV2ForSpeakerLabeling"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 1,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 1024,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 4096,
13
+ "layer_norm_eps": 1e-07,
14
+ "max_position_embeddings": 512,
15
+ "max_relative_positions": -1,
16
+ "model_type": "deberta-v2",
17
+ "norm_rel_ebd": "layer_norm",
18
+ "num_attention_heads": 16,
19
+ "num_hidden_layers": 24,
20
+ "pad_token_id": 0,
21
+ "pooler_dropout": 0,
22
+ "pooler_hidden_act": "gelu",
23
+ "pooler_hidden_size": 1024,
24
+ "pos_att_type": [
25
+ "p2c",
26
+ "c2p"
27
+ ],
28
+ "position_biased_input": false,
29
+ "position_buckets": 256,
30
+ "relative_attention": true,
31
+ "share_att_key": true,
32
+ "sim_func": "linear",
33
+ "torch_dtype": "float32",
34
+ "transformers_version": "4.27.1",
35
+ "type_vocab_size": 0,
36
+ "vocab_size": 128100
37
+ }
deberta/ft-8_turns/checkpoint-valid/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a5f5be57fbf54c9ac197bfb6d3f2ce2a6ab315939565ffc2f80cf258f273eb7
3
+ size 1753508351
deberta/pt-ijcai2019/checkpoint-valid/config.json ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "/share2/wangyq/.cache/huggingface/transformers/microsoft/deberta-v3-large",
3
+ "architectures": [
4
+ "DebertaV2ForSpeakerLabeling"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.1,
7
+ "bos_token_id": 1,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.1,
10
+ "hidden_size": 1024,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 4096,
13
+ "layer_norm_eps": 1e-07,
14
+ "max_position_embeddings": 512,
15
+ "max_relative_positions": -1,
16
+ "model_type": "deberta-v2",
17
+ "norm_rel_ebd": "layer_norm",
18
+ "num_attention_heads": 16,
19
+ "num_hidden_layers": 24,
20
+ "pad_token_id": 0,
21
+ "pooler_dropout": 0,
22
+ "pooler_hidden_act": "gelu",
23
+ "pooler_hidden_size": 1024,
24
+ "pos_att_type": [
25
+ "p2c",
26
+ "c2p"
27
+ ],
28
+ "position_biased_input": false,
29
+ "position_buckets": 256,
30
+ "relative_attention": true,
31
+ "share_att_key": true,
32
+ "sim_func": "linear",
33
+ "torch_dtype": "float32",
34
+ "transformers_version": "4.27.1",
35
+ "type_vocab_size": 0,
36
+ "vocab_size": 128100
37
+ }
deberta/pt-ijcai2019/checkpoint-valid/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:890dc7956944c8ddd9d0c4f9ed2af21de478a6581255ad5294c2f1316f91218c
3
+ size 1753508351