admin committed
Commit 14e7761
1 Parent(s): 1e440c2
Files changed (7)
  1. .gitattributes +11 -11
  2. .gitignore +6 -0
  3. README.md +1 -1
  4. app.py +174 -0
  5. model.py +142 -0
  6. requirements.txt +5 -0
  7. utils.py +67 -0
.gitattributes CHANGED
@@ -1,35 +1,35 @@
  *.7z filter=lfs diff=lfs merge=lfs -text
  *.arrow filter=lfs diff=lfs merge=lfs -text
  *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
  *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
  *.ftz filter=lfs diff=lfs merge=lfs -text
  *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
  *.joblib filter=lfs diff=lfs merge=lfs -text
  *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
  *.model filter=lfs diff=lfs merge=lfs -text
  *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
  *.onnx filter=lfs diff=lfs merge=lfs -text
  *.ot filter=lfs diff=lfs merge=lfs -text
  *.parquet filter=lfs diff=lfs merge=lfs -text
  *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
  *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
  *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
  *.xz filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *.tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.db* filter=lfs diff=lfs merge=lfs -text
+ *.ark* filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*data* filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*.meta filter=lfs diff=lfs merge=lfs -text
+ **/*ckpt*.index filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.wav filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,6 @@
+ *.pt
+ __pycache__/*
+ tmp/*
+ flagged/*
+ test.py
+ rename.sh
README.md CHANGED
@@ -1,6 +1,6 @@
  ---
  title: Erhu Playing Tech
- emoji: 📊
+ emoji: 🪕
  colorFrom: indigo
  colorTo: gray
  sdk: gradio
app.py ADDED
@@ -0,0 +1,174 @@
+ import os
+ import torch
+ import random
+ import shutil
+ import librosa
+ import warnings
+ import numpy as np
+ import gradio as gr
+ import librosa.display
+ import matplotlib.pyplot as plt
+ from utils import get_modelist, find_wav_files, embed_img, TEMP_DIR
+ from model import EvalNet
+
+
+ TRANSLATE = {
+     "vibrato": "揉弦 Rou xian",
+     "trill": "颤音 Chan yin",
+     "tremolo": "颤弓 Chan gong",
+     "staccato": "顿弓 Dun gong",
+     "ricochet": "抛弓 Pao gong",
+     "pizzicato": "拨弦 Bo xian",
+     "percussive": "击弓 Ji gong",
+     "legato_slide_glissando": "连滑音 Lian hua yin",
+     "harmonic": "泛音 Fan yin",
+     "diangong": "垫弓 Dian gong",
+     "detache": "分弓 Fen gong",
+ }
+ CLASSES = list(TRANSLATE.keys())
+ SAMPLE_RATE = 44100
+
+
+ def circular_padding(y: np.ndarray, sr: int, dur=3):
+     if len(y) >= sr * dur:
+         return y[: sr * dur]
+
+     size = sr * dur // len(y) + int((sr * dur) % len(y) > 0)
+     arrays = []
+     for _ in range(size):
+         arrays.append(y)
+
+     y = np.hstack(arrays)
+     return y[: sr * dur]
+
+
+ def wav2mel(audio_path: str):
+     os.makedirs(TEMP_DIR, exist_ok=True)
+     try:
+         y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
+         y = circular_padding(y, sr)
+         mel_spec = librosa.feature.melspectrogram(y=y, sr=sr)
+         log_mel_spec = librosa.power_to_db(mel_spec, ref=np.max)
+         librosa.display.specshow(log_mel_spec)
+         plt.axis("off")
+         plt.savefig(
+             f"{TEMP_DIR}/output.jpg",
+             bbox_inches="tight",
+             pad_inches=0.0,
+         )
+         plt.close()
+
+     except Exception as e:
+         print(f"Error converting {audio_path} : {e}")
+
+
+ def wav2cqt(audio_path: str):
+     os.makedirs(TEMP_DIR, exist_ok=True)
+     try:
+         y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
+         y = circular_padding(y, sr)
+         cqt_spec = librosa.cqt(y=y, sr=sr)
+         log_cqt_spec = librosa.power_to_db(np.abs(cqt_spec) ** 2, ref=np.max)
+         librosa.display.specshow(log_cqt_spec)
+         plt.axis("off")
+         plt.savefig(
+             f"{TEMP_DIR}/output.jpg",
+             bbox_inches="tight",
+             pad_inches=0.0,
+         )
+         plt.close()
+
+     except Exception as e:
+         print(f"Error converting {audio_path} : {e}")
+
+
+ def wav2chroma(audio_path: str):
+     os.makedirs(TEMP_DIR, exist_ok=True)
+     try:
+         y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
+         y = circular_padding(y, sr)
+         chroma_spec = librosa.feature.chroma_stft(y=y, sr=sr)
+         log_chroma_spec = librosa.power_to_db(np.abs(chroma_spec) ** 2, ref=np.max)
+         librosa.display.specshow(log_chroma_spec)
+         plt.axis("off")
+         plt.savefig(
+             f"{TEMP_DIR}/output.jpg",
+             bbox_inches="tight",
+             pad_inches=0.0,
+         )
+         plt.close()
+
+     except Exception as e:
+         print(f"Error converting {audio_path} : {e}")
+
+
+ def infer(wav_path: str, log_name: str, folder_path=TEMP_DIR):
+     if os.path.exists(folder_path):
+         shutil.rmtree(folder_path)
+
+     if not wav_path:
+         return None, "请输入音频 Please input an audio!"
+
+     try:
+         model = EvalNet(log_name, len(TRANSLATE)).model
+     except Exception as e:
+         return None, f"{e}"
+
+     spec = log_name.split("_")[-3]
+     eval("wav2%s" % spec)(wav_path)
+     input = embed_img(f"{folder_path}/output.jpg")
+     output: torch.Tensor = model(input)
+     pred_id = torch.max(output.data, 1)[1]
+     return (
+         os.path.basename(wav_path),
+         f"{TRANSLATE[CLASSES[pred_id]]} ({CLASSES[pred_id].capitalize()})",
+     )
+
+
+ if __name__ == "__main__":
+     warnings.filterwarnings("ignore")
+     models = get_modelist()
+     examples = []
+     example_wavs = find_wav_files()
+     model_num = len(models)
+     for wav in example_wavs:
+         examples.append([wav, models[random.randint(0, model_num - 1)]])
+
+     with gr.Blocks() as demo:
+         gr.Interface(
+             fn=infer,
+             inputs=[
+                 gr.Audio(label="上传录音 Upload a recording", type="filepath"),
+                 gr.Dropdown(
+                     choices=models, label="选择模型 Select a model", value=models[0]
+                 ),
+             ],
+             outputs=[
+                 gr.Textbox(label="音频文件名 Audio filename", show_copy_button=True),
+                 gr.Textbox(
+                     label="演奏技法识别 Playing tech recognition", show_copy_button=True
+                 ),
+             ],
+             examples=examples,
+             cache_examples=False,
+             allow_flagging="never",
+             title="建议录音时长保持在 3s 左右<br>It is recommended to keep the recording length around 3s.",
+         )
+
+         gr.Markdown(
+             """
+             # 引用 Cite
+             ```bibtex
+             @dataset{zhaorui_liu_2021_5676893,
+               author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
+               title = {CCMusic: an Open and Diverse Database for Chinese and General Music Information Retrieval Research},
+               month = {mar},
+               year = {2024},
+               publisher = {HuggingFace},
+               version = {1.2},
+               url = {https://huggingface.co/ccmusic-database}
+             }
+             ```"""
+         )
+
+     demo.launch()
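Note on `infer`: the checkpoint folder name encodes both the backbone and the spectrogram type, and `eval("wav2%s" % spec)` dispatches to one of the three converters above. A minimal sketch of that parsing, using a hypothetical folder name (the real names come from the downloaded model repo); the dict lookup at the end is an eval-free equivalent, not the code the app itself uses:

```python
log_name = "resnet18_mel_2024-06-01_12-00-00"  # hypothetical example name
spec = log_name.split("_")[-3]                 # -> "mel" (spectrogram type)
m_ver = "_".join(log_name.split("_")[:-3])     # -> "resnet18" (backbone, parsed the same way in model.py)

# eval-free equivalent of eval("wav2%s" % spec)(wav_path):
converters = {"mel": wav2mel, "cqt": wav2cqt, "chroma": wav2chroma}
converters[spec]("example.wav")  # writes the spectrogram image to f"{TEMP_DIR}/output.jpg"
```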
model.py ADDED
@@ -0,0 +1,142 @@
+ import torch
+ import torch.nn as nn
+ import torchvision.models as models
+ from modelscope.msdatasets import MsDataset
+ from utils import MODEL_DIR
+
+
+ class EvalNet:
+     model: nn.Module = None
+     m_type = "squeezenet"
+     input_size = 224
+     output_size = 512
+
+     def __init__(self, log_name: str, cls_num: int):
+         saved_model_path = f"{MODEL_DIR}/{log_name}/save.pt"
+         m_ver = "_".join(log_name.split("_")[:-3])
+         self.m_type, self.input_size = self._model_info(m_ver)
+
+         if not hasattr(models, m_ver):
+             raise Exception("Unsupported model.")
+
+         self.model = eval("models.%s()" % m_ver)
+         linear_output = self._set_outsize()
+         self._set_classifier(cls_num, linear_output)
+         checkpoint = torch.load(saved_model_path, map_location="cpu")
+         if torch.cuda.is_available():
+             checkpoint = torch.load(saved_model_path)
+
+         self.model.load_state_dict(checkpoint, False)
+         self.model.eval()
+
+     def _get_backbone(self, ver: str, backbone_list: list):
+         for bb in backbone_list:
+             if ver == bb["ver"]:
+                 return bb
+
+         print("Backbone name not found, using default option - alexnet.")
+         return backbone_list[0]
+
+     def _model_info(self, m_ver: str):
+         backbone_list = MsDataset.load("monetjoe/cv_backbones", split="v1")
+         backbone = self._get_backbone(m_ver, backbone_list)
+         m_type = str(backbone["type"])
+         input_size = int(backbone["input_size"])
+         return m_type, input_size
+
+     def _classifier(self, cls_num: int, output_size: int, linear_output: bool):
+         q = (1.0 * output_size / cls_num) ** 0.25
+         l1 = int(q * cls_num)
+         l2 = int(q * l1)
+         l3 = int(q * l2)
+         if linear_output:
+             return torch.nn.Sequential(
+                 nn.Dropout(),
+                 nn.Linear(output_size, l3),
+                 nn.ReLU(inplace=True),
+                 nn.Dropout(),
+                 nn.Linear(l3, l2),
+                 nn.ReLU(inplace=True),
+                 nn.Dropout(),
+                 nn.Linear(l2, l1),
+                 nn.ReLU(inplace=True),
+                 nn.Linear(l1, cls_num),
+             )
+
+         else:
+             return torch.nn.Sequential(
+                 nn.Dropout(),
+                 nn.Conv2d(output_size, l3, kernel_size=(1, 1), stride=(1, 1)),
+                 nn.ReLU(inplace=True),
+                 nn.AdaptiveAvgPool2d(output_size=(1, 1)),
+                 nn.Flatten(),
+                 nn.Linear(l3, l2),
+                 nn.ReLU(inplace=True),
+                 nn.Dropout(),
+                 nn.Linear(l2, l1),
+                 nn.ReLU(inplace=True),
+                 nn.Linear(l1, cls_num),
+             )
+
+     def _set_outsize(self):
+         for name, module in self.model.named_modules():
+             if (
+                 str(name).__contains__("classifier")
+                 or str(name).__eq__("fc")
+                 or str(name).__contains__("head")
+                 or hasattr(module, "classifier")
+             ):
+                 if isinstance(module, torch.nn.Linear):
+                     self.output_size = module.in_features
+                     return True
+
+                 if isinstance(module, torch.nn.Conv2d):
+                     self.output_size = module.in_channels
+                     return False
+
+         return False
+
+     def _set_classifier(self, cls_num: int, linear_output: bool):
+         if self.m_type == "convnext":
+             del self.model.classifier[2]
+             self.model.classifier = nn.Sequential(
+                 *list(self.model.classifier)
+                 + list(self._classifier(cls_num, self.output_size, linear_output))
+             )
+             return
+
+         elif self.m_type == "maxvit":
+             del self.model.classifier[5]
+             self.model.classifier = nn.Sequential(
+                 *list(self.model.classifier)
+                 + list(self._classifier(cls_num, self.output_size, linear_output))
+             )
+             return
+
+         if hasattr(self.model, "classifier"):
+             self.model.classifier = self._classifier(
+                 cls_num, self.output_size, linear_output
+             )
+             return
+
+         elif hasattr(self.model, "fc"):
+             self.model.fc = self._classifier(cls_num, self.output_size, linear_output)
+             return
+
+         elif hasattr(self.model, "head"):
+             self.model.head = self._classifier(cls_num, self.output_size, linear_output)
+             return
+
+         self.model.heads.head = self._classifier(
+             cls_num, self.output_size, linear_output
+         )
+
+     def forward(self, x: torch.Tensor):
+         if torch.cuda.is_available():
+             x = x.cuda()
+             self.model = self.model.cuda()
+
+         if self.m_type == "googlenet":
+             return self.model(x)[0]
+         else:
+             return self.model(x)
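The hidden layer widths in `_classifier` follow a geometric taper: `q` is the fourth root of the ratio between the backbone's feature width and the class count, so each layer shrinks by roughly the same factor. A worked example, assuming the default `output_size = 512` and the 11 technique classes defined in `app.py`:

```python
output_size, cls_num = 512, 11
q = (1.0 * output_size / cls_num) ** 0.25  # ~2.61, the per-layer scale factor
l1 = int(q * cls_num)  # 28
l2 = int(q * l1)       # 73
l3 = int(q * l2)       # 190
# The linear head therefore tapers 512 -> 190 -> 73 -> 28 -> 11.
```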
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ librosa
+ torch
+ matplotlib
+ torchvision
+ pillow
utils.py ADDED
@@ -0,0 +1,67 @@
+ import os
+ import torch
+ import torchvision.transforms as transforms
+ from modelscope import snapshot_download
+ from PIL import Image
+
+ MODEL_DIR = snapshot_download(
+     "ccmusic-database/erhu_playing_tech",
+     cache_dir=f"{os.getcwd()}/__pycache__",
+ )
+ TEMP_DIR = f"{os.getcwd()}/flagged"
+
+
+ def toCUDA(x):
+     if hasattr(x, "cuda"):
+         if torch.cuda.is_available():
+             return x.cuda()
+
+     return x
+
+
+ def find_wav_files(folder_path=f"{MODEL_DIR}/examples"):
+     wav_files = []
+     for root, _, files in os.walk(folder_path):
+         for file in files:
+             if file.endswith(".wav"):
+                 file_path = os.path.join(root, file)
+                 wav_files.append(file_path)
+
+     return wav_files
+
+
+ def get_modelist(model_dir=MODEL_DIR):
+     try:
+         entries = os.listdir(model_dir)
+     except OSError as e:
+         print(f"无法访问 {model_dir}: {e}")  # cannot access model_dir
+         return
+
+     # Iterate over all entries
+     output = []
+     for entry in entries:
+         # Build the full path
+         full_path = os.path.join(model_dir, entry)
+         # Skip the .git and examples folders
+         if entry == ".git" or entry == "examples":
+             print(f"跳过 .git 或 examples 文件夹: {full_path}")  # skipping folder
+             continue
+
+         # Keep only directories; each one is a saved model log
+         if os.path.isdir(full_path):
+             output.append(os.path.basename(full_path))
+
+     return output
+
+
+ def embed_img(img_path: str, input_size=224):
+     transform = transforms.Compose(
+         [
+             transforms.Resize([input_size, input_size]),
+             transforms.ToTensor(),
+             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
+         ]
+     )
+     img = Image.open(img_path).convert("RGB")
+     return transform(img).unsqueeze(0)
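`embed_img` resizes the spectrogram image to `input_size` x `input_size`, scales pixels to [0, 1], normalizes each channel to [-1, 1], and prepends a batch dimension, so the model receives a single-image batch. A minimal usage sketch, assuming an image already written by one of the `wav2*` functions (the path is an assumption for illustration):

```python
x = embed_img(f"{TEMP_DIR}/output.jpg")  # hypothetical path from a prior wav2mel call
print(x.shape)  # torch.Size([1, 3, 224, 224])
```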