{ "cells": [ { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "10406.263945578237\n" ] } ], "source": [ "import os\n", "import librosa\n", "import numpy as np\n", "path = \"./9nine/nimi_sora/reference_audio\"\n", "files = os.listdir(path)\n", "time = 0\n", "for file in files:\n", " data, fs = librosa.load(path + \"/\" + file)\n", " time += len(data) / fs\n", "print(time)" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "2.890627777777778" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "10406.26/60/60" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([[False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, False, True, True, True, True, True],\n", " [False, False, False, False, False, False, True, True, True, True],\n", " [False, False, False, False, False, False, False, True, True, True],\n", " [False, False, False, False, False, False, False, False, True, True],\n", " [False, False, False, False, False, False, False, False, False, True],\n", " [False, False, False, False, False, False, False, False, False, False]])\n" ] } ], "source": [ "import torch\n", "import torch.nn.functional as F\n", "x = torch.randn(3, 4, 5)\n", "y = torch.randn(3, 6, 5)\n", "x_len = x.shape[1]\n", "y_len = y.shape[1]\n", "\n", "x_attn_mask = F.pad(\n", " torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),\n", " (0, y_len),\n", " value=True, )\n", "y_attn_mask = F.pad(\n", " torch.triu(\n", " torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),\n", " diagonal=1, ),\n", " (x_len, 0),\n", " value=False, )\n", "xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)\n", "bsz, src_len = x.shape[0], x_len + y_len\n", "_xy_padding_mask = (ar_xy_padding_mask.view(bsz, 1, 1, src_len)\n", " .expand(-1, self.num_head, -1, -1)\n", " .reshape(bsz * self.num_head, 1, src_len))" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([[False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, False, True, True, True, True, True],\n", " [False, False, False, False, False, False, True, True, True, True],\n", " [False, False, False, False, False, False, False, True, True, True],\n", " [False, False, False, False, False, False, False, False, True, True],\n", " [False, False, False, False, False, False, False, False, False, True],\n", " [False, False, False, False, False, False, False, False, False, False]])\n" ] } ], "source": [ "import torch\n", "import torch.nn.functional as F\n", "from AR.models.utils import make_pad_mask\n", "# self.ar_text_embedding = TokenEmbedding(\n", "# self.embedding_dim, self.phoneme_vocab_size, self.p_dropout) vocab_size = 512, embedding_dim = 512\n", "\n", "# x = x + 
{ "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([[False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, True, True, True, True, True, True],\n", " [False, False, False, False, False, True, True, True, True, True],\n", " [False, False, False, False, False, False, True, True, True, True],\n", " [False, False, False, False, False, False, False, True, True, True],\n", " [False, False, False, False, False, False, False, False, True, True],\n", " [False, False, False, False, False, False, False, False, False, True],\n", " [False, False, False, False, False, False, False, False, False, False]])\n" ] } ], "source": [ "import torch\n", "import torch.nn.functional as F\n", "from AR.models.utils import make_pad_mask\n", "# self.ar_text_embedding = TokenEmbedding(\n", "#     self.embedding_dim, self.phoneme_vocab_size, self.p_dropout)  # vocab_size = 512, embedding_dim = 512\n", "\n", "# x = x + self.bert_proj(bert_feature.transpose(1,2))\n", "# x = self.ar_text_position(x)\n", "# x_mask = make_pad_mask(x_lens)\n", "\n", "# y_mask = make_pad_mask(y_lens)\n", "# y_mask_int = y_mask.type(torch.int64)\n", "# codes = y.type(torch.int64) * (1 - y_mask_int)\n", "\n", "# Training\n", "# AR Decoder\n", "# Concatenate the x (text) and y (audio) tokens; the embedding dim is 512.\n", "# Test a simple case:\n", "num_head = 8\n", "\n", "x_len = 4\n", "y_len = 6\n", "x = torch.randn(3, x_len, 512)  # dummy batch so the cell runs on its own\n", "\n", "x_attn_mask = F.pad(\n", "    torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),\n", "    (0, y_len),\n", "    value=True, )\n", "y_attn_mask = F.pad(\n", "    torch.triu(\n", "        torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),\n", "        diagonal=1, ),\n", "    (x_len, 0),\n", "    value=False, )\n", "xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)\n", "bsz, src_len = x.shape[0], x_len + y_len\n", "print(xy_attn_mask)\n", "# _xy_padding_mask = (ar_xy_padding_mask.view(bsz, 1, 1, src_len)\n", "#                     .expand(-1, num_head, -1, -1)\n", "#                     .reshape(bsz * num_head, 1, src_len))\n", "# xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)\n", "# new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)\n", "# new_attn_mask.masked_fill_(xy_attn_mask, float(\"-inf\"))\n", "# xy_attn_mask = new_attn_mask" ] },
{ "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "torch.Size([324, 192])" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import torch\n", "import AR.models.t2s_model\n", "# model = AR.models.t2s_model.Text2SemanticDecoder()\n", "# Inspect the fine-tuned SoVITS checkpoint; 'state' avoids shadowing the builtin dict.\n", "state = torch.load(r\"D:\\pyprojs\\GPT-SoVITSs\\fork\\GPT-SoVITS-NIMI_SORA\\9nine\\nimi_sora\\sora_e5_s3275.pth\")\n", "state[\"weight\"][\"enc_p.text_embedding.weight\"].shape" ] },
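{ "cell_type": "markdown", "metadata": {}, "source": [ "To see where `[324, 192]` fits in the overall layout, the whole \"weight\" state dict can be enumerated. A minimal sketch, assuming the same checkpoint file as above; `map_location=\"cpu\"` just keeps the load device-independent:" ] },
{ "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import torch\n", "\n", "ckpt_path = r\"D:\\pyprojs\\GPT-SoVITSs\\fork\\GPT-SoVITS-NIMI_SORA\\9nine\\nimi_sora\\sora_e5_s3275.pth\"\n", "state = torch.load(ckpt_path, map_location=\"cpu\")\n", "# Print every parameter name in the checkpoint together with its shape.\n", "for name, tensor in state[\"weight\"].items():\n", "    print(name, tuple(tensor.shape))" ] } ], "metadata": { "kernelspec": { "display_name": "vits", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.0" } }, "nbformat": 4, "nbformat_minor": 2 }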