Spaces: Running on Zero
wenmengzhou committed
Commit • 66f70d6
1 Parent(s): 74ffd9d

update cli/model.py to initialize to cpu
Files changed: cosyvoice/cli/model.py (+3 -1)
cosyvoice/cli/model.py CHANGED

@@ -19,7 +19,8 @@ class CosyVoiceModel:
                  llm: torch.nn.Module,
                  flow: torch.nn.Module,
                  hift: torch.nn.Module):
-        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+        #self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+        self.device = 'cpu'
         self.llm = llm
         self.flow = flow
         self.hift = hift
@@ -37,6 +38,7 @@ class CosyVoiceModel:
                   llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), llm_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                   flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), flow_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32),
                   prompt_speech_feat=torch.zeros(1, 0, 80), prompt_speech_feat_len=torch.zeros(1, dtype=torch.int32)):
+        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
         tts_speech_token = self.llm.inference(text=text.to(self.device),
                                               text_len=text_len.to(self.device),
                                               prompt_text=prompt_text.to(self.device),
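For context, the diff follows a common pattern for environments where the GPU is not visible when the process starts (as is typically the case on ZeroGPU Spaces): construct the model on CPU and resolve the CUDA device lazily, inside the inference call. The sketch below only illustrates that device-handling pattern under those assumptions; ToyModel, net, and x are hypothetical stand-ins, not the CosyVoice API.

import torch


class ToyModel:
    """Hypothetical wrapper showing lazy device selection (not the CosyVoice API)."""

    def __init__(self, net: torch.nn.Module):
        # Mirror the commit: stay on CPU at construction time, since CUDA
        # may not be available yet in the process that builds the model.
        self.device = 'cpu'
        self.net = net.to(self.device)

    def inference(self, x: torch.Tensor) -> torch.Tensor:
        # Re-check CUDA availability at call time and move weights and
        # inputs only when a GPU is actually usable.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.net = self.net.to(self.device)
        with torch.no_grad():
            return self.net(x.to(self.device))


if __name__ == '__main__':
    model = ToyModel(torch.nn.Linear(4, 2))
    print(model.inference(torch.randn(1, 4)).shape)  # torch.Size([1, 2])

Deferring the device check to the inference call means the decision is made per request, which matches how the second hunk re-selects the device each time inference runs instead of fixing it in the constructor.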