Display duration of parallel GPU transcription
src/vadParallel.py (+5 -0)
CHANGED
@@ -126,6 +126,8 @@ class ParallelTranscription(AbstractTranscription):
 
         created_context = False
 
+        perf_start_gpu = time.perf_counter()
+
         # Spawn a separate process for each device
         try:
             if (gpu_parallel_context is None):
@@ -155,6 +157,9 @@ class ParallelTranscription(AbstractTranscription):
         if (created_context):
             gpu_parallel_context.close()
 
+        perf_end_gpu = time.perf_counter()
+        print("Parallel transcription took " + str(perf_end_gpu - perf_start_gpu) + " seconds")
+
         return merged
 
     def _get_merged_timestamps_parallel(self, transcription: AbstractTranscription, audio: str, config: TranscriptionConfig, total_duration: float,
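The patch brackets the parallel GPU pass with a matched pair of time.perf_counter() calls and prints the difference; the diff does not add an import, so presumably the module already imports time. If this kind of measurement is wanted in more places, the same idea can be wrapped in a small reusable timer. The sketch below is illustrative only and is not part of the commit: the measure helper and the run_parallel_transcription stand-in are assumptions, but the timing pattern matches the one introduced in the diff.

```python
import time
from contextlib import contextmanager

@contextmanager
def measure(label: str):
    # perf_counter() is a monotonic clock intended for measuring elapsed
    # intervals, which is why the patch prefers it over time.time().
    start = time.perf_counter()
    try:
        yield
    finally:
        # Report the elapsed time even if the timed block raises, mirroring
        # the try/finally structure around the GPU context in the patch.
        elapsed = time.perf_counter() - start
        print(f"{label} took {elapsed:.2f} seconds")

# Hypothetical usage; run_parallel_transcription is a placeholder, not a
# function from vadParallel.py.
def run_parallel_transcription():
    time.sleep(0.1)  # stand-in for the real parallel GPU work

with measure("Parallel transcription"):
    run_parallel_transcription()
```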