commit 4f701f6caa
parent cd0111787c
Author: unknown
Date:   2025-12-28 20:01:33 -08:00

6 changed files with 22 additions and 76 deletions

@@ -194,25 +194,12 @@ class Bark(TTSUtils, TTSRegistry, name='bark'):
     **tts_dyn_params,
     **fine_tuned_params
 )
-#audio_sentence = result.get('wav')
-#if is_audio_data_valid(audio_sentence):
-#    audio_sentence = audio_sentence.tolist()
 if is_audio_data_valid(audio_sentence):
-    if isinstance(audio_sentence, torch.Tensor):
-        audio_tensor = audio_sentence.detach().cpu().unsqueeze(0)
-    elif isinstance(audio_sentence, np.ndarray):
-        audio_tensor = torch.from_numpy(audio_sentence).unsqueeze(0)
-        audio_tensor = audio_tensor.cpu()
-    elif isinstance(audio_sentence, (list, tuple)):
-        audio_tensor = torch.tensor(audio_sentence, dtype=torch.float32).unsqueeze(0)
-        audio_tensor = audio_tensor.cpu()
-    else:
-        error = f"{self.session['tts_engine']}: Unsupported wav type: {type(audio_sentence)}"
-        print(error)
-        return False
-    if sentence[-1].isalnum() or sentence[-1] == '':
-        audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
+    sourceTensor = self._tensor_type(audio_sentence)
+    audio_tensor = sourceTensor.clone().detach().unsqueeze(0).cpu()
+    if audio_tensor is not None and audio_tensor.numel() > 0:
+        if sentence[-1].isalnum() or sentence[-1] == '':
+            audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
     self.audio_segments.append(audio_tensor)
     if not re.search(r'\w$', sentence, flags=re.UNICODE) and sentence[-1] != '':
         silence_time = int(np.random.uniform(0.3, 0.6) * 100) / 100
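
All six engine files switch from per-engine isinstance dispatch to a shared self._tensor_type(audio_sentence) helper. Its body is not part of this commit; a minimal sketch consistent with the type handling deleted above, assuming the method lives on TTSUtils, could be:

    import numpy as np
    import torch

    def _tensor_type(self, audio_data):
        # Normalize whatever the engine returned (tensor, ndarray, or list/tuple)
        # into a CPU tensor; callers then apply .clone().detach().unsqueeze(0).
        if isinstance(audio_data, torch.Tensor):
            return audio_data.detach().cpu()
        if isinstance(audio_data, np.ndarray):
            return torch.from_numpy(audio_data).cpu()
        if isinstance(audio_data, (list, tuple)):
            return torch.tensor(audio_data, dtype=torch.float32).cpu()
        raise TypeError(f"Unsupported wav type: {type(audio_data)}")

One behavioral difference under this assumption: the deleted code printed an error and returned False on an unsupported type, whereas a helper must raise or return a sentinel, so the real implementation may signal failure differently.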

@@ -166,21 +166,11 @@ class Fairseq(TTSUtils, TTSRegistry, name='fairseq'):
     **speaker_argument
 )
 if is_audio_data_valid(audio_sentence):
-    if isinstance(audio_sentence, torch.Tensor):
-        audio_tensor = audio_sentence.detach().cpu().unsqueeze(0)
-    elif isinstance(audio_sentence, np.ndarray):
-        audio_tensor = torch.from_numpy(audio_sentence).unsqueeze(0)
-        audio_tensor = audio_tensor.cpu()
-    elif isinstance(audio_sentence, (list, tuple)):
-        audio_tensor = torch.tensor(audio_sentence, dtype=torch.float32).unsqueeze(0)
-        audio_tensor = audio_tensor.cpu()
-    else:
-        error = f"{self.session['tts_engine']}: Unsupported wav type: {type(audio_sentence)}"
-        print(error)
-        return False
-    if sentence[-1].isalnum() or sentence[-1] == '':
-        audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
+    sourceTensor = self._tensor_type(audio_sentence)
+    audio_tensor = sourceTensor.clone().detach().unsqueeze(0).cpu()
+    if audio_tensor is not None and audio_tensor.numel() > 0:
+        if sentence[-1].isalnum() or sentence[-1] == '':
+            audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
     self.audio_segments.append(audio_tensor)
     if not re.search(r'\w$', sentence, flags=re.UNICODE) and sentence[-1] != '':
         silence_time = int(np.random.uniform(0.3, 0.6) * 100) / 100

@@ -194,21 +194,11 @@ class Tacotron2(TTSUtils, TTSRegistry, name='tacotron'):
     **speaker_argument
 )
 if is_audio_data_valid(audio_sentence):
-    if isinstance(audio_sentence, torch.Tensor):
-        audio_tensor = audio_sentence.detach().cpu().unsqueeze(0)
-    elif isinstance(audio_sentence, np.ndarray):
-        audio_tensor = torch.from_numpy(audio_sentence).unsqueeze(0)
-        audio_tensor = audio_tensor.cpu()
-    elif isinstance(audio_sentence, (list, tuple)):
-        audio_tensor = torch.tensor(audio_sentence, dtype=torch.float32).unsqueeze(0)
-        audio_tensor = audio_tensor.cpu()
-    else:
-        error = f"{self.session['tts_engine']}: Unsupported wav type: {type(audio_sentence)}"
-        print(error)
-        return False
-    if sentence[-1].isalnum() or sentence[-1] == '':
-        audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
+    sourceTensor = self._tensor_type(audio_sentence)
+    audio_tensor = sourceTensor.clone().detach().unsqueeze(0).cpu()
+    if audio_tensor is not None and audio_tensor.numel() > 0:
+        if sentence[-1].isalnum() or sentence[-1] == '':
+            audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
     self.audio_segments.append(audio_tensor)
     if not re.search(r'\w$', sentence, flags=re.UNICODE) and sentence[-1] != '':
         silence_time = int(np.random.uniform(0.3, 0.6) * 100) / 100

@@ -179,21 +179,11 @@ class Vits(TTSUtils, TTSRegistry, name='vits'):
     **speaker_argument
 )
 if is_audio_data_valid(audio_sentence):
-    if isinstance(audio_sentence, torch.Tensor):
-        audio_tensor = audio_sentence.detach().cpu().unsqueeze(0)
-    elif isinstance(audio_sentence, np.ndarray):
-        audio_tensor = torch.from_numpy(audio_sentence).unsqueeze(0)
-        audio_tensor = audio_tensor.cpu()
-    elif isinstance(audio_sentence, (list, tuple)):
-        audio_tensor = torch.tensor(audio_sentence, dtype=torch.float32).unsqueeze(0)
-        audio_tensor = audio_tensor.cpu()
-    else:
-        error = f"{self.session['tts_engine']}: Unsupported wav type: {type(audio_sentence)}"
-        print(error)
-        return False
-    if sentence[-1].isalnum() or sentence[-1] == '':
-        audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
+    sourceTensor = self._tensor_type(audio_sentence)
+    audio_tensor = sourceTensor.clone().detach().unsqueeze(0).cpu()
+    if audio_tensor is not None and audio_tensor.numel() > 0:
+        if sentence[-1].isalnum() or sentence[-1] == '':
+            audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
     self.audio_segments.append(audio_tensor)
     if not re.search(r'\w$', sentence, flags=re.UNICODE) and sentence[-1] != '':
         silence_time = int(np.random.uniform(0.3, 0.6) * 100) / 100

@@ -141,12 +141,11 @@ class XTTSv2(TTSUtils, TTSRegistry, name='xtts'):
 )
 audio_sentence = result.get('wav')
 if is_audio_data_valid(audio_sentence):
-    audio_sentence = result.get('wav')
     sourceTensor = self._tensor_type(audio_sentence)
     audio_tensor = sourceTensor.clone().detach().unsqueeze(0).cpu()
-    if sentence[-1].isalnum() or sentence[-1] == '':
-        audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
+    if audio_tensor is not None and audio_tensor.numel() > 0:
+        if sentence[-1].isalnum() or sentence[-1] == '':
+            audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
     self.audio_segments.append(audio_tensor)
     if not re.search(r'\w$', sentence, flags=re.UNICODE) and sentence[-1] != '':
         silence_time = int(np.random.uniform(0.3, 0.6) * 100) / 100
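
XTTSv2 already routed audio through _tensor_type; this hunk only drops a duplicated result.get('wav') and gates the trim behind a non-empty tensor check. A standalone illustration of that guard, with hypothetical values:

    import torch

    audio_tensor = torch.empty(0).unsqueeze(0).cpu()  # engine returned no samples
    if audio_tensor is not None and audio_tensor.numel() > 0:
        print("trim")  # skipped: numel() == 0, so trim_audio never sees an empty wav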

@@ -109,21 +109,11 @@ class YourTTS(TTSUtils, TTSRegistry, name='yourtts'):
     **speaker_argument
 )
 if is_audio_data_valid(audio_sentence):
-    if isinstance(audio_sentence, torch.Tensor):
-        audio_tensor = audio_sentence.detach().cpu().unsqueeze(0)
-    elif isinstance(audio_sentence, np.ndarray):
-        audio_tensor = torch.from_numpy(audio_sentence).unsqueeze(0)
-        audio_tensor = audio_tensor.cpu()
-    elif isinstance(audio_sentence, (list, tuple)):
-        audio_tensor = torch.tensor(audio_sentence, dtype=torch.float32).unsqueeze(0)
-        audio_tensor = audio_tensor.cpu()
-    else:
-        error = f"{self.session['tts_engine']}: Unsupported wav type: {type(audio_sentence)}"
-        print(error)
-        return False
-    if sentence[-1].isalnum() or sentence[-1] == '':
-        audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
+    sourceTensor = self._tensor_type(audio_sentence)
+    audio_tensor = sourceTensor.clone().detach().unsqueeze(0).cpu()
+    if audio_tensor is not None and audio_tensor.numel() > 0:
+        if sentence[-1].isalnum() or sentence[-1] == '':
+            audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
     self.audio_segments.append(audio_tensor)
     if not re.search(r'\w$', sentence, flags=re.UNICODE) and sentence[-1] != '':
         silence_time = int(np.random.uniform(0.3, 0.6) * 100) / 100
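
trim_audio itself is defined elsewhere in the repository; from its call sites it takes a 1-D tensor, a sample rate, an amplitude threshold (0.001 here), and a buffer. A hypothetical sketch matching that signature, assuming the buffer is a padding duration in seconds:

    import torch

    def trim_audio(wav, samplerate, threshold, buffer_sec):
        # Drop leading/trailing samples whose amplitude is below `threshold`,
        # keeping `buffer_sec` seconds of padding on each side.
        voiced = (wav.abs() > threshold).nonzero().squeeze(-1)
        if voiced.numel() == 0:
            return wav  # nothing above threshold; return unchanged
        pad = int(buffer_sec * samplerate)
        start = max(int(voiced[0]) - pad, 0)
        end = min(int(voiced[-1]) + pad + 1, wav.numel())
        return wav[start:end]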