v25.12.28

ROBERT MCDOWELL, 2025-12-28 15:29:40 -08:00 (committed by GitHub)
6 changed files with 19 additions and 8 deletions

@@ -202,10 +202,12 @@ class Bark(TTSUtils, TTSRegistry, name='bark'):
     audio_tensor = audio_sentence.detach().cpu().unsqueeze(0)
 elif isinstance(audio_sentence, np.ndarray):
     audio_tensor = torch.from_numpy(audio_sentence).unsqueeze(0)
+    audio_tensor = audio_tensor.cpu()
 elif isinstance(audio_sentence, (list, tuple)):
     audio_tensor = torch.tensor(audio_sentence, dtype=torch.float32).unsqueeze(0)
+    audio_tensor = audio_tensor.cpu()
 else:
-    error = f"Unsupported Bark wav type: {type(audio_sentence)}"
+    error = f"{self.session['tts_engine']}: Unsupported wav type: {type(audio_sentence)}"
     print(error)
     return False
 if sentence[-1].isalnum() or sentence[-1] == '':
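The change is the same in every engine below: whatever the synthesizer returns (torch.Tensor, np.ndarray, list, or tuple) is normalized to a CPU tensor with a leading batch dimension before trimming, and the error message now reports the engine name from the session instead of a hardcoded string. A minimal sketch of that normalization as a shared helper, assuming the per-engine branches stay exactly as shown above (to_cpu_audio_tensor is a hypothetical name, not a function in this repo):

import numpy as np
import torch

def to_cpu_audio_tensor(wav, engine_name):
    # Hypothetical helper mirroring the branching this commit adds to each engine:
    # every supported wav type ends up as a CPU tensor of shape (1, n).
    if isinstance(wav, torch.Tensor):
        return wav.detach().cpu().unsqueeze(0)
    if isinstance(wav, np.ndarray):
        return torch.from_numpy(wav).unsqueeze(0).cpu()
    if isinstance(wav, (list, tuple)):
        return torch.tensor(wav, dtype=torch.float32).unsqueeze(0).cpu()
    print(f"{engine_name}: Unsupported wav type: {type(wav)}")
    return None

With a helper like this, each engine's if/elif chain would collapse to one call, and the unsupported-type branch (return False in the engines here) would become a None check at the call site.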

@@ -170,10 +170,12 @@ class Fairseq(TTSUtils, TTSRegistry, name='fairseq'):
     audio_tensor = audio_sentence.detach().cpu().unsqueeze(0)
 elif isinstance(audio_sentence, np.ndarray):
     audio_tensor = torch.from_numpy(audio_sentence).unsqueeze(0)
+    audio_tensor = audio_tensor.cpu()
 elif isinstance(audio_sentence, (list, tuple)):
     audio_tensor = torch.tensor(audio_sentence, dtype=torch.float32).unsqueeze(0)
+    audio_tensor = audio_tensor.cpu()
 else:
-    error = f"Unsupported Fairseq wav type: {type(audio_sentence)}"
+    error = f"{self.session['tts_engine']}: Unsupported wav type: {type(audio_sentence)}"
     print(error)
     return False
 if sentence[-1].isalnum() or sentence[-1] == '':

@@ -198,10 +198,12 @@ class Tacotron2(TTSUtils, TTSRegistry, name='tacotron'):
     audio_tensor = audio_sentence.detach().cpu().unsqueeze(0)
 elif isinstance(audio_sentence, np.ndarray):
     audio_tensor = torch.from_numpy(audio_sentence).unsqueeze(0)
+    audio_tensor = audio_tensor.cpu()
 elif isinstance(audio_sentence, (list, tuple)):
     audio_tensor = torch.tensor(audio_sentence, dtype=torch.float32).unsqueeze(0)
+    audio_tensor = audio_tensor.cpu()
 else:
-    error = f"Unsupported Tacotron2 wav type: {type(audio_sentence)}"
+    error = f"{self.session['tts_engine']}: Unsupported wav type: {type(audio_sentence)}"
     print(error)
     return False
 if sentence[-1].isalnum() or sentence[-1] == '':

@@ -183,10 +183,12 @@ class Vits(TTSUtils, TTSRegistry, name='vits'):
     audio_tensor = audio_sentence.detach().cpu().unsqueeze(0)
 elif isinstance(audio_sentence, np.ndarray):
     audio_tensor = torch.from_numpy(audio_sentence).unsqueeze(0)
+    audio_tensor = audio_tensor.cpu()
 elif isinstance(audio_sentence, (list, tuple)):
     audio_tensor = torch.tensor(audio_sentence, dtype=torch.float32).unsqueeze(0)
+    audio_tensor = audio_tensor.cpu()
 else:
-    error = f"Unsupported Vits wav type: {type(audio_sentence)}"
+    error = f"{self.session['tts_engine']}: Unsupported wav type: {type(audio_sentence)}"
     print(error)
     return False
 if sentence[-1].isalnum() or sentence[-1] == '':

@@ -142,16 +142,17 @@ class XTTSv2(TTSUtils, TTSRegistry, name='xtts'):
 audio_sentence = result.get('wav')
 if is_audio_data_valid(audio_sentence):
     if isinstance(audio_sentence, torch.Tensor):
-        audio_tensor = audio_sentence.detach().unsqueeze(0)
+        audio_tensor = audio_sentence.detach().cpu().unsqueeze(0)
     elif isinstance(audio_sentence, np.ndarray):
         audio_tensor = torch.from_numpy(audio_sentence).unsqueeze(0)
+        audio_tensor = audio_tensor.cpu()
     elif isinstance(audio_sentence, (list, tuple)):
         audio_tensor = torch.tensor(audio_sentence, dtype=torch.float32).unsqueeze(0)
+        audio_tensor = audio_tensor.cpu()
     else:
-        error = f"Unsupported XTTSv2 wav type: {type(audio_sentence)}"
+        error = f"{self.session['tts_engine']}: Unsupported wav type: {type(audio_sentence)}"
         print(error)
         return False
-    audio_tensor = audio_tensor.cpu()
     if sentence[-1].isalnum() or sentence[-1] == '':
         audio_tensor = trim_audio(audio_tensor.squeeze(), self.params['samplerate'], 0.001, trim_audio_buffer).unsqueeze(0)
     if audio_tensor is not None and audio_tensor.numel() > 0:
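The XTTSv2 hunk also routes the per-branch CPU tensor through the project's trim_audio(tensor, samplerate, 0.001, trim_audio_buffer) before the length check. That function's body is not part of this diff; an illustrative amplitude-threshold trim with a similar argument shape might look like the sketch below (trim_silence, the buffer handling, and the 0.004 s default are assumptions, not the repo's code):

import torch

def trim_silence(wav, sample_rate, threshold=0.001, buffer_sec=0.004):
    # Illustrative stand-in for trim_audio(): drop leading/trailing samples whose
    # absolute amplitude stays below `threshold`, keeping `buffer_sec` of padding.
    wav = wav.squeeze()
    voiced = (wav.abs() > threshold).nonzero(as_tuple=True)[0]
    if voiced.numel() == 0:
        return wav  # nothing above the threshold: leave the audio untouched
    pad = int(buffer_sec * sample_rate)
    start = max(int(voiced[0]) - pad, 0)
    end = min(int(voiced[-1]) + pad + 1, wav.numel())
    return wav[start:end]

Moving .cpu() into each branch makes XTTSv2 match the other engines, where the tensor is already on the CPU by the time it reaches the trim and length checks.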

@@ -113,10 +113,12 @@ class YourTTS(TTSUtils, TTSRegistry, name='yourtts'):
     audio_tensor = audio_sentence.detach().cpu().unsqueeze(0)
 elif isinstance(audio_sentence, np.ndarray):
     audio_tensor = torch.from_numpy(audio_sentence).unsqueeze(0)
+    audio_tensor = audio_tensor.cpu()
 elif isinstance(audio_sentence, (list, tuple)):
     audio_tensor = torch.tensor(audio_sentence, dtype=torch.float32).unsqueeze(0)
+    audio_tensor = audio_tensor.cpu()
 else:
-    error = f"Unsupported YourTTS wav type: {type(audio_sentence)}"
+    error = f"{self.session['tts_engine']}: Unsupported wav type: {type(audio_sentence)}"
     print(error)
     return False
 if sentence[-1].isalnum() or sentence[-1] == '':