Corrected a few misspelled words (#1435)

Author: Felix
Date: 2023-08-05 01:51:08 +02:00 (committed by GitHub)
Parent: 043d5f2cb5
Commit: 97a6029cf7
4 changed files with 12 additions and 12 deletions

View File

@@ -477,7 +477,7 @@ def bytes_to_unicode():
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
-This is a signficant percentage of your normal, say, 32K bpe vocab.
+This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""

View File

@@ -685,8 +685,8 @@ if __name__ == '__main__':
hps = get_hparams_from_file(config_path)
# If model has multiple speakers, validate speaker id and retrieve name if available.
-model_has_multiple_spakers = hps.data.n_speakers > 0
-if model_has_multiple_spakers:
+model_has_multiple_speakers = hps.data.n_speakers > 0
+if model_has_multiple_speakers:
logging.info(f"Model has {hps.data.n_speakers} speakers")
if args.speaker_id >= hps.data.n_speakers: raise ValueError(f"Speaker ID {args.speaker_id} is invalid for this model.")
speaker_name = "?"
@@ -722,7 +722,7 @@ if __name__ == '__main__':
stn_tst = text_mapper.get_text(text_to_synthesize, hps.data.add_blank, hps.data.text_cleaners)
logging.debug(f"Converted input text to tensor \"{text_to_synthesize}\" -> Tensor({stn_tst.shape}): {stn_tst.numpy()}")
x_tst, x_tst_lengths = stn_tst.unsqueeze(0), Tensor([stn_tst.shape[0]], dtype=dtypes.int64)
-sid = Tensor([args.speaker_id], dtype=dtypes.int64) if model_has_multiple_spakers else None
+sid = Tensor([args.speaker_id], dtype=dtypes.int64) if model_has_multiple_speakers else None
# Perform inference.
start_time = time.time()
@@ -732,7 +732,7 @@ if __name__ == '__main__':
# Save the audio output.
audio_data = (np.clip(audio_tensor.numpy(), -1.0, 1.0) * 32767).astype(np.int16)
-out_path = Path(args.out_path or Path(args.out_dir)/f"{args.model_to_use}{f'_sid_{args.speaker_id}' if model_has_multiple_spakers else ''}_{args.base_name}.wav")
+out_path = Path(args.out_path or Path(args.out_dir)/f"{args.model_to_use}{f'_sid_{args.speaker_id}' if model_has_multiple_speakers else ''}_{args.base_name}.wav")
out_path.parent.mkdir(parents=True, exist_ok=True)
with wave.open(str(out_path), 'wb') as wav_file:
wav_file.setnchannels(args.num_channels)
@@ -740,4 +740,4 @@ if __name__ == '__main__':
wav_file.setframerate(hps.data.sampling_rate)
wav_file.setnframes(len(audio_data))
wav_file.writeframes(audio_data.tobytes())
logging.info(f"Saved audio output to {out_path}")
logging.info(f"Saved audio output to {out_path}")

View File

@@ -102,7 +102,7 @@ class BertOutput:
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
-# approixmation of the error function
+# approximation of the error function
def erf(x):
t = (1 + 0.3275911 * x.abs()).reciprocal()
return x.sign() * (1 - ((((1.061405429 * t + -1.453152027) * t + 1.421413741) * t + -0.284496736) * t + 0.254829592) * t * (-(x.square())).exp())
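
The coefficients above are the classic Abramowitz and Stegun 7.1.26 polynomial approximation of erf, with a maximum absolute error of about 1.5e-7. A plain-Python check against math.erf, separate from the Tensor version in the diff:

import math

def erf_approx(x: float) -> float:
    # Same coefficients as the Tensor implementation above, in Horner form.
    sign = 1.0 if x >= 0 else -1.0
    t = 1.0 / (1.0 + 0.3275911 * abs(x))
    poly = ((((1.061405429 * t - 1.453152027) * t + 1.421413741) * t - 0.284496736) * t + 0.254829592) * t
    return sign * (1.0 - poly * math.exp(-x * x))

for v in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(erf_approx(v) - math.erf(v)) < 1e-6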

View File

@@ -339,7 +339,7 @@ class TestOpt(unittest.TestCase):
np.testing.assert_allclose(c.numpy().transpose(1,0), d.numpy(), rtol=1e-3, atol=1e-5)
assert cache_len == 1, "reduceop was rerun!"
-@unittest.skipIf(PUSH_PERMUTES, "this test is brokem with PUSH_PERMUTES")
+@unittest.skipIf(PUSH_PERMUTES, "this test is broken with PUSH_PERMUTES")
def test_no_reduceop_rerun_alt(self):
a = Tensor.randn(16, 16, 16)
with CLCache():
@@ -366,7 +366,7 @@ class TestOpt(unittest.TestCase):
a = Tensor.ones(n, m).sum(axis).reshape(n, 1).expand(n, m).sum(axis)
a.realize()
cache_len = len(GlobalCounters.cache)
np.testing.assert_allclose(a.numpy(), b.numpy(), rtol=1e-3, atol=1e-5)
return cache_len
def test_expand_reduce_is_folded_on_same_axis(self):
@@ -377,9 +377,9 @@ class TestOpt(unittest.TestCase):
a = Tensor.ones(n, n).sum(axis).reshape(n, 1).expand(n, n).sum(axis)
a.realize()
cache_len = len(GlobalCounters.cache)
np.testing.assert_allclose(a.numpy(), b.numpy(), rtol=1e-3, atol=1e-5)
return cache_len
def test_expand_reduce_is_not_folded_on_different_axes(self):
axis1, axis2 = 0, 1
for n in [4, 8, 16]:
@@ -388,7 +388,7 @@ class TestOpt(unittest.TestCase):
a = Tensor.ones(n, n).sum(axis1).reshape(n, 1).expand(n, n).sum(axis2)
a.realize()
cache_len = len(GlobalCounters.cache)
np.testing.assert_allclose(a.numpy(), b.numpy(), rtol=1e-3, atol=1e-5)
return cache_len
if __name__ == '__main__':