mirror of https://github.com/SYSTRAN/faster-whisper.git (synced 2026-01-08 13:14:00 -05:00)
Add new tests (#1158)
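The new and updated tests below can be run with pytest from the repository root; a minimal invocation (assuming pytest is installed and the tests/data files are present) would be:

    pytest tests/test_tokenizer.py tests/test_transcribe.py -v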
BIN  tests/data/hotwords.mp3  (new file; binary file not shown)
120  tests/test_tokenizer.py  (new file)
tests/test_tokenizer.py
@@ -0,0 +1,120 @@
from faster_whisper import WhisperModel
from faster_whisper.tokenizer import Tokenizer
from faster_whisper.transcribe import get_suppressed_tokens


def test_suppressed_tokens_minus_1():
    model = WhisperModel("tiny.en")

    tokenizer = Tokenizer(model.hf_tokenizer, False)
    tokens = get_suppressed_tokens(tokenizer, [-1])
    assert tokens == (
        1,
        2,
        7,
        8,
        9,
        10,
        14,
        25,
        26,
        27,
        28,
        29,
        31,
        58,
        59,
        60,
        61,
        62,
        63,
        90,
        91,
        92,
        93,
        357,
        366,
        438,
        532,
        685,
        705,
        796,
        930,
        1058,
        1220,
        1267,
        1279,
        1303,
        1343,
        1377,
        1391,
        1635,
        1782,
        1875,
        2162,
        2361,
        2488,
        3467,
        4008,
        4211,
        4600,
        4808,
        5299,
        5855,
        6329,
        7203,
        9609,
        9959,
        10563,
        10786,
        11420,
        11709,
        11907,
        13163,
        13697,
        13700,
        14808,
        15306,
        16410,
        16791,
        17992,
        19203,
        19510,
        20724,
        22305,
        22935,
        27007,
        30109,
        30420,
        33409,
        34949,
        40283,
        40493,
        40549,
        47282,
        49146,
        50257,
        50357,
        50358,
        50359,
        50360,
    )


def test_suppressed_tokens_minus_value():
    model = WhisperModel("tiny.en")

    tokenizer = Tokenizer(model.hf_tokenizer, False)
    tokens = get_suppressed_tokens(tokenizer, [13])
    assert tokens == (13, 50257, 50357, 50358, 50359, 50360)


def test_split_on_unicode():
    model = WhisperModel("tiny")
    tokenizer = Tokenizer(model.hf_tokenizer, False)

    tokens = [8404, 871, 287, 6, 246, 526, 3210, 20378]
    words, word_tokens = tokenizer.split_tokens_on_unicode(tokens)

    assert words == [" elle", " est", " l", "'", "\ufffd", "é", "rit", "oire"]
    assert word_tokens == [[8404], [871], [287], [6], [246], [526], [3210], [20378]]
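For context, get_suppressed_tokens is the helper behind the public suppress_tokens option of WhisperModel.transcribe, whose default value of [-1] expands to the non-speech list asserted above. A minimal sketch of the user-facing side (the audio path here is a placeholder):

    from faster_whisper import WhisperModel

    model = WhisperModel("tiny.en")
    # suppress_tokens=[-1] is the default; it expands to the id list asserted in
    # test_suppressed_tokens_minus_1. An explicit list such as [13] suppresses
    # those ids plus the special tokens (test_suppressed_tokens_minus_value).
    segments, info = model.transcribe("audio.wav", suppress_tokens=[-1])
    for segment in segments:
        print(segment.text)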
tests/test_transcribe.py
@@ -1,10 +1,9 @@
+import inspect
 import os

 import numpy as np

 from faster_whisper import BatchedInferencePipeline, WhisperModel, decode_audio
-from faster_whisper.tokenizer import Tokenizer
-from faster_whisper.transcribe import get_suppressed_tokens


 def test_supported_languages():
@@ -215,107 +214,58 @@ def test_multilingual_transcription(data_dir):
     )


-def test_suppressed_tokens_minus_1():
-    model = WhisperModel("tiny.en")
+def test_hotwords(data_dir):
+    model = WhisperModel("tiny")
+    pipeline = BatchedInferencePipeline(model)

-    tokenizer = Tokenizer(model.hf_tokenizer, False)
-    tokens = get_suppressed_tokens(tokenizer, [-1])
-    assert tokens == (
-        1,
-        2,
-        7,
-        8,
-        9,
-        10,
-        14,
-        25,
-        26,
-        27,
-        28,
-        29,
-        31,
-        58,
-        59,
-        60,
-        61,
-        62,
-        63,
-        90,
-        91,
-        92,
-        93,
-        357,
-        366,
-        438,
-        532,
-        685,
-        705,
-        796,
-        930,
-        1058,
-        1220,
-        1267,
-        1279,
-        1303,
-        1343,
-        1377,
-        1391,
-        1635,
-        1782,
-        1875,
-        2162,
-        2361,
-        2488,
-        3467,
-        4008,
-        4211,
-        4600,
-        4808,
-        5299,
-        5855,
-        6329,
-        7203,
-        9609,
-        9959,
-        10563,
-        10786,
-        11420,
-        11709,
-        11907,
-        13163,
-        13697,
-        13700,
-        14808,
-        15306,
-        16410,
-        16791,
-        17992,
-        19203,
-        19510,
-        20724,
-        22305,
-        22935,
-        27007,
-        30109,
-        30420,
-        33409,
-        34949,
-        40283,
-        40493,
-        40549,
-        47282,
-        49146,
-        50257,
-        50357,
-        50358,
-        50359,
-        50360,
-    )
+    audio_path = os.path.join(data_dir, "hotwords.mp3")
+    audio = decode_audio(audio_path)
+
+    segments, info = model.transcribe(audio, hotwords="ComfyUI")
+    segments = list(segments)
+
+    assert "ComfyUI" in segments[0].text
+    assert info.transcription_options.hotwords == "ComfyUI"
+
+    segments, info = pipeline.transcribe(audio, hotwords="ComfyUI")
+    segments = list(segments)
+
+    assert "ComfyUI" in segments[0].text
+    assert info.transcription_options.hotwords == "ComfyUI"


+def test_transcribe_signature():
+    model_transcribe_args = set(inspect.getargs(WhisperModel.transcribe.__code__).args)
+    pipeline_transcribe_args = set(
+        inspect.getargs(BatchedInferencePipeline.transcribe.__code__).args
+    )
+    pipeline_transcribe_args.remove("batch_size")
+
+    assert model_transcribe_args == pipeline_transcribe_args
+
+
-def test_suppressed_tokens_minus_value():
-    model = WhisperModel("tiny.en")
+def test_monotonic_timestamps(physcisworks_path):
+    model = WhisperModel("tiny")
+    pipeline = BatchedInferencePipeline(model=model)

-    tokenizer = Tokenizer(model.hf_tokenizer, False)
-    tokens = get_suppressed_tokens(tokenizer, [13])
-    assert tokens == (13, 50257, 50357, 50358, 50359, 50360)
+    segments, info = model.transcribe(physcisworks_path, word_timestamps=True)
+    segments = list(segments)
+
+    for i in range(len(segments) - 1):
+        assert segments[i].start <= segments[i].end
+        assert segments[i].end <= segments[i + 1].start
+        for word in segments[i].words:
+            assert word.start <= word.end
+            assert word.end <= segments[i].end
+    assert segments[-1].end <= info.duration
+
+    segments, info = pipeline.transcribe(physcisworks_path, word_timestamps=True)
+    segments = list(segments)
+
+    for i in range(len(segments) - 1):
+        assert segments[i].start <= segments[i].end
+        assert segments[i].end <= segments[i + 1].start
+        for word in segments[i].words:
+            assert word.start <= word.end
+            assert word.end <= segments[i].end
+    assert segments[-1].end <= info.duration
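test_transcribe_signature above checks that BatchedInferencePipeline.transcribe exposes the same options as WhisperModel.transcribe apart from the extra batch_size parameter; a minimal usage sketch of the batched pipeline (the audio path and batch size are placeholders):

    from faster_whisper import BatchedInferencePipeline, WhisperModel

    model = WhisperModel("tiny")
    pipeline = BatchedInferencePipeline(model)
    # batch_size is the one option the pipeline adds on top of WhisperModel.transcribe
    segments, info = pipeline.transcribe("audio.wav", batch_size=8)
    for segment in segments:
        print(segment.text)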