Spaces:
Configuration error
Configuration error
Fedir Zadniprovskyi
committed on
Commit
·
42343e0
1
Parent(s):
04d664a
chore: fix some ruff errors
Browse files
pyproject.toml
CHANGED
@@ -48,11 +48,9 @@ ignore = [
|
|
48 |
"FIX",
|
49 |
"TD", # disable todo warnings
|
50 |
"ERA", # allow commented out code
|
51 |
-
"PTH",
|
52 |
|
53 |
"ANN003", # missing kwargs
|
54 |
"ANN101", # missing self type
|
55 |
-
"ANN102", # missing cls
|
56 |
"B006",
|
57 |
"B008",
|
58 |
"COM812", # trailing comma
|
|
|
48 |
"FIX",
|
49 |
"TD", # disable todo warnings
|
50 |
"ERA", # allow commented out code
|
|
|
51 |
|
52 |
"ANN003", # missing kwargs
|
53 |
"ANN101", # missing self type
|
|
|
54 |
"B006",
|
55 |
"B008",
|
56 |
"COM812", # trailing comma
|
scripts/client.py
CHANGED
@@ -64,7 +64,7 @@ while True:
|
|
64 |
print(f"Recording finished. File size: {file.stat().st_size} bytes")
|
65 |
|
66 |
try:
|
67 |
-
with open(
|
68 |
start = time.perf_counter()
|
69 |
res = client.post(
|
70 |
OPENAI_BASE_URL + TRANSCRIBE_PATH,
|
|
|
64 |
print(f"Recording finished. File size: {file.stat().st_size} bytes")
|
65 |
|
66 |
try:
|
67 |
+
with file.open("rb") as fd:
|
68 |
start = time.perf_counter()
|
69 |
res = client.post(
|
70 |
OPENAI_BASE_URL + TRANSCRIBE_PATH,
|
src/faster_whisper_server/gradio_app.py
CHANGED
@@ -1,4 +1,5 @@
|
|
1 |
from collections.abc import Generator
|
|
|
2 |
|
3 |
import gradio as gr
|
4 |
import httpx
|
@@ -33,7 +34,7 @@ def create_gradio_demo(config: Config) -> gr.Blocks:
|
|
33 |
yield audio_task(file_path, endpoint, temperature, model)
|
34 |
|
35 |
def audio_task(file_path: str, endpoint: str, temperature: float, model: str) -> str:
|
36 |
-
with open(
|
37 |
response = http_client.post(
|
38 |
endpoint,
|
39 |
files={"file": file},
|
@@ -50,7 +51,7 @@ def create_gradio_demo(config: Config) -> gr.Blocks:
|
|
50 |
def streaming_audio_task(
|
51 |
file_path: str, endpoint: str, temperature: float, model: str
|
52 |
) -> Generator[str, None, None]:
|
53 |
-
with open(
|
54 |
kwargs = {
|
55 |
"files": {"file": file},
|
56 |
"data": {
|
|
|
1 |
from collections.abc import Generator
|
2 |
+
from pathlib import Path
|
3 |
|
4 |
import gradio as gr
|
5 |
import httpx
|
|
|
34 |
yield audio_task(file_path, endpoint, temperature, model)
|
35 |
|
36 |
def audio_task(file_path: str, endpoint: str, temperature: float, model: str) -> str:
|
37 |
+
with Path(file_path).open("rb") as file:
|
38 |
response = http_client.post(
|
39 |
endpoint,
|
40 |
files={"file": file},
|
|
|
51 |
def streaming_audio_task(
|
52 |
file_path: str, endpoint: str, temperature: float, model: str
|
53 |
) -> Generator[str, None, None]:
|
54 |
+
with Path(file_path).open("rb") as file:
|
55 |
kwargs = {
|
56 |
"files": {"file": file},
|
57 |
"data": {
|
src/faster_whisper_server/routers/list_models.py
CHANGED
@@ -24,7 +24,7 @@ router = APIRouter()
|
|
24 |
def get_models() -> ListModelsResponse:
|
25 |
models = huggingface_hub.list_models(library="ctranslate2", tags="automatic-speech-recognition", cardData=True)
|
26 |
models = list(models)
|
27 |
-
models.sort(key=lambda model: model.downloads, reverse=True)
|
28 |
transformed_models: list[Model] = []
|
29 |
for model in models:
|
30 |
assert model.created_at is not None
|
@@ -56,7 +56,7 @@ def get_model(
|
|
56 |
model_name=model_name, library="ctranslate2", tags="automatic-speech-recognition", cardData=True
|
57 |
)
|
58 |
models = list(models)
|
59 |
-
models.sort(key=lambda model: model.downloads, reverse=True)
|
60 |
if len(models) == 0:
|
61 |
raise HTTPException(status_code=404, detail="Model doesn't exists")
|
62 |
exact_match: ModelInfo | None = None
|
|
|
24 |
def get_models() -> ListModelsResponse:
|
25 |
models = huggingface_hub.list_models(library="ctranslate2", tags="automatic-speech-recognition", cardData=True)
|
26 |
models = list(models)
|
27 |
+
models.sort(key=lambda model: model.downloads or -1, reverse=True)
|
28 |
transformed_models: list[Model] = []
|
29 |
for model in models:
|
30 |
assert model.created_at is not None
|
|
|
56 |
model_name=model_name, library="ctranslate2", tags="automatic-speech-recognition", cardData=True
|
57 |
)
|
58 |
models = list(models)
|
59 |
+
models.sort(key=lambda model: model.downloads or -1, reverse=True)
|
60 |
if len(models) == 0:
|
61 |
raise HTTPException(status_code=404, detail="Model doesn't exists")
|
62 |
exact_match: ModelInfo | None = None
|
src/faster_whisper_server/routers/stt.py
CHANGED
@@ -57,26 +57,27 @@ def segments_to_response(
|
|
57 |
response_format: ResponseFormat,
|
58 |
) -> Response:
|
59 |
segments = list(segments)
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
|
77 |
-
|
78 |
-
|
79 |
-
|
|
|
80 |
|
81 |
|
82 |
def format_as_sse(data: str) -> str:
|
|
|
57 |
response_format: ResponseFormat,
|
58 |
) -> Response:
|
59 |
segments = list(segments)
|
60 |
+
match response_format:
|
61 |
+
case ResponseFormat.TEXT:
|
62 |
+
return Response(segments_to_text(segments), media_type="text/plain")
|
63 |
+
case ResponseFormat.JSON:
|
64 |
+
return Response(
|
65 |
+
CreateTranscriptionResponseJson.from_segments(segments).model_dump_json(),
|
66 |
+
media_type="application/json",
|
67 |
+
)
|
68 |
+
case ResponseFormat.VERBOSE_JSON:
|
69 |
+
return Response(
|
70 |
+
CreateTranscriptionResponseVerboseJson.from_segments(segments, transcription_info).model_dump_json(),
|
71 |
+
media_type="application/json",
|
72 |
+
)
|
73 |
+
case ResponseFormat.VTT:
|
74 |
+
return Response(
|
75 |
+
"".join(segments_to_vtt(segment, i) for i, segment in enumerate(segments)), media_type="text/vtt"
|
76 |
+
)
|
77 |
+
case ResponseFormat.SRT:
|
78 |
+
return Response(
|
79 |
+
"".join(segments_to_srt(segment, i) for i, segment in enumerate(segments)), media_type="text/plain"
|
80 |
+
)
|
81 |
|
82 |
|
83 |
def format_as_sse(data: str) -> str:
|
tests/sse_test.py
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
import json
|
2 |
-
import
|
3 |
|
4 |
import anyio
|
5 |
from faster_whisper_server.api_models import (
|
@@ -26,7 +26,7 @@ parameters = [(file_path, endpoint) for endpoint in ENDPOINTS for file_path in F
|
|
26 |
@pytest.mark.asyncio()
|
27 |
@pytest.mark.parametrize(("file_path", "endpoint"), parameters)
|
28 |
async def test_streaming_transcription_text(aclient: AsyncClient, file_path: str, endpoint: str) -> None:
|
29 |
-
extension =
|
30 |
async with await anyio.open_file(file_path, "rb") as f:
|
31 |
data = await f.read()
|
32 |
kwargs = {
|
@@ -42,7 +42,7 @@ async def test_streaming_transcription_text(aclient: AsyncClient, file_path: str
|
|
42 |
@pytest.mark.asyncio()
|
43 |
@pytest.mark.parametrize(("file_path", "endpoint"), parameters)
|
44 |
async def test_streaming_transcription_json(aclient: AsyncClient, file_path: str, endpoint: str) -> None:
|
45 |
-
extension =
|
46 |
async with await anyio.open_file(file_path, "rb") as f:
|
47 |
data = await f.read()
|
48 |
kwargs = {
|
@@ -57,7 +57,7 @@ async def test_streaming_transcription_json(aclient: AsyncClient, file_path: str
|
|
57 |
@pytest.mark.asyncio()
|
58 |
@pytest.mark.parametrize(("file_path", "endpoint"), parameters)
|
59 |
async def test_streaming_transcription_verbose_json(aclient: AsyncClient, file_path: str, endpoint: str) -> None:
|
60 |
-
extension =
|
61 |
async with await anyio.open_file(file_path, "rb") as f:
|
62 |
data = await f.read()
|
63 |
kwargs = {
|
|
|
1 |
import json
|
2 |
+
from pathlib import Path
|
3 |
|
4 |
import anyio
|
5 |
from faster_whisper_server.api_models import (
|
|
|
26 |
@pytest.mark.asyncio()
|
27 |
@pytest.mark.parametrize(("file_path", "endpoint"), parameters)
|
28 |
async def test_streaming_transcription_text(aclient: AsyncClient, file_path: str, endpoint: str) -> None:
|
29 |
+
extension = Path(file_path).suffix[1:]
|
30 |
async with await anyio.open_file(file_path, "rb") as f:
|
31 |
data = await f.read()
|
32 |
kwargs = {
|
|
|
42 |
@pytest.mark.asyncio()
|
43 |
@pytest.mark.parametrize(("file_path", "endpoint"), parameters)
|
44 |
async def test_streaming_transcription_json(aclient: AsyncClient, file_path: str, endpoint: str) -> None:
|
45 |
+
extension = Path(file_path).suffix[1:]
|
46 |
async with await anyio.open_file(file_path, "rb") as f:
|
47 |
data = await f.read()
|
48 |
kwargs = {
|
|
|
57 |
@pytest.mark.asyncio()
|
58 |
@pytest.mark.parametrize(("file_path", "endpoint"), parameters)
|
59 |
async def test_streaming_transcription_verbose_json(aclient: AsyncClient, file_path: str, endpoint: str) -> None:
|
60 |
+
extension = Path(file_path).suffix[1:]
|
61 |
async with await anyio.open_file(file_path, "rb") as f:
|
62 |
data = await f.read()
|
63 |
kwargs = {
|