Fedir Zadniprovskyi committed
Commit 9eed954 · 1 Parent(s): d9a6bd7
chore: improve api docs
faster_whisper_server/main.py
CHANGED
@@ -104,7 +104,10 @@ def get_models() -> list[ModelObject]:
 
 
 @app.get("/v1/models/{model_name:path}")
-
+# NOTE: `examples` doesn't work https://github.com/tiangolo/fastapi/discussions/10537
+def get_model(
+    model_name: Annotated[str, Path(example="Systran/faster-distil-whisper-large-v3")],
+) -> ModelObject:
     models = list(
         huggingface_hub.list_models(model_name=model_name, library="ctranslate2")
     )
@@ -148,7 +151,10 @@ def handle_default_openai_model(model_name: str) -> str:
 ModelName = Annotated[str, AfterValidator(handle_default_openai_model)]
 
 
-@app.post("/v1/audio/translations")
+@app.post(
+    "/v1/audio/translations",
+    response_model=str | TranscriptionJsonResponse | TranscriptionVerboseJsonResponse,
+)
 def translate_file(
     file: Annotated[UploadFile, Form()],
     model: Annotated[ModelName, Form()] = config.whisper.model,
@@ -156,6 +162,11 @@ def translate_file(
     response_format: Annotated[ResponseFormat, Form()] = config.default_response_format,
     temperature: Annotated[float, Form()] = 0.0,
     stream: Annotated[bool, Form()] = False,
+) -> (
+    str
+    | TranscriptionJsonResponse
+    | TranscriptionVerboseJsonResponse
+    | StreamingResponse
 ):
     start = time.perf_counter()
     whisper = load_model(model)
@@ -201,7 +212,10 @@ def translate_file(
 
 # https://platform.openai.com/docs/api-reference/audio/createTranscription
 # https://github.com/openai/openai-openapi/blob/master/openapi.yaml#L8915
-@app.post("/v1/audio/transcriptions")
+@app.post(
+    "/v1/audio/transcriptions",
+    response_model=str | TranscriptionJsonResponse | TranscriptionVerboseJsonResponse,
+)
 def transcribe_file(
     file: Annotated[UploadFile, Form()],
     model: Annotated[ModelName, Form()] = config.whisper.model,
@@ -214,6 +228,11 @@ def transcribe_file(
         Form(alias="timestamp_granularities[]"),
     ] = ["segment"],
     stream: Annotated[bool, Form()] = False,
+) -> (
+    str
+    | TranscriptionJsonResponse
+    | TranscriptionVerboseJsonResponse
+    | StreamingResponse
 ):
     start = time.perf_counter()
     whisper = load_model(model)
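
A note on the recurring pattern in main.py: the decorator now sets response_model explicitly while the function's return annotation also lists StreamingResponse. FastAPI builds the documented OpenAPI response schema from response_model, and a Response subclass returned for stream=True bypasses that schema at runtime. The snippet below is a minimal sketch of this pattern, not code from the repo; the /echo route and JsonResult model are made-up stand-ins for the real endpoints and TranscriptionJsonResponse.

from typing import Annotated

from fastapi import FastAPI, Form
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

app = FastAPI()


class JsonResult(BaseModel):  # hypothetical stand-in for TranscriptionJsonResponse
    text: str


@app.post("/echo", response_model=str | JsonResult)
def echo(
    text: Annotated[str, Form()],
    stream: Annotated[bool, Form()] = False,
) -> str | JsonResult | StreamingResponse:
    if stream:
        # Returning a Response subclass directly skips response_model serialization,
        # so StreamingResponse never needs to appear in the documented schema.
        return StreamingResponse(iter([text]), media_type="text/plain")
    return JsonResult(text=text)

Because response_model is passed explicitly, FastAPI does not try to derive a response field from the return annotation, which is presumably what lets the commit keep StreamingResponse in the signature without it leaking into the documented schema.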
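The endpoints still accept OpenAI-style multipart form data, so the documented schema can be exercised with a plain HTTP client. The call below is a hypothetical invocation, not part of the commit: the base URL, port, and audio file name are assumptions, while the form field names mirror the Form() parameters shown above.

import httpx

with open("audio.wav", "rb") as f:
    response = httpx.post(
        "http://localhost:8000/v1/audio/transcriptions",  # assumed host and port
        files={"file": ("audio.wav", f)},
        data={
            "model": "Systran/faster-distil-whisper-large-v3",
            "response_format": "json",
            "temperature": "0.0",
            "stream": "false",
        },
        timeout=None,  # transcription of long files can exceed the default timeout
    )
print(response.status_code)
print(response.text)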
faster_whisper_server/server_models.py
CHANGED
@@ -130,8 +130,6 @@ class TranscriptionVerboseJsonResponse(BaseModel):
 
 
 class ModelObject(BaseModel):
-    model_config = ConfigDict(populate_by_name=True)
-
     id: str
     """The model identifier, which can be referenced in the API endpoints."""
     created: int
@@ -140,3 +138,29 @@ class ModelObject(BaseModel):
     """The object type, which is always "model"."""
     owned_by: str
     """The organization that owns the model."""
+
+    model_config = ConfigDict(
+        populate_by_name=True,
+        json_schema_extra={
+            "examples": [
+                {
+                    "id": "Systran/faster-whisper-large-v3",
+                    "created": 1700732060,
+                    "object": "model",
+                    "owned_by": "Systran",
+                },
+                {
+                    "id": "Systran/faster-distil-whisper-large-v3",
+                    "created": 1711378296,
+                    "object": "model",
+                    "owned_by": "Systran",
+                },
+                {
+                    "id": "bofenghuang/whisper-large-v2-cv11-french-ct2",
+                    "created": 1687968011,
+                    "object": "model",
+                    "owned_by": "bofenghuang",
+                },
+            ]
+        },
+    )
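
In server_models.py the existing populate_by_name=True setting is kept but folded into a larger ConfigDict whose json_schema_extra block supplies example ModelObject payloads. A minimal sketch of the mechanism follows, assuming Pydantic v2; the Demo model is invented for illustration and is not part of the repo.

from pydantic import BaseModel, ConfigDict


class Demo(BaseModel):  # hypothetical model, standing in for ModelObject
    model_config = ConfigDict(
        json_schema_extra={
            "examples": [
                {"id": "Systran/faster-whisper-large-v3", "object": "model"},
            ]
        },
    )

    id: str
    object: str


# json_schema_extra is merged into the generated JSON schema, which FastAPI embeds
# in the OpenAPI document served at /docs, so the examples show up in the API docs.
print(Demo.model_json_schema()["examples"])

This class-level approach complements the per-parameter Path(example=...) used in get_model, where the NOTE comment records that FastAPI's `examples` argument does not render (https://github.com/tiangolo/fastapi/discussions/10537).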