Upload 3 files
- Dockerfile +21 -0
- app.py +26 -0
- requirements.txt +4 -0
Dockerfile
ADDED
@@ -0,0 +1,21 @@
+FROM python:3.11
+
+WORKDIR /code
+
+COPY ./requirements.txt /code/requirements.txt
+COPY ./app.py /code/app.py
+
+RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+RUN useradd -m -u 1000 user
+
+USER user
+
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+
+WORKDIR $HOME/app
+
+COPY --chown=user . $HOME/app
+
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
app.py
ADDED
@@ -0,0 +1,26 @@
+from fastapi import FastAPI
+from pydantic import BaseModel
+from llama_cpp import Llama
+
+app = FastAPI()
+
+# Load the model
+llm = Llama.from_pretrained(
+    repo_id="unsloth/phi-4-GGUF",
+    filename="phi-4-Q4_K_M.gguf",
+)
+
+# Define request model
+class ChatRequest(BaseModel):
+    system_prompt: str
+    query: str
+
+@app.post("/chat-p4q4")
+async def chat(request: ChatRequest):
+    response = llm.create_chat_completion(
+        messages=[
+            {"role": "system", "content": request.system_prompt},
+            {"role": "user", "content": request.query},
+        ]
+    )
+    return {"response": response}
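app.py exposes a single POST route, /chat-p4q4, that forwards a system prompt and a user query to the GGUF model via llama-cpp-python's create_chat_completion. A minimal client sketch for exercising the endpoint once the container is running; the base URL is an assumption here (the Dockerfile binds uvicorn to port 7860):

import requests

# Assumed base URL; point this at the running container or Space.
BASE_URL = "http://localhost:7860"

payload = {
    "system_prompt": "You are a concise assistant.",
    "query": "Summarize what llama.cpp does in one sentence.",
}

# POST to the /chat-p4q4 route defined in app.py.
resp = requests.post(f"{BASE_URL}/chat-p4q4", json=payload, timeout=300)
resp.raise_for_status()

# The endpoint returns llama-cpp-python's chat-completion dict under "response";
# the generated text sits in the first choice's message content.
data = resp.json()
print(data["response"]["choices"][0]["message"]["content"])

The generous timeout allows for slow CPU-only generation of the quantized model.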
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+llama-cpp-python
+fastapi
+pydantic
+uvicorn  # ASGI server invoked by the Dockerfile CMD