RENDERING_ENGINE="INFERENCE_ENDPOINT"
LLM_ENGINE="INFERENCE_API"
# Set this to control the maximum number of pages
MAX_NB_PAGES=6
# Set to "true" to create artificial delays and smooth out traffic | |
NEXT_PUBLIC_ENABLE_RATE_LIMITER="false" | |
# ------------- PROVIDER AUTH ------------ | |
AUTH_HF_API_TOKEN="YOUR_HF_API_TOKEN" | |
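#
# This token is typically passed as an "Authorization: Bearer" header on calls to the
# Hugging Face APIs. As a hedged sketch (not part of the app itself), you can check
# that the token is valid with a raw curl request against the whoami endpoint:
#
#   curl -s https://huggingface.co/api/whoami-v2 \
#     -H "Authorization: Bearer $AUTH_HF_API_TOKEN"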
# ------------- RENDERING API CONFIG --------------
# If you decide to use a private Hugging Face Inference Endpoint for the RENDERING engine
RENDERING_HF_INFERENCE_ENDPOINT_URL="https://XXXXXXXXXX.endpoints.huggingface.cloud"
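#
# The XXXXXXXXXX placeholder above must be replaced with the URL shown on your
# Inference Endpoint's page. As an illustrative sketch (the exact payload and output
# format depend on the text-to-image model you deployed), the endpoint can be called
# with the same Bearer token:
#
#   curl -s "$RENDERING_HF_INFERENCE_ENDPOINT_URL" \
#     -H "Authorization: Bearer $AUTH_HF_API_TOKEN" \
#     -H "Content-Type: application/json" \
#     -d '{"inputs": "a comic book panel of a cat in a spacesuit"}' \
#     --output panel.png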
# ------------- LLM API CONFIG ----------------
# If you decide to use a Hugging Face Inference API model for the LLM engine
# LLM_HF_INFERENCE_API_MODEL="HuggingFaceH4/zephyr-7b-beta"
LLM_HF_INFERENCE_API_MODEL="mistralai/Mistral-7B-Instruct-v0.3"
# ----------- CENSORSHIP (OPTIONAL) -----------
# Censorship is currently disabled, but it will be required when we create a "community roll"
# (a public repository of user-generated comic strips)
ENABLE_CENSORSHIP="false"