#!/bin/bash
# Container entrypoint: start the Ollama server, pre-pull the models it
# needs, then block forever so the container stays alive.
set -euo pipefail

# Start the Ollama server in the background.
ollama serve &

# Wait for the server to be ready. A fixed sleep races against slow
# startup; instead poll the API (via `ollama list`) for up to 30s.
for _ in {1..30}; do
  if ollama list >/dev/null 2>&1; then
    break
  fi
  sleep 1
done

# Pull the necessary models. Under `set -e` a failed pull aborts the
# script instead of being silently ignored.
models=(
  llama3.2
  llama3.2:1b
  granite3-moe
  granite3-moe:1b
)
for model in "${models[@]}"; do
  ollama pull "$model"
done

# Keep the container running (PID 1 must stay in the foreground).
tail -f /dev/null