diff --git a/Dockerfile b/Dockerfile
index 0251ace9..861277df 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -38,6 +38,7 @@ ENV PATH="$VIRTUAL_ENV/bin:$PATH"
 RUN apt-get update -y && \
     apt-get -y upgrade && \
     apt-get install --no-install-recommends -y \
+    curl \
     supervisor \
     nginx \
     imagemagick \
@@ -95,7 +96,8 @@ ENV ENABLE_UWSGI='yes' \
     ENABLE_CELERY_BEAT='yes' \
     ENABLE_CELERY_SHORT='yes' \
     ENABLE_CELERY_LONG='yes' \
-    ENABLE_MIGRATIONS='yes'
+    ENABLE_MIGRATIONS='yes' \
+    ENABLE_OLLAMA='no'
 
 EXPOSE 9000 80
 
@@ -106,8 +108,12 @@ CMD ["./deploy/docker/start.sh"]
 
 ############ FULL IMAGE ############
 FROM base AS full
+
+ENV ENABLE_OLLAMA='yes'
+
 COPY requirements-full.txt ./
 RUN mkdir -p /root/.cache/ && \
     chmod go+rwx /root/ && \
     chmod go+rwx /root/.cache/
-RUN uv pip install -r requirements-full.txt
\ No newline at end of file
+RUN uv pip install -r requirements-full.txt
+RUN curl -fsSL https://ollama.com/install.sh | sh
diff --git a/Makefile b/Makefile
index 87936e91..a22dabf2 100644
--- a/Makefile
+++ b/Makefile
@@ -17,3 +17,8 @@ build-frontend:
 
 test:
	docker compose -f docker-compose-dev.yaml exec --env TESTING=True -T web pytest
+build-base-image:
+	docker build -t mediacms/mediacms:latest --target base .
+
+build-full-image:
+	docker build -t mediacms/mediacms:full --target full .
diff --git a/deploy/docker/prestart.sh b/deploy/docker/prestart.sh
index 938dedb4..3f24b16b 100755
--- a/deploy/docker/prestart.sh
+++ b/deploy/docker/prestart.sh
@@ -69,3 +69,37 @@ if [ X"$ENABLE_CELERY_LONG" = X"yes" ] ; then
     cp deploy/docker/supervisord/supervisord-celery_long.conf /etc/supervisor/conf.d/supervisord-celery_long.conf
     rm /var/run/mediacms/* -f # remove any stale id, so that on forced restarts of celery workers there are no stale processes that prevent new ones
 fi
+
+if [ X"$ENABLE_OLLAMA" = X"yes" ] ; then
+    echo "Starting ollama to pull models..."
+    ollama serve &
+    OLLAMA_PID=$!
+
+    # Wait for ollama to be ready
+    retries=10
+    echo "Waiting for ollama to start..."
+    while [ $retries -gt 0 ] && ! curl -s http://127.0.0.1:11434/ > /dev/null 2>&1; do
+        sleep 1
+        retries=$((retries-1))
+    done
+
+    if [ $retries -eq 0 ]; then
+        echo "Ollama did not start in time. Killing process."
+        kill $OLLAMA_PID
+        wait $OLLAMA_PID 2>/dev/null
+        echo "Failed to start ollama for model pulling. The main ollama service will be started later."
+    else
+        echo "Ollama is up. Checking for llama3.2 model."
+
+        if ! ollama list | grep -q "llama3.2"; then
+            echo "llama3.2 model not found, pulling it..."
+            ollama pull llama3.2
+        else
+            echo "llama3.2 model already exists."
+        fi
+
+        echo "Stopping temporary ollama service."
+        kill $OLLAMA_PID
+        wait $OLLAMA_PID 2>/dev/null
+    fi
+fi
diff --git a/deploy/docker/start.sh b/deploy/docker/start.sh
index c07707f6..1649eef1 100755
--- a/deploy/docker/start.sh
+++ b/deploy/docker/start.sh
@@ -11,6 +11,11 @@
 else
     echo "There is no script $PRE_START_PATH"
 fi
 
+if [ X"$ENABLE_OLLAMA" = X"yes" ] ; then
+    echo "Starting ollama service in background..."
+    ollama serve &
+fi
+
 # Start Supervisor, with Nginx and uWSGI
 echo "Starting server using supervisord..."
diff --git a/requirements-full.txt b/requirements-full.txt
index 2fed12ff..3f7d8dca 100644
--- a/requirements-full.txt
+++ b/requirements-full.txt
@@ -1,2 +1,3 @@
 openai-whisper==20250625
 setuptools-rust
+ollama