Compare commits


3 Commits

SHA1        Message                                                                                  Date
120bd43cbc  chore: update docker-compose configuration for open-webui service                       2025-12-12 17:07:53 -05:00
23e5987799  feat: update ollama service configuration for NVIDIA support and environment variables  2025-12-12 17:07:09 -05:00
            This was the only way I could get my P100 GPU mounted into the ollama container. I
            still had to install extra packages on Debian that were meant for Ubuntu. It didn't
            feel right, but I did it anyway. It worked.
4b50114591  chore: update docker-compose configuration for Node-RED service                         2025-12-12 17:05:47 -05:00
3 changed files with 22 additions and 12 deletions

Node-RED docker-compose:

@@ -9,18 +9,21 @@ version: "3.7"
 services:
   node-red:
     image: nodered/node-red:latest
+    container_name: node-red
     #image: nodered/node-red:2.2.3-12@sha256:051cdb78937b7396d5be102e575b3b47322bfa48efad5aeb0d86d0a3bef22d22 # armv7 version works for Raspberry Pi; apparently it isn't very good at detecting 32- vs 64-bit
     environment:
       - TZ=America/New_York
       - NODE_RED_CREDENTIAL_SECRET=YourEncryptedString
     ports:
       - "1880:1880"
-    networks:
-      - node-red-net
     volumes:
-      - /docker-containers/node-red:/data
+      - /docker-containers/node-red/data:/data
+    networks:
+      homelab:
+        aliases:
+          - nodered
+networks: # The network has to be declared twice: once under the service, and once at this top level.
+  homelab:
+    name: homelab # networks can also be given a custom name
+    external: true # this option makes Compose join the above network instead of creating a _default one
-volumes:
-  node-red-data:
-networks:
-  node-red-net:
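
The "declared twice" behavior flagged in the comment is how Compose is designed: the top-level networks: key defines (or references) the network itself, while the service-level networks: key attaches the container to it. A minimal sketch of that pattern, assuming the homelab network was created beforehand with `docker network create homelab`:

    services:
      node-red:
        image: nodered/node-red:latest
        networks:
          homelab:            # attach this container to the shared network
            aliases:
              - nodered       # other containers on homelab can reach this one as "nodered"

    networks:
      homelab:
        external: true        # join the pre-existing network instead of creating a _default one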

Ollama docker-compose:

@@ -6,13 +6,20 @@ services:
     volumes:
       - /docker-containers/ollama/code:/code
       - /docker-containers/ollama/data:/root/.ollama
+      # - /usr/local/cuda:/usr/local/cuda:ro # maybe mount the CUDA runtime from the host
     container_name: ollama
     pull_policy: always
     tty: true
     restart: always
     environment:
       - OLLAMA_KEEP_ALIVE=24h
       - OLLAMA_HOST=0.0.0.0
+      - NVIDIA_VISIBLE_DEVICES=all
+      - NVIDIA_DRIVER_CAPABILITIES=compute,utility
+    # devices:
+    #   - /dev/nvidia0:/dev/nvidia0
+    #   - /dev/nvidiactl:/dev/nvidiactl
+    #   - /dev/nvidia-uvm:/dev/nvidia-uvm
+    runtime: nvidia
     networks:
       - homelab
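
runtime: nvidia together with the NVIDIA_* variables is the older way of exposing GPUs through the NVIDIA Container Toolkit. Newer Compose releases also accept an explicit device reservation, which avoids setting a runtime at all. A sketch of that alternative, not taken from this repo; the image tag is an assumption, and the NVIDIA Container Toolkit still has to be installed on the host:

    services:
      ollama:
        image: ollama/ollama:latest   # assumed tag; the diff does not show the image line
        volumes:
          - /docker-containers/ollama/data:/root/.ollama
        deploy:
          resources:
            reservations:
              devices:
                - driver: nvidia
                  count: all          # or 1, to hand over a single GPU such as the P100
                  capabilities: [gpu]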

Open WebUI docker-compose:

@@ -1,7 +1,7 @@
 version: "3.8"
 services:
   open-webui:
-    image: ghcr.io/open-webui/open-webui:v0.6.36
+    image: ghcr.io/open-webui/open-webui:latest
     container_name: open-webui
     restart: unless-stopped
@@ -12,7 +12,7 @@ services:
       - MODEL_SERVER_URL=http://ollama:11434
       - PROVIDER=ollama,openai
       - OPENAI_API_KEY=YourAPIKEY
       # This variable has to be set, otherwise you may run into WebSocket issues (it didn't fix my issue, though)
+      # With the values below set, and basic auth turned off in nginx, I was able to get the reverse proxy working
       - CORS_ALLOW_ORIGIN=https://open-webui.example.com;https://example.com;http://open-webui;http://open-webui:8080;https://open-webui:8080;http://192.168.1.239;http://192.168.1.239:3030;http://localhost:3030;http://192.168.1.1:80;http://192.168.1.1:443
       - WEBUI_URL=https://open-webui.example.com;https://example.com;http://open-webui;http://open-webui:8080;https://open-webui:8080;http://192.168.1.239;http://192.168.1.239:3030;http://localhost:3030;http://192.168.1.1:80;http://192.168.1.1:443
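
The origin lists above are semicolon-separated. If the reverse proxy only ever serves a couple of hostnames, a trimmed sketch like the following may be enough; note that upstream documentation appears to treat WEBUI_URL as a single canonical base URL rather than a list, so this is worth verifying:

    services:
      open-webui:
        environment:
          # keep only the origins browsers will actually send (assumption: the two public hostnames)
          - CORS_ALLOW_ORIGIN=https://open-webui.example.com;https://example.com
          # a single base URL; upstream docs seem to expect exactly one value here
          - WEBUI_URL=https://open-webui.example.com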
@@ -36,7 +36,7 @@ services:
       - open-wui
     openwebui_net:
       aliases:
-        - openwebui_internal
+        - openwebui_internal # joins the redis network here, so redis no longer connects to homelab (multiple redis instances)
     depends_on:
       - redis-valkey-webui
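
The alias comment describes a multi-network layout: open-webui sits on both homelab and a private openwebui_net, while the redis/valkey container joins only the private network. A minimal sketch of that shape; the valkey image and any keys not shown in the diff are assumptions:

    services:
      open-webui:
        networks:
          homelab: {}                 # reachable from the rest of the homelab
          openwebui_net:
            aliases:
              - openwebui_internal    # name used inside the private network
        depends_on:
          - redis-valkey-webui

      redis-valkey-webui:
        image: valkey/valkey:latest   # assumed image; the diff only shows the service name
        networks:
          - openwebui_net             # private network only, so redis never joins homelab

    networks:
      homelab:
        external: true
      openwebui_net: {}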