# docker-compose.yml
services:
  # Backend API; the Docker socket mount lets it reach the other containers
  # named in its environment
  backend:
    build: ./backend
    ports:
      - "7000:8000"
    volumes:
      - ./backend/src:/app
      - ./saves:/app/saves
      - uploads:/app/uploads
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - OLLAMA_API_URL=${OLLAMA_API_URL}
      - LLAMA_FACTORY_CONTAINER=${LLAMA_FACTORY_CONTAINER}
      - LLAMA_CPP_CONTAINER=${LLAMA_CPP_CONTAINER}
      - OLLAMA_CONTAINER=${OLLAMA_CONTAINER}
    networks:
      - docknet
  # Frontend; the VITE_* URLs are build args, so they are baked in at image
  # build time rather than read at runtime
  frontend:
    build:
      context: ./frontend
      dockerfile: Dockerfile
      args:
        - VITE_LLAMA_FACTORY_URL=${VITE_LLAMA_FACTORY_URL}
        - VITE_EASY_DATASET_URL=${VITE_EASY_DATASET_URL}
        - VITE_TENSORBOARD_URL=${VITE_TENSORBOARD_URL}
    networks:
      - docknet
  # Caddy reverse proxy; the stack's entry point on host port 3000
  caddy:
    image: caddy:alpine
    ports:
      - "3000:3000"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - caddy_data:/data
    depends_on:
      - backend
    networks:
      - docknet
  # LLaMA-Factory fine-tuning UI; reserves all host NVIDIA GPUs and shares
  # the Hugging Face cache with the host
  llama-factory:
    container_name: finetune-llama-factory
    build:
      context: ./llama-factory
      dockerfile: Dockerfile
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    ports:
      - "7002:7860"
    volumes:
      - ./data:/app/shared_data
      - ./hf_cache:/root/.cache/huggingface
      - ./saves:/app/saves
      - ./models:/app/models
      - llama_factory_cache:/app/cache
    environment:
      - HF_TOKEN=${HF_TOKEN}
      - MPLCONFIGDIR=/app/cache
      - HF_HOME=/root/.cache/huggingface
    networks:
      - docknet
  # easy-dataset for dataset curation; its local DB is persisted to ./data
  easy-dataset:
    image: ghcr.io/conardli/easy-dataset
    container_name: easy-dataset
    ports:
      - "7001:1717"
    volumes:
      - ./data:/app/local-db
    restart: unless-stopped
    networks:
      - docknet
  # llama.cpp CUDA build; idles on `tail -f /dev/null` so tools can be run
  # inside it on demand via `docker exec`
  llama-cpp:
    container_name: finetune-llama-cpp
    build:
      context: ./llama.cpp
      dockerfile: .devops/cuda.Dockerfile
      args:
        - CUDA_VERSION=12.8.1
      target: full
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    volumes:
      - ./models:/models
      - ./data:/data
      - ./saves:/saves
      - ./hf_cache:/hf_cache
    entrypoint: []
    command: tail -f /dev/null
    networks:
      - docknet
  # TensorBoard over the training logs written to ./saves
  tensorboard:
    image: tensorflow/tensorflow
    ports:
      - "7003:6006"
    volumes:
      - ./saves:/app/saves
    command: tensorboard --logdir /app/saves --bind_all
    networks:
      - docknet
volumes:
  uploads:
  caddy_data:
  llama_factory_cache:
networks:
  docknet:
    external: true
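
# Note: docknet is declared external, so Compose will not create it and the
# stack fails to start if it is missing. A minimal sketch of creating it
# beforehand, assuming the default bridge driver is sufficient:
#   docker network create docknet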