-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathserver.sh
More file actions
executable file
·255 lines (218 loc) · 7.84 KB
/
server.sh
File metadata and controls
executable file
·255 lines (218 loc) · 7.84 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
#!/bin/bash
# Print the full command reference to stdout and exit with status 1.
# Called both for explicit help and on any argument-validation failure.
function usage() {
  # Summary line now includes mongo-dump/mongo-restore, which were documented
  # below but missing from the command list.
  echo "Usage: $0 {init|join|network|start|scale|stop|clean|stats|services|publish|unit-test|reload|mongo-dump|mongo-restore}"
  echo " init: initialize the swarm cluster"
  echo " join TOKEN IP:PORT: join the swarm cluster"
  echo " network (REQUIRES SWARM CLUSTER): create shared networks for the swarm mode"
  echo " start [-d: dev mode [-p: publish]] (REQUIRES SWARM CLUSTER): start the server"
  echo " scale <SERVICE: data-processor, kafka-consumer, kafka-consumer-mongo, kafka-streams, spade-instance> <N: number of instances> (REQUIRES SWARM CLUSTER): scale the server to N SERVICE instances"
  echo " stop: stop the server"
  echo " clean: stop the server and remove all docker data"
  echo " stats: print stats from all services"
  echo " services: print all services"
  echo " publish [-d: dev mode (REQUIRES SWARM CLUSTER)]: publish the images to a registry"
  echo " unit-test SERVICE: run the unit-test suite for the given service"
  echo " reload SERVICE (REQUIRES DEV MODE): rebuild and update the service"
  echo " mongo-dump CONTAINER HOST_DESTINATION: dump the mongo database"
  echo " mongo-restore CONTAINER HOST_SOURCE: restore the mongo database"
  echo ""
  # Fixed typo: "kafka-conumser-mongo" -> "kafka-consumer-mongo".
  echo "IMPORTANT: number of kafka-consumer/kafka-consumer-mongo/kafka-streams instances should be bigger or equal to the number of the topic partitions they read from"
  echo " see UPDATE_AGENT_OUTPUT_TOPIC_PARTITIONS for kafka-consumer, UPDATE_AGENT_INPUT_TOPIC_PARTITIONS for kafka-consumer-mongo/kafka-streams"
  exit 1
}
# Turn the current docker host into a swarm manager node.
function init() {
  if ! docker swarm init; then
    echo "failed to initialize swarm cluster"
  else
    echo "swarm cluster initialized"
  fi
}
# Join an existing swarm cluster.
# $1 - join token, $2 - manager address as IP:PORT.
function join() {
  if [ -z "$1" ]; then
    echo "missing token"
    usage
  fi
  if [ -z "$2" ]; then
    echo "missing address"
    usage
  fi
  if ! docker swarm join --token "$1" "$2"; then
    echo "failed to join swarm cluster"
  else
    echo "swarm cluster joined"
  fi
}
# Create the two shared overlay networks the stack declares as external.
# Requires the node to already be part of a swarm cluster.
function network() {
  local net desc
  for net in sre-cs li-sre; do
    case "$net" in
      sre-cs) desc="[simulation run environment <-> communication server]" ;;
      li-sre) desc="[local interface <-> simulation run environment]" ;;
    esac
    if docker network create --driver overlay --attachable "$net"; then
      echo "$desc created"
    else
      echo "$desc failed to create"
    fi
  done
}
# Deploy the stack to the swarm cluster.
# Options: -d use the dev compose file; -p (meaningful with -d) publish
# the dev images to the local registry before deploying.
function start() {
  # OPTIND must be local (as it already is in publish); otherwise getopts
  # state left over from an earlier call makes option parsing skip arguments.
  local OPTIND opt DEV=0 PUBLISH=0 COMPOSE_FILE
  while getopts dp opt; do
    case $opt in
      d) DEV=1 ;;
      p) PUBLISH=1 ;;
      *) usage ;;
    esac
  done
  if [ "$DEV" -eq "1" ]; then
    COMPOSE_FILE=docker-compose.dev.swarm.yml
    if [ "$PUBLISH" -eq "1" ]; then publish -d; fi
  else
    COMPOSE_FILE=docker-compose.swarm.yml
  fi
  # .version defines VERSION, used to select the image tags to deploy.
  source .version
  if env VERSION="${VERSION}" docker stack deploy -c ./"$COMPOSE_FILE" sre; then
    echo "Version: ${VERSION}"
    echo "OK"
  else
    echo ""
    echo "failed to start the server"
    echo ""
    echo "if you see the following error:"
    echo "failed to create service X: Error response from daemon: network Y not found"
    echo "then restart docker daemon (i.e. sudo systemctl restart docker) and run ./server.sh clean"
    echo ""
    echo "if you see the following error:"
    echo "network X is declared as external, but could not be found"
    echo "run the script with the network option to create the required networks"
  fi
}
# Scale service sre_$1 to $2 replicas via docker service scale.
function scale() {
  if [ -z "$1" ]; then
    echo "missing service name"
    usage
  fi
  if [ -z "$2" ]; then
    echo "missing number of instances"
    usage
  fi
  docker service scale "sre_${1}=${2}"
}
# Remove the deployed "sre" stack; swarm tears the services down asynchronously.
function stop() {
docker stack rm sre
}
# Stop the stack, leave the swarm, and wipe all docker data.
# NOTE: prune runs without -f, so docker prompts for confirmation interactively.
function clean() {
stop
docker swarm leave --force
docker system prune --all --volumes
}
# Stream live resource usage for all running containers (blocks until Ctrl-C).
function stats() {
docker stats
}
# List the swarm services, followed by troubleshooting hints.
function services() {
  docker service ls
  # Quoted heredoc: emitted verbatim, no expansion.
  cat <<'HINTS'

if you notice that some of the services are not up
then stop the server, publish the images, create the shared networks, and start the server again

sre_kafka-topic-creator is expected to run only once
HINTS
}
# Build and push the stack images.
# -d: dev mode — ensure a local registry service exists, then build/push the
#     dev compose images to it. Otherwise, interactively confirm pushing the
#     pinned VERSION (from .version) and/or the latest tag to the registry.
function publish() {
  local OPTIND
  DEV=0
  while getopts d flag; do
    case $flag in
      d) DEV=1 ;;
      *) usage ;;
    esac
  done
  if [ "$DEV" -ne "1" ]; then
    # .version defines VERSION for the tagged build.
    source .version
    read -p "Publish version ${VERSION} to registry? [y/n] " -r VERSION_ANSWER
    read -p "Publish version latest to registry? [y/n] " -r LATEST_ANSWER
    if [ "$VERSION_ANSWER" != "y" ] && [ "$LATEST_ANSWER" != "y" ]; then
      echo "aborting"
      exit 1
    fi
    if [ "$VERSION_ANSWER" == "y" ]; then
      env VERSION="${VERSION}" docker-compose -f docker-compose.swarm.yml build --parallel && \
        env VERSION="${VERSION}" docker-compose -f docker-compose.swarm.yml push && \
        echo "published version ${VERSION}"
    fi
    if [ "$LATEST_ANSWER" == "y" ]; then
      env VERSION=latest docker-compose -f docker-compose.swarm.yml build --parallel && \
        env VERSION=latest docker-compose -f docker-compose.swarm.yml push && \
        echo "published version latest"
    fi
  else
    echo "creating local registry"
    if docker service ps -q registry > /dev/null 2>&1; then
      echo "registry already exists"
    else
      if ! docker service create --name registry --publish published=5000,target=5000 registry:2; then
        echo "failed to create registry"
        usage
      fi
    fi
    docker-compose -f docker-compose.dev.swarm.yml build --parallel && \
      docker-compose -f docker-compose.dev.swarm.yml push
  fi
}
# Run the unit-test compose service named $1, rebuilding its image first.
function unit-test() {
  if [ -z "${1}" ]; then
    echo "missing service name"
    usage
  fi
  # --build must precede the service name: docker-compose v1 only accepts
  # options before the positional SERVICE arguments ("up [options] [SERVICE...]").
  docker-compose -f docker-compose.test.yml up --build "${1}"
}
# Rebuild and push the dev image for $1, then force-update service sre_$1.
# Only meaningful when the stack was started in dev mode.
function reload() {
  if [ -z "${1}" ]; then
    echo "missing service name"
    usage
  fi
  local compose_file=docker-compose.dev.swarm.yml
  docker-compose -f "$compose_file" build "${1}" && \
    docker-compose -f "$compose_file" push && \
    docker service update "sre_${1}" --force
}
# Dump the "simulations" database from mongo container $1 to host path $2.
function mongo-dump() {
  if [ -z "$1" ]; then
    echo "missing container name"
    usage
  fi
  if [ -z "$2" ]; then
    echo "missing host destination"
    usage
  fi
  # Random suffix keeps concurrent dumps in the container from colliding.
  local dump_dir="/opt/bitnami/mongodb/dump_$RANDOM"
  docker exec -it "${1}" mongodump --username root --password root --authenticationDatabase admin --db simulations --out "$dump_dir" && \
    docker cp "${1}:${dump_dir}/simulations" "${2}" && \
    docker exec -it "${1}" rm -rf "$dump_dir"
}
# Restore the "simulations" database into mongo container $1 from host path $2.
# Existing collections are dropped before the restore (--drop).
function mongo-restore() {
  if [ -z "$1" ]; then
    echo "missing container name"
    usage
  fi
  if [ -z "$2" ]; then
    echo "missing host source"
    usage
  fi
  # Random suffix keeps concurrent restores in the container from colliding.
  local dump_dir="/opt/bitnami/mongodb/dump_$RANDOM"
  docker exec -it "${1}" mkdir -p "$dump_dir" && \
    docker cp "${2}" "${1}:${dump_dir}" && \
    docker exec -it "${1}" mongorestore --username root --password root --authenticationDatabase admin --db simulations --drop "${dump_dir}/simulations"
}
# Entry point: dispatch the first CLI argument to the matching command
# handler, forwarding the remaining arguments. Unknown commands print usage.
case "${1}" in
init) init ;;
join) join "${2}" "${3}" ;;
network) network ;;
start) start "${@:2}" ;;
scale) scale "${2}" "${3}" ;;
stop) stop ;;
clean) clean ;;
stats) stats ;;
services) services ;;
publish) publish "${@:2}" ;;
unit-test) unit-test "${2}" ;;
reload) reload "${2}" ;;
mongo-dump) mongo-dump "${2}" "${3}" ;;
mongo-restore) mongo-restore "${2}" "${3}" ;;
*) usage ;;
esac