#!/bin/bash
#
# Upload output data from decoder to remote server
#
REMOTE_URL="https://adsbexchange.com/api/receive/"
REMOTE_HOST=$( echo $REMOTE_URL | awk -F'/' '{print $3}' )
# Set this to '0' if you don't want this script to ever try to self-cache DNS.
# Default is on, but the script will automatically skip caching if the resolver is localhost or if the curl version is too old.
DNS_CACHE=1
# Cache time, default 10min
DNS_TTL=600
# Set this to 1 if you want to force using the cache always even if there is a local resolver.
DNS_IGNORE_LOCAL=0
# List all paths, IN PREFERRED ORDER, separated by a SPACE
# By default, only use the json from the feed client
JSON_PATHS=("/run/adsbexchange-feed")
######################################################################################################################
# If you know what you're doing, and you want to override the search path, you can do it easily in
# /etc/default/adsbexchange-stats, by setting the JSON_PATHS variable to something else (or even multiple).
# For example, the old stats used this:
# JSON_PATHS=("/run/adsbexchange-feed" "/run/readsb" "/run/dump1090-fa" "/run/dump1090-mutability" "/run/dump1090" )
# You can enable this old path by setting "USE_OLD_PATH=1", preferably in /etc/default/adsbexchange-stats
######################################################################################################################
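# As an illustrative (hypothetical) example, /etc/default/adsbexchange-stats might contain:
#   JSON_PATHS=("/run/readsb")
#   DNS_CACHE=0
#   DNS_TTL=300
# Only the variables you want to override need to be set; everything else keeps the defaults above.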
# source local overrides (commonly the JSON_PATH, or DNS cache settings)
if [ -r /etc/default/adsbexchange-stats ]; then
. /etc/default/adsbexchange-stats
# If 'USE_OLD_PATH' is set, override the entire list
if [ "x$USE_OLD_PATH" != "x" ] && [ $USE_OLD_PATH -eq 1 ]; then
echo "Note: 'USE_OLD_PATH' is set."
JSON_PATHS=("/run/adsbexchange-feed" "/run/readsb" "/run/dump1090-fa" "/run/dump1090-mutability" "/run/dump1090" )
fi
fi
# Small bit of sanity...
if [ "${#JSON_PATHS[@]}" -le 0 ]; then
echo "FATAL - You broke something. JSON_PATHS variable has no locations listed. Please fix."
exit 5
fi
JSON_DIR=""
TEMP_DIR="/run/adsbexchange-stats/"
# Do this a few times, in case we're still booting up (wait a bit between checks)
CHECK_LOOP=0
while [ "x$JSON_DIR" = "x" ]; do
# Check the paths IN ORDER, preferring the first one we find
for i in ${!JSON_PATHS[@]}; do
CHECK=${JSON_PATHS[$i]}
if [ -d $CHECK ]; then
JSON_DIR=$CHECK
break
fi
done
# Couldn't find any of them...
if [ "x$JSON_DIR" = "x" ]; then
CHECK_LOOP=$(( CHECK_LOOP + 1 ))
if [ $CHECK_LOOP -gt 4 ]; then
# Bad news. Complain and exit.
echo "ERROR: Tried multiple times, could not find any of the directories - ABORTING!"
exit 10
fi
echo "No valid data source directory Found. Tried each of: [${JSON_PATHS[@]}]"
sleep 20
fi
done
# UUID file
UUID_FILE="/boot/adsbx-uuid"
UUID=$(cat $UUID_FILE)
if ! [[ $UUID =~ ^\{?[A-F0-9a-f]{8}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{4}-[A-F0-9a-f]{12}\}?$ ]]; then
# Data in UUID file is invalid
echo "FATAL: Data in UUID file was invalid, exiting!"
exit 1
fi
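# (A valid UUID file holds a single ID such as 01234567-89ab-cdef-0123-456789abcdef -- illustrative value only --
# optionally wrapped in braces, which is exactly what the regex above accepts.)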
#####################
# DNS cache setup #
#####################
declare -A DNS_LOOKUP
declare -A DNS_EXPIRE
# Let's FIRST make sure our version of curl will support what we need (--resolve arg)
CURL_VER=$( curl -V | head -1 | awk '{print $2}' )
if [ "x$CURL_VER" = "x" ]; then
echo "FATAL - curl is malfunctioning, can't get version info."
exit 11
fi
VER_OK=$( echo "$CURL_VER" | perl -ne '@v=split(/\./); if ($v[0] == 7) { if ($v[1] >= 22) { printf("1");exit; } else { printf("0");exit; } } if ($v[0] > 7) { printf("1");exit; } printf("0");exit;')
if [ $VER_OK -ne 1 ]; then
echo "WARNING: curl version is too old ($CURL_VER < 7.22.0), not using script's DNS cache."
DNS_CACHE=0
fi
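# For reference, the first line of `curl -V` typically looks like
#   curl 7.88.1 (x86_64-pc-linux-gnu) libcurl/7.88.1 OpenSSL/3.0.9 ...
# (version shown is illustrative); the awk above picks out the "7.88.1" field, and the perl
# check accepts any version >= 7.22.0, which supports the --resolve option used below.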
# This routine assumes the caller does no sanity-checking of its argument.
#
# Checks for the host in $DNS_LOOKUP{}; if the corresponding $DNS_EXPIRE{} is still in the future (>= NOW), return success.
# Otherwise, try looking it up. Save value if lookup succeeded.
#
# Returns:
# On Success: returns 0, and host will be in DNS_LOOKUP assoc array.
# On Fail: Various return codes:
# - 10 = No Hostname Provided
# - 20 = Hostname Format Invalid
# - 30 = Lookup Failed even after $DNS_MAX_LOOPS tries
DNS_WAIT=20
DNS_MAX_LOOPS=10
dns_lookup () {
local HOST=$1
local NOW=$( date +%s )
# You need to pass in a hostname :)
if [ "x$HOST" = "x" ]; then
echo "ERROR: dns_lookup called without a hostname" >&2
return 10
fi
# (is it even a syntactically-valid hostname?)
if ! [[ $HOST =~ ^[a-zA-Z0-9\.-]+$ ]]; then
echo "ERROR: Invalid hostname passed into dns_lookup [$HOST]" >&2
return 20
fi
# If the host is cached, and the TTL hasn't expired, return the cached data.
if [ ${DNS_LOOKUP[$HOST]} ]; then
if [ ${DNS_EXPIRE[$HOST]} -ge $NOW ]; then
return 0
fi
fi
# Try this several times
local LOOP=$DNS_MAX_LOOPS
while [ $LOOP -ge 1 ]; do
# Ok, let's look this hostname up! Use the first IP returned.
# - XXX : WARNING: This assumes the output format of 'host -v' doesn't change drastically! XXX -
# - Because this uses the "Trying" line, it should work for non-FQDN lookups, too -
HOST_IP=$( host -v -W $DNS_WAIT -t a "$HOST" | perl -ne 'if (/^Trying "(.*)"/){$h=$1; next;} if (/^$h\.\s+(\d+)\s+IN\s+A\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/) {$i=$2; last}; END {printf("%s", $i);}' )
RV=$?
# If this is empty, something failed. Sleep some and try again...
if [ $RV -ne 0 ] || [ "x$HOST_IP" = "x" ]; then
echo "Failure resolving [$HOST], waiting and trying again..." >&2
LOOP=$(( LOOP - 1 ))
sleep 10
continue
fi
# If we get here, we successfully resolved it
break;
done
# If LOOP is zero, Something Bad happened.
if [ $LOOP -le 0 ]; then
echo "FATAL: unable to resolve $HOST even after $DNS_MAX_LOOPS tries. Giving up." >&2
return 30
fi
# Resolved ok!
NOW=$( date +%s )
DNS_LOOKUP["$HOST"]=$HOST_IP
DNS_EXPIRE["$HOST"]=$(( NOW + DNS_TTL ))
return 0
}
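# Illustrative usage sketch (the real call site is in the upload loop below):
#   if dns_lookup "$REMOTE_HOST"; then
#     REMOTE_IP=${DNS_LOOKUP[$REMOTE_HOST]}
#   fi
# A cache hit inside DNS_TTL returns 0 without re-querying; a non-zero return (10/20/30)
# means the caller should fall back to letting curl do its own resolution.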
# First, see if we have a localhost resolver...
# - Only look at the first 'nameserver' entry in resolv.conf
# - This will assume any 127.x.x.x resolver entry is "local"
LOCAL_RESOLVER=$( grep nameserver /etc/resolv.conf | head -1 | egrep -c '[[:space:]]127\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' )
if [ $LOCAL_RESOLVER -ne 0 ]; then
if [ $DNS_IGNORE_LOCAL -eq 1 ]; then
echo "Found local resolver in resolv.conf, but DNS_IGNORE_LOCAL is on, so ignoring" >&2
else
echo "Found local resolver in resolv.conf, disabling DNS Cache" >&2
DNS_CACHE=0
fi
fi
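# (A "local" resolver here means a resolv.conf line like "nameserver 127.0.0.1",
# as commonly written by dnsmasq, or "nameserver 127.0.0.53" from systemd-resolved.)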
# If we have a local resolver, just let curl use the URL as-is. If not, look up the host ourselves and hand the IP to curl via --resolve.
# -- DNS Setup done
echo "Using UUID [${UUID}] for stats uploads"
echo "Using JSON directory [${JSON_DIR}] for source data"
if [ $DNS_CACHE -ne 0 ]; then
echo "Using script's DNS cache ($DNS_TTL seconds)"
else
echo "NOT using script's DNS cache"
fi
JSON_FILE="${JSON_DIR}/aircraft.json"
STAT_COUNT=0
# Grab the current timestamp of the file. Try in a loop a few times, in case the decoder is still starting up.
while [ $STAT_COUNT -lt 5 ]; do
JSON_STAT=$(stat --printf="%Y" $JSON_FILE 2> /dev/null)
RV=$?
if [ $RV -eq 0 ]; then
break
fi
STAT_COUNT=$(( STAT_COUNT + 1 ))
sleep 15
done
# Bad juju if we still don't have a stat...
if [ "x$JSON_STAT" = "x" ]; then
echo "ERROR: Can't seem to stat $JSON_FILE at startup, bailing out..."
exit 15
fi
# Complain if this file seems really old
NOW=$(date +%s)
DIFF=$(( NOW - JSON_STAT ))
if [ $DIFF -gt 60 ]; then
echo "WARNING: $JSON_FILE seems old, are you sure we're using the right path?"
fi
# Earliest file timestamp (epoch seconds) we will act on; start at 1 so the first pass uploads immediately.
NEXT_JSON_READ=1
# How long to wait before uploads, minimum (in seconds)
WAIT_TIME=5
# How long curl will wait to send data (10 sec default)
MAX_CURL_TIME=10
# How much time (sec) has to pass since last JSON update before we say something
# Initial value is "AGE_COMPLAIN", and then it complains every "AGE_INTERVAL" after that
# Defaults are:
# AGE_COMPLAIN = 30 sec
# AGE_INTERVAL = 30 min (1800 sec)
AGE_COMPLAIN=30
AGE_INTERVAL=$(( 30 * 60 ))
OLD_AGE=$AGE_COMPLAIN
while true; do
while [ $JSON_STAT -lt $NEXT_JSON_READ ]; do
sleep 1
NOW=$(date +%s)
# Grab new stat. If it fails, wait longer (otherwise assign to the main var)
NEW_STAT=$(stat --printf="%Y" $JSON_FILE 2> /dev/null)
RV=$?
if [ $RV -ne 0 ]; then
sleep 10
else
JSON_STAT=$NEW_STAT
fi
DIFF=$(( NOW - JSON_STAT ))
if [ $DIFF -gt $OLD_AGE ]; then
echo "WARNING: JSON file $JSON_FILE has not been updated in $DIFF seconds. Did your decoder die?"
OLD_AGE=$(( OLD_AGE + AGE_INTERVAL ))
else
# Reset this here, in case it comes back ;)
OLD_AGE=$AGE_COMPLAIN
fi
done
# Update this for the next loop
NEXT_JSON_READ=$(( JSON_STAT + $WAIT_TIME ))
# Move the JSON somewhere before operating on it...
RAND=$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c10)
TMPFILE="${TEMP_DIR}/json-${RAND}"
NEWFILE="${TMPFILE}-new"
# Sanity to make sure we can write
T=$(touch $TMPFILE 2>&1)
RV=$?
if [ $RV -ne 0 ]; then
echo "ERROR: Unable to write to $TMPFILE, aborting! ($T)"
exit 99
fi
CP=$(cp $JSON_FILE $TMPFILE 2>&1)
RV=$?
if [ $RV -ne 0 ]; then
# cp failed (file changed during copy, usually), wait a few and loop again
sleep 2
continue
fi
echo '{"uuid":"'$UUID'",' > $NEWFILE
echo '"aircraft":' >> $NEWFILE
# -c means "compact" form (not pretty-printed) -- saves space!
jq -c '.aircraft' $TMPFILE >> $NEWFILE
echo '}' >> $NEWFILE
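# The payload assembled in $NEWFILE ends up shaped roughly like this (values illustrative;
# the aircraft array is passed through unchanged from $JSON_FILE):
#   {"uuid":"01234567-89ab-cdef-0123-456789abcdef",
#   "aircraft":
#   [{"hex":"abc123", ... }, ... ]
#   }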
CURL_EXTRA=""
# If DNS_CACHE is set, use the built-in cache (and pass the corresponding extra curl argument)
if [ $DNS_CACHE -ne 0 ]; then
dns_lookup $REMOTE_HOST
RV=$?
if [ $RV -ne 0 ]; then
# Some sort of error... We'll fall back to normal curl usage, but sleep a little.
echo "DNS Error for ${REMOTE_HOST}, falling back after sleeping some more..."
sleep $(( 30 + RANDOM % 30 ))
else
REMOTE_IP=${DNS_LOOKUP[$REMOTE_HOST]}
CURL_EXTRA="--resolve ${REMOTE_HOST}:443:$REMOTE_IP"
fi
fi
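# When the cache is in use, CURL_EXTRA takes curl's documented --resolve form
#   --resolve <host>:<port>:<address>
# e.g. "--resolve adsbexchange.com:443:203.0.113.10" (illustrative address), which pins
# the HTTPS connection to the cached IP without rewriting $REMOTE_URL.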
# Push up the data. 'curl' will wait no more than $MAX_CURL_TIME seconds for upload to complete
CURL=$(cat $NEWFILE | gzip | curl -m $MAX_CURL_TIME $CURL_EXTRA -sS -X POST -H "Content-Encoding: gzip" --data-binary @- $REMOTE_URL 2>&1 )
RV=$?
if [ $RV -ne 0 ]; then
echo "WARNING: curl process returned non-zero ($RV): [$CURL]"
echo "Sleeping a little extra."
sleep $(( 5 + RANDOM % 15 ))
fi
rm -f $TMPFILE $NEWFILE
done