Enabling Kubernetes support
Update README.md
fmdlc committed Oct 21, 2020
1 parent 6fddfb0 commit 7d349ce
Showing 8 changed files with 3,553 additions and 38 deletions.
3,403 changes: 3,403 additions & 0 deletions kubernetes/ISP-Checker-deploy.yaml

Large diffs are not rendered by default.

38 changes: 38 additions & 0 deletions kubernetes/README.md
@@ -0,0 +1,38 @@
## Kubernetes
The following configuration files deploy this stack on Kubernetes.
Update them to match your cluster before applying anything.

> ***NOTE***: The Kubernetes deployment is in *BETA*; expect some rough edges.

## Installing, the quick way

1) Apply `ISP-Checker-deploy.yaml`:
```bash
$: kubectl apply -f ISP-Checker-deploy.yaml
```
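
All of these manifests target the `monitoring` namespace. If `ISP-Checker-deploy.yaml` does not create that namespace in your cluster (worth checking before you apply it), create it first:

```bash
$: kubectl create namespace monitoring
```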

### Configuration

1) Edit `secrets.yaml` to initialize your InfluxDB database (the values to change are sketched below).
2) Edit `configmap.yaml` to configure Telegraf.
3) Apply the remaining `YAML` files.
4) Expose the Grafana deployment, or create a LoadBalancer service or Ingress rule, to reach Grafana.
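
For reference, this is a sketch of the read/write secret that InfluxDB and Telegraf consume (see `secrets.yaml` further down in this commit); the database name and credentials are the placeholder values from that file and should be replaced with your own:

```yaml
apiVersion: v1
kind: Secret
metadata:
  namespace: monitoring
  name: isp-checker-secrets-rw
type: Opaque
stringData:
  INFLUXDB_DB: telegraf                       # database Telegraf writes into
  INFLUXDB_ADMIN_USER: admin                  # change this
  INFLUXDB_ADMIN_PASSWORD: VerySecurePassword # change this
```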

### Accessing
#### LoadBalancer
If you want to use a `LoadBalancer` to access Grafana, run:
```bash
$: kubectl expose deployments/grafana --type=LoadBalancer --name=grafana-svc
```
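
Once the service exists, any external address assigned by your provider shows up in the service listing; the command below assumes the stack runs in the `monitoring` namespace, as in these manifests:

```bash
$: kubectl get svc grafana-svc -n monitoring
```
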
#### Port-forward
If you can't use a LoadBalancer, create a `ClusterIP` service instead and forward it to a local port.

```bash
$: kubectl expose deployments/grafana --type=ClusterIP --name=grafana-svc
```

Finally, reach the service through your Ingress controller (a minimal Ingress is sketched below) or with a `port-forward`:

```bash
$: kubectl port-forward svc/grafana-svc 3000:3000 -n monitoring
```
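
If you would rather publish Grafana through an Ingress rule than keep a port-forward open, a minimal sketch looks like the following; it assumes Kubernetes v1.19+ (`networking.k8s.io/v1`), an NGINX ingress class, and a hypothetical hostname `grafana.example.com`, so adjust all three to your environment:

```yaml
# Hypothetical Ingress in front of the grafana-svc service created above.
# Host, ingressClassName and the lack of TLS are assumptions, not project defaults.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: grafana-ingress
  namespace: monitoring
spec:
  ingressClassName: nginx
  rules:
    - host: grafana.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: grafana-svc
                port:
                  number: 3000
```
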
19 changes: 16 additions & 3 deletions kubernetes/configmap.yaml
@@ -1,3 +1,17 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
namespace: monitoring
name: grafana-user-config
data:
default.json: |-
{
"homeDashboardId": 1,
"theme": "dark",
"timezone": "browser"
}
---
apiVersion: v1
kind: ConfigMap
@@ -7,7 +21,6 @@ metadata:
data:
networking.yaml: |-
apiVersion: 1
providers:
- name: 'ISP-Checker'
orgId: 1
@@ -16,7 +29,7 @@ data:
disableDeletion: true
editable: false
options:
path: /tmp/dashboard/network-dashboard.json
path: /tmp/config/network-dashboard.json
---
apiVersion: v1
@@ -41,7 +54,7 @@ data:
debug = false
[[outputs.influxdb]]
urls = ["http://influxdb:8086"]
urls = ["http://grafana-svc.monitoring.svc.cluster.local:8086/"]
database = "$INFLUXDB_DB"
timeout = "60s"
username = "$INFLUXDB_ADMIN_USER"
77 changes: 46 additions & 31 deletions kubernetes/deployment.yaml
@@ -22,23 +22,31 @@ spec:
imagePullPolicy: IfNotPresent
envFrom:
- secretRef:
name: isp-checker-secrets
name: isp-checker-secrets-rw
ports:
- containerPort: 8086
readinessProbe:
httpGet:
path: /ping
port: 8086
initialDelaySeconds: 30
periodSeconds: 30
livenessProbe:
httpGet:
path: /ping
port: 8086
initialDelaySeconds: 30
periodSeconds: 30
volumeMounts:
- mountPath: /var/lib/influxdb
name: var-lib-influxdb
resources:
requests:
memory: "512Mi"
cpu: "250m"
memory: "200Mi"
cpu: "0.2"
limits:
memory: "1024Mi"
cpu: "500m"
memory: "300Mi"
cpu: "0.5"
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
@@ -73,25 +81,21 @@ spec:
labels:
app: grafana
spec:
serviceAccountName: grafana-sidecar
containers:
- name: grafana
image: grafana/grafana:7.2.1
imagePullPolicy: IfNotPresent
envFrom:
- secretRef:
name: isp-checker-secrets
name: isp-checker-secrets-ro
volumeMounts:
- name: grafana-datasource-provisioner
mountPath: /etc/grafana/provisioning/datasources/datasource.yaml
subPath: datasource.yaml
readOnly: true
mountPath: /etc/grafana/provisioning/datasources/
- name: grafana-dashboard-provisioner
mountPath: /etc/grafana/provisioning/dashboards/networking.yaml
subPath: networking.yaml
readOnly: true
- name: grafana-dashboard-data
mountPath: /tmp/dashboard/network-dashboard.json
subPath: network-dashboard.json
mountPath: /etc/grafana/provisioning/dashboards/
- name: grafana-dashboard
mountPath: /tmp/config/
ports:
- containerPort: 3000
env:
@@ -101,23 +105,31 @@ spec:
httpGet:
path: /
port: 3000
initialDelaySeconds: 30
periodSeconds: 30
livenessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 30
periodSeconds: 30
resources:
requests:
memory: "512Mi"
cpu: "250m"
memory: "200Mi"
cpu: "0.25"
limits:
memory: "1024Mi"
cpu: "500m"
memory: "500Mi"
cpu: "0.4"
volumes:
- name: grafana-datasource-provisioner
secret:
secretName: grafana-datasource
- name: grafana-dashboard-provisioner
configMap:
name: network-dashboard-provisioner
- name: grafana-dashboard-data
- name: grafana-dashboard
configMap:
name: network-dashboard-data
name: grafana-dashboard
- name: grafana-datasource-provisioner
secret:
secretName: grafana-datasource
---
apiVersion: apps/v1
kind: Deployment
@@ -141,20 +153,23 @@ spec:
image: tty0/isp_telegraf:1.15.3
envFrom:
- secretRef:
name: isp-checker-secrets
name: isp-checker-secrets-rw
imagePullPolicy: IfNotPresent
volumeMounts:
- name: telegraf-config-volume
mountPath: /etc/telegraf/telegraf.conf
subPath: telegraf.conf
mountPath: /etc/telegraf/
readOnly: true
resources:
requests:
memory: "512Mi"
cpu: "250m"
memory: "100Mi"
cpu: "0.1"
limits:
memory: "1024Mi"
cpu: "500m"
memory: "200Mi"
cpu: "0.3"
initContainers:
- name: wait-for-influxdb
image: curlimages/curl:7.72.0
command: ['sh', '-c', "until curl -XGET http://influxdb-svc.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local:8086/; do echo Waiting\ for\ InfluxDB; sleep 2; done"]
volumes:
- name: telegraf-config-volume
configMap:
3 changes: 2 additions & 1 deletion kubernetes/network-dashboard.yaml
@@ -1,8 +1,9 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
namespace: monitoring
name: network-dashboard-data
name: grafana-dashboard
data:
network-dashboard.json: |-
{
17 changes: 16 additions & 1 deletion kubernetes/pvc.yaml
@@ -11,4 +11,19 @@ spec:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storage: 5Gi

---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
namespace: monitoring
labels:
app: grafana
name: grafana-pvc
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
14 changes: 12 additions & 2 deletions kubernetes/secrets.yaml
Expand Up @@ -3,12 +3,22 @@ apiVersion: v1
kind: Secret
metadata:
namespace: monitoring
name: isp-checker-secrets
name: isp-checker-secrets-rw
type: Opaque
stringData:
INFLUXDB_DB: telegraf
INFLUXDB_ADMIN_USER: admin
INFLUXDB_ADMIN_PASSWORD: VerySecurePassword

---
apiVersion: v1
kind: Secret
metadata:
namespace: monitoring
name: isp-checker-secrets-ro
type: Opaque
stringData:
INFLUXDB_DB: telegraf
INFLUXDB_READ_USER: grafana
INFLUXDB_READ_PASSWORD: VerySecurePassword

@@ -25,7 +35,7 @@ stringData:
type: influxdb
access: proxy
orgId: 1
url: http://influxdb:8086
url: http://influxdb-svc.monitoring.svc.cluster.local:8086/
user: grafana
database: telegraf
isDefault: true
20 changes: 20 additions & 0 deletions kubernetes/services.yaml
@@ -0,0 +1,20 @@
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: influxdb
  name: influxdb-svc
  namespace: monitoring
spec:
  ports:
  - port: 8086
    protocol: TCP
    targetPort: 8086
  selector:
    app: influxdb
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
