Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

NEF Helm Chart #3

Merged
merged 5 commits into from
Oct 1, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
64 changes: 64 additions & 0 deletions .github/workflows/publish-docker-images.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
#
name: Publish Docker images

# Configures this workflow to run every time a change is pushed to the branch.
on:
  push:
    branches: ['main']

# Custom environment variables for the workflow.
env:
  REGISTRY: atnog-harbor.av.it.pt
  PROJECT: route25
  # Branch whose images are additionally tagged as "latest".
  LATEST_BRANCH: main

# Jobs in this workflow.
jobs:
  build-and-push-docker-images:
    runs-on: ubuntu-24.04

    # Matrix to build each image variant (backend app + report) in parallel.
    strategy:
      fail-fast: true  # Stops the job as soon as one of the matrix entries fails.
      matrix:
        include:
          - dir: backend
            file: Dockerfile.backend
            repository: backend
          - dir: backend
            file: Dockerfile.report
            repository: report

    # Steps in this job.
    steps:
      - name: Checkout repository
        # v2 runs on the deprecated Node 12 runtime; v4 is the maintained release.
        uses: actions/checkout@v4

      - name: Log in to the Registry
        # NOTE(review): the original version ref was garbled by the page scrape
        # ("docker/[email protected]"); v3 is the current major — confirm and pin
        # the version that was actually intended.
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ secrets.REGISTRY_USERNAME }}
          password: ${{ secrets.REGISTRY_PASSWORD }}

      - name: Extract metadata (tags, labels) for Docker
        id: meta
        # NOTE(review): version ref garbled in the source — confirm and pin.
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.PROJECT }}/${{ matrix.repository }}
          tags: |
            type=semver,pattern={{version}}
            type=ref,event=branch
            type=sha

      - name: Build and push Docker image
        # NOTE(review): version ref garbled in the source — confirm and pin.
        uses: docker/build-push-action@v6
        with:
          context: ${{ matrix.dir }}
          file: ${{ matrix.dir }}/${{ matrix.file }}
          # Tags for the Docker image: "latest" for the main branch, branch name
          # for the latest of each branch, and commit hash for each commit.
          tags: |
            ${{ github.ref_name == env.LATEST_BRANCH && format('{0}/{1}/{2}:latest', env.REGISTRY, env.PROJECT, matrix.repository) || '' }}
            ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          push: true

85 changes: 85 additions & 0 deletions .github/workflows/publish-helm-chart.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
#
name: Publish Helm Chart

# Configures this workflow to run every time a change is pushed to the branch.
on:
  push:
    branches: ['main']

# Custom environment variables for the workflow.
env:
  REGISTRY: atnog-harbor.av.it.pt
  PROJECT: route25

# Jobs in this workflow.
jobs:
  package-and-push-helm-chart:
    runs-on: ubuntu-24.04

    # Matrix to run job multiple times with different configurations.
    strategy:
      fail-fast: true  # Stops the job as soon as one of the matrix entries fails.
      matrix:
        include:
          - dir: helm-chart

    # Steps in this job.
    steps:
      - name: Checkout repository
        # v2 runs on the deprecated Node 12 runtime; v4 is the maintained release.
        uses: actions/checkout@v4

      - name: Log in to the Registry
        # NOTE(review): the original version ref was garbled by the page scrape
        # ("docker/[email protected]"); v3 is the current major — confirm and pin
        # the version that was actually intended.
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ secrets.REGISTRY_USERNAME }}
          password: ${{ secrets.REGISTRY_PASSWORD }}

      - name: Helm Chart Package and Push
        shell: bash
        run: |
          # Package the Helm Chart and capture the path of the generated archive.
          CHART_PATH=$(helm package ${{ matrix.dir }} -u | awk '{print $NF}')

          # Push the chart, capturing stdout+stderr. The exit status must be
          # checked on the assignment itself: the original checked $? after an
          # intervening echo (which resets $?), and with bash -e the step would
          # have aborted before reaching the check anyway.
          if ! OUTPUT=$(helm push "$CHART_PATH" oci://${{ env.REGISTRY }}/${{ env.PROJECT }} 2>&1); then
            echo "Helm push failed."
            echo "Raw Output: $OUTPUT"
            exit 1
          fi
          echo "Raw Output: $OUTPUT"

          # Extract the Digest from the output (line: "Digest: sha256:…").
          DIGEST=$(echo "$OUTPUT" | grep "Digest:" | awk '{print $2}')

          # Extract the Chart Name from the output (last path segment of the
          # "Pushed:" reference, with the version suffix stripped).
          CHART_NAME=$(echo "$OUTPUT" | grep "Pushed:" | awk '{print $2}' | awk -F '/' '{print $NF}' | cut -d':' -f1)

          # Print the results
          echo "Digest: $DIGEST"
          echo "Chart Name: $CHART_NAME"

          # Add tags to the pushed artifact via the Harbor API:
          # "latest" (main branch only), the branch name, and the commit SHA.
          # NOTE(review): tags are created with "immutable": true — re-running
          # this workflow will fail to re-apply an existing immutable tag;
          # confirm this is the intended behavior.
          for tag in ${{ github.ref_name == 'main' && 'latest' || '' }} ${{ github.ref_name }} ${{ github.sha }} ; do
            # Skip empty entries (the 'latest' slot is empty off main).
            if [ -z "$tag" ]; then
              continue
            fi

            echo "Tagging $CHART_NAME with $tag"

            curl -u '${{ secrets.REGISTRY_USERNAME }}:${{ secrets.REGISTRY_PASSWORD }}' -X 'POST' \
              "https://${{ env.REGISTRY }}/api/v2.0/projects/${{ env.PROJECT }}/repositories/$CHART_NAME/artifacts/$DIGEST/tags" \
              -H 'accept: application/json' \
              -H 'Content-Type: application/json' \
              -d '{
              "id": 0,
              "repository_id": 0,
              "artifact_id": 0,
              "name": "'$tag'",
              "immutable": true
            }'

          done
36 changes: 21 additions & 15 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
SHELL := /bin/bash

# Function to determine Docker Compose command
define docker_compose_cmd
$(if $(shell command -v docker-compose 2> /dev/null),docker-compose,$(if $(shell command -v docker compose 2> /dev/null),docker compose,))
endef


# Prepare DEVELOPMENT environment

prepare-dev-env:
Expand All @@ -9,40 +15,40 @@ prepare-dev-env:
# docker-compose TASKS

up:
docker-compose --profile dev up
$(call docker_compose_cmd) --profile dev up

upd:
docker-compose --profile dev up -d
docker compose --profile dev up -d

debug-up:
docker-compose --profile debug up
$(call docker_compose_cmd) --profile debug up

debug-upd:
docker-compose --profile debug up -d
$(call docker_compose_cmd) --profile debug up -d

down:
docker-compose down
$(call docker_compose_cmd) --profile dev down

down-v: # also removes volumes
docker-compose down -v
$(call docker_compose_cmd) --profile dev down -v

stop:
docker-compose stop
$(call docker_compose_cmd) stop

build:
docker-compose --profile debug build
$(call docker_compose_cmd) --profile debug build

build-no-cache:
docker-compose --profile debug build --no-cache
$(call docker_compose_cmd) --profile debug build --no-cache

logs:
docker-compose logs -f
$(call docker_compose_cmd) logs -f

logs-backend:
docker-compose logs -f backend
$(call docker_compose_cmd) logs -f backend

logs-mongo:
docker-compose logs -f mongo
$(call docker_compose_cmd) logs -f mongo

ps:
docker ps -a
Expand All @@ -62,8 +68,8 @@ db-init: #simple scenario with 3 UEs, 3 Cells, 1 gNB


db-reset:
docker-compose exec db psql -h localhost -U postgres -d app -c 'TRUNCATE TABLE cell, gnb, monitoring, path, points, ue RESTART IDENTITY;'
docker-compose exec mongo /bin/bash -c 'mongo fastapi -u $$MONGO_USER -p $$MONGO_PASSWORD --authenticationDatabase admin --eval "db.dropDatabase();"'
$(call docker_compose_cmd) exec db psql -h localhost -U postgres -d app -c 'TRUNCATE TABLE cell, gnb, monitoring, path, points, ue RESTART IDENTITY;'
$(call docker_compose_cmd) exec mongo /bin/bash -c 'mongo fastapi -u $$MONGO_USER -p $$MONGO_PASSWORD --authenticationDatabase admin --eval "db.dropDatabase();"'


db-reinit: db-reset db-init
Expand All @@ -72,4 +78,4 @@ db-reinit: db-reset db-init
#Individual logs

logs-location:
docker-compose logs -f backend 2>&1 | grep -E "(handovers|monitoringType|'ack')"
$(call docker_compose_cmd) logs -f backend 2>&1 | grep -E "(handovers|monitoringType|'ack')"
17 changes: 16 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@
>This is a local implementation of the NEF emulator proposed by [EVOLVED-5G](https://5g-ppp.eu/evolved-5g-builds-the-first-3gpp-nef-emulator-to-support-smes-on-5g-programmability/).
## ⚙ Setup locally

### Docker Compose

**Host prerequisites**: `docker`, `docker-compose 1.29.2`, `build-essential`\*, `jq`\*\*

After cloning the repository, there are 4 more steps to do. For convenience, we have created a [`Makefile`](Makefile) that contains a command for each step + several common `docker-compose` tasks which you may find handy in the future.
Expand Down Expand Up @@ -37,6 +39,19 @@ make db-init

> \*\* 💡 Info: *The shell script used at step 4 (for adding test data) uses `jq` which is a lightweight and flexible command-line JSON processor. You can install it with `apt install jq`*

### k8s

**Host prerequisites**: `docker`, `docker registry`\*, `kubernetes cluster`, `helm`

After cloning the repository, there are a few more steps to do. For convenience, we have created a bash script `run-helm.sh` that automatically builds the docker images, pushes them to the docker registry, and installs the helm chart.

```bash
./run-helm.sh -n <namespace> -r <release-name> -d <docker-registry>
```
>\* 💡 Info: *The default docker registry used in the helm values is a local docker registry created using `docker run -d -p 5000:5000 --name registry registry:2.7`.*

### Try out your setup

After the containers are up and running:
Expand Down Expand Up @@ -203,4 +218,4 @@ Three possible ways to achieve the above approach:

3. with **docker network connect**, try adding your container to the bridge network:

docker network connect BRIDGE_NAME NETAPP_NAME
docker network connect BRIDGE_NAME NETAPP_NAME
3 changes: 2 additions & 1 deletion backend/app/app/api/api_v1/endpoints/broker.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from app.schemas import SinusoidalParameters
import ast # Import the ast module
from .qosMonitoring import signal_param_change
import os

router = APIRouter()
background_task = None
Expand Down Expand Up @@ -57,7 +58,7 @@ def execute_custom_function_from_file_content(self, file_content):
def run(self):
try:
# Connect to RabbitMQ server
self.connection = pika.BlockingConnection(pika.ConnectionParameters('rabbitmq'))
self.connection = pika.BlockingConnection(pika.ConnectionParameters(os.environ.get("RABBITMQ_HOST", "rabbitmq")))
self.channel = self.connection.channel()
self.channel.queue_declare(queue='my_queue')

Expand Down
3 changes: 2 additions & 1 deletion backend/app/app/api/api_v1/endpoints/qosMonitoring.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
from .qosInformation import qos_reference_match
from .utils import ReportLogging
import pika
import os

router = APIRouter()
router.route_class = ReportLogging
Expand Down Expand Up @@ -174,7 +175,7 @@ def read_subscription(

if change_behavior:
# Create a connection to the RabbitMQ server
connection = pika.BlockingConnection(pika.ConnectionParameters('rabbitmq'))
connection = pika.BlockingConnection(pika.ConnectionParameters(os.environ.get("RABBITMQ_HOST", "rabbitmq")))
channel = connection.channel()
channel.queue_declare(queue='my_queue')

Expand Down
51 changes: 44 additions & 7 deletions backend/app/app/db/init_simple.sh
Original file line number Diff line number Diff line change
@@ -1,13 +1,50 @@
#!/bin/bash

PORT=8888
URL=http://localhost
PORT=8888
REPORT_PORT=3000
FIRST_SUPERUSER="[email protected]"
FIRST_SUPERUSER_PASSWORD="pass"

set -a # automatically export all variables
source .env
set +a

# help
for arg in "$@"; do
if [ "$arg" == "--help" ]; then
echo "Usage: cmd [-h URL] [-p PORT] [-r REPORT_PORT] [-n FIRST_SUPERUSER] [-u FIRST_SUPERUSER_PASSWORD] [--help]"
exit 0
fi
done

# get opts
while getopts ":h:p:r:n:u:s:" opt; do
case ${opt} in
h )
URL=$OPTARG
;;
p )
PORT=$OPTARG
;;
r )
REPORT_PORT=$OPTARG
;;
u )
FIRST_SUPERUSER=$OPTARG
;;
# Secret
s )
FIRST_SUPERUSER_PASSWORD=$OPTARG
;;
\? )
echo "Invalid option: -$OPTARG" >&2
echo "Usage: cmd [-h URL] [-p PORT] [-r REPORT_PORT] [-n FIRST_SUPERUSER] [-u FIRST_SUPERUSER_PASSWORD] [--help]"
exit 1
;;
esac
done

printf '\n==================================================\n'
printf 'Create Report'
printf '\n==================================================\n'
Expand Down Expand Up @@ -452,12 +489,12 @@ curl -X 'POST' \
"path": 2
}'

printf '\n==================================================\n'
printf 'Delete Report'
printf '\n==================================================\n'
# printf '\n==================================================\n'
# printf 'Delete Report'
# printf '\n==================================================\n'


curl -X 'DELETE' \
"${URL}:${REPORT_PORT}/report" \
# curl -X 'DELETE' \
# "${URL}:${REPORT_PORT}/report" \

printf '\n==================================================\n\n'
# printf '\n==================================================\n\n'
3 changes: 2 additions & 1 deletion backend/app/app/db/session.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,10 @@
from sqlalchemy.orm import sessionmaker
from pymongo import MongoClient
from app.core.config import settings
import os

engine = create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_pre_ping=True, pool_size=150, max_overflow=20) #Create a db URL for SQLAlchemy in core/config.py/ Settings class
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) #Each instance is a db session


client = MongoClient("mongodb://mongo:27017", username='root', password='pass')
client = MongoClient(os.environ.get("MONGO_URL", "mongodb://mongo:27017"), username=os.environ.get("MONGO_USER", "root"), password=os.environ.get("MONGO_PASSWORD", "pass"))
8 changes: 8 additions & 0 deletions helm-chart/Chart.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# Helm chart manifest for the NEF emulator.
apiVersion: v2
name: nef-emulator
description: Helm Chart for NEF emulator
# Quoted so no YAML parser can re-type the version string.
version: "0.0.1"
keywords:
  - nef-emulator
# Explicit empty values: a bare "sources:" / "home:" parses as null, which
# trips tooling that expects a list / string here (yamllint empty-values).
sources: []
home: ""
1 change: 1 addition & 0 deletions helm-chart/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
# NEF emulator Helm Chart
Loading