From 4329e563b9c0052fe85325036efedbba656ece84 Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Sat, 1 Jun 2024 16:17:08 +0200 Subject: [PATCH 01/20] adding basic slurm file for job submission --- .gitignore | 4 +++- YOLO-detect-buoys/Job.slurm | 18 ++++++++++++++++++ YOLO-detect-buoys/__main__.py | 2 +- .../requirements.txt | Bin 4 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 YOLO-detect-buoys/Job.slurm rename requirements.txt => YOLO-detect-buoys/requirements.txt (100%) diff --git a/.gitignore b/.gitignore index b24b6e0..9128793 100644 --- a/.gitignore +++ b/.gitignore @@ -164,4 +164,6 @@ runs/ *jpg # data -data/ \ No newline at end of file +data/ +*.out +*/*.out diff --git a/YOLO-detect-buoys/Job.slurm b/YOLO-detect-buoys/Job.slurm new file mode 100644 index 0000000..df75fea --- /dev/null +++ b/YOLO-detect-buoys/Job.slurm @@ -0,0 +1,18 @@ +#!/bin/bash +#SBATCH --partition=GPUQ +#SBATCH --account=ie-idi +#SBATCH --time=01:05:00 +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=4 +#SBATCH --gres=gpu:a100:4 +#SBATCH --constraint="gpu40g|gpu80g|gpu32g" +#SBATCH --mem=12000 # Default units are megabytes. +#SBATCH --job-name="train_vortex_model_1" +#SBATCH --output=train_model_logs.out +module load foss/2022a +module load PyTorch/1.12.0-foss-2022a-CUDA-11.7.0 +module load torchvision/0.13.1-foss-2022a-CUDA-11.7.0 +module load CUDA/12.0.0 +pip install tqdm +pip install -r requirements.txt +python3 . diff --git a/YOLO-detect-buoys/__main__.py b/YOLO-detect-buoys/__main__.py index c0f15cd..ecced72 100644 --- a/YOLO-detect-buoys/__main__.py +++ b/YOLO-detect-buoys/__main__.py @@ -16,7 +16,7 @@ dataset = get_data() result = model.train( - data=dataset.location + "\\data.yaml", epochs=50, imgsz=640, device=device + data=dataset.location + "/data.yaml", epochs=50, imgsz=640, device=device ) model.val() diff --git a/requirements.txt b/YOLO-detect-buoys/requirements.txt similarity index 100% rename from requirements.txt rename to YOLO-detect-buoys/requirements.txt From be4ce19506eade939d06ff938a1fd7a2d54abcb1 Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Sat, 1 Jun 2024 16:22:35 +0200 Subject: [PATCH 02/20] updated the ci installation of requirements to use dependencies from yolo folder --- .github/workflows/pylint.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml index dbfe041..7713f61 100644 --- a/.github/workflows/pylint.yml +++ b/.github/workflows/pylint.yml @@ -15,7 +15,7 @@ jobs: - name: Lint Code shell: bash run: | - pip install -r requirements.txt + pip install -r YOLO-detect-buoys/requirements.txt pip install -U pylint pylint $(git ls-files '*.py') @@ -37,7 +37,7 @@ jobs: - name: Run tests shell: bash run: | - pip install -r requirements.txt + pip install -r YOLO-detect-buoys/requirements.txt pip install -U pytest pytest --capture=sys --disable-warnings -v @@ -56,7 +56,7 @@ with: python-version: "3.11" - name: Install requirements - run: pip install -r requirements.txt + run: pip install -r YOLO-detect-buoys/requirements.txt - name: Run tests and collect coverage run: pytest --capture=sys --cov --disable-warnings -v --cov-report=xml - name: Upload coverage reports to Codecov with GitHub Action From a1480e0bd2abd1c03c82f5b895f11e8c01f810a8 Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Thu, 6 Jun 2024 15:40:27 +0200 Subject: [PATCH 03/20] =?UTF-8?q?=E2=9C=A8=20feat:=20Push=20the=20model=20?= =?UTF-8?q?to=20the=20hub?= MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/__main__.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/YOLO-detect-buoys/__main__.py b/YOLO-detect-buoys/__main__.py index ecced72..1b1a249 100644 --- a/YOLO-detect-buoys/__main__.py +++ b/YOLO-detect-buoys/__main__.py @@ -2,13 +2,19 @@ The main entry point for YOLO detecter buoys. """ -from os import getcwd, path +from os import getcwd, getenv, path +from dotenv import load_dotenv +from huggingface_hub import HfApi, Repository from ultralytics import YOLO from utils import get_data, get_device, process_video if "__main__" == __name__: + load_dotenv() + + HfApi().whoami(token=getenv("HF_TOKEN")) + device = get_device() model = YOLO("yolov8n.pt") @@ -16,12 +22,18 @@ dataset = get_data() result = model.train( - data=dataset.location + "/data.yaml", epochs=50, imgsz=640, device=device + data=dataset.location + "/data.yaml", epochs=50, imgsz=640, device=device ) model.val() - process_video("https://youtu.be/4WGpIOwkLA4?feature=shared", model) + path = model.export(format="onnx") # export to onnx + + # Initialize the repository + repo = Repository(local_dir=path, clone_from=getenv("HG_REPO_ID")) + + # Commit and push your changes + repo.push_to_hub(commit_message="push the model to the hub") # References: # https://docs.ultralytics.com/quickstart/#install-ultralytics From 3b4ca9f764f0c1976081f069d447c077b2c66e0f Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Fri, 7 Jun 2024 17:13:20 +0200 Subject: [PATCH 04/20] =?UTF-8?q?=F0=9F=93=8C=20update=20the=20requirement?= =?UTF-8?q?s.txt?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/requirements.txt | Bin 4372 -> 2402 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/YOLO-detect-buoys/requirements.txt b/YOLO-detect-buoys/requirements.txt index 1afb8e499510d6d71f5fdeeb71629d2e271ef70b..e9aabf16220075f00e012c16d0397ea1499abe34 100644 GIT binary patch delta 63 zcmbQD^hk*5|G&-8*m;;bdt2Y3Oo0t^DPKmsBMaYq;` From daaa619216aca829fb13d58bc2df3b508bccddd4 Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Sun, 9 Jun 2024 01:46:39 +0200 Subject: [PATCH 05/20] =?UTF-8?q?=E2=9E=96=20simplify=20requirements.txt?= =?UTF-8?q?=20by=20removing=20unused=20stuff?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/requirements.txt | Bin 2402 -> 241 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/YOLO-detect-buoys/requirements.txt b/YOLO-detect-buoys/requirements.txt index e9aabf16220075f00e012c16d0397ea1499abe34..e7d7edf6c55167b1d1e86b77e49f8d161588fc0b 100644 GIT binary patch literal 241 zcmZvV+X{m)5JcbiS4x*8UeJgA7Ky1E!Dd66DE@sDN-4B`oHN52o0esD%dE{b*fbIe zR1#~je|UuzxODd?kwn$z75mAVeK71|SjWMi(Xu0|u}6WOc=tct>p4pVHrT+cyxurY xZZBqm5(1ArZ+w)TziNmaj3rVfT}+Zneo%&xwixtw#??HCy0YrOS@!lVJOOSUO4{ ztPd!&^pK|VoXfuroQueivt;`|?1QuwpIzE0hP@isC?|bQ6 z2!}lG6J8*h<%4Rl(q}D}D|K`#-$fdT_aWU!6r9~%*O!^u%Fl?_Ox;0PD;m9MSe-mT zaJIGh&>7EzYgu~xH86e)dmORW3tR9XWqsBcgi5`qH*lbDR*qMt=q9~{g}mgrjQBfQ zc0xJG{zE@6g-|qPFXpe62i5lmxC(H&(brry(m9joeTP9Nro$>XiZ{-~iD9YuMhsfT zGF6@oD=6_>Sz2kOQjeV$S1I>@D-5#qC-+MEFG6W83)nMNX%JrhIiyy%!4*Yj+=vge z1}jg3D}!(GSa$i&>q_iaGr?8K_4H2Ck4AXRc`ZHe7SUzTs5BL4qS=1-V%H&&y^mB$ zHM3e&_xbIku4E$*eMhBtib@Tnu#pe?$c9(0L)OSf@8sKv5qG8*qs3VVU|+nsR9q6U z^0XE!l|f6XX!h|fgHEA0!g3|3VAk0Pg=~dm zKbDDSZhTi^LY_VwU$a3>H9g;SduV009N%m!$XHzcCKwyfoN}LdONZ*-P}hZ@C*3@wcq1*`MC)}l 
zz4i^jV<$zvK>WRIQn5WL9^UzlG76W--D=g1xiV1^I((%MNaoI-c(#A3Ixk|v^kls9 zCiP#%RxdlZf(r=h+xa+175|&?UjW;|j2VgTFu&XG9q0Atwyf2cNr%?g`7A#7iuOCo jO*K-#uSZ$g7G68vy}9rTuToWOhLL^Tv%=n+?P>cDZ248E From a241df04774635b5c79e8498c2bfb31590915df4 Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Sun, 9 Jun 2024 14:18:05 +0200 Subject: [PATCH 06/20] =?UTF-8?q?=F0=9F=94=A5=20remove=20hugging=20face?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/__main__.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/YOLO-detect-buoys/__main__.py b/YOLO-detect-buoys/__main__.py index 1b1a249..3560dcc 100644 --- a/YOLO-detect-buoys/__main__.py +++ b/YOLO-detect-buoys/__main__.py @@ -13,8 +13,6 @@ load_dotenv() - HfApi().whoami(token=getenv("HF_TOKEN")) - device = get_device() model = YOLO("yolov8n.pt") @@ -29,11 +27,8 @@ path = model.export(format="onnx") # export to onnx - # Initialize the repository - repo = Repository(local_dir=path, clone_from=getenv("HG_REPO_ID")) - - # Commit and push your changes - repo.push_to_hub(commit_message="push the model to the hub") + print("Model exported to: " + path) + print(path) # References: # https://docs.ultralytics.com/quickstart/#install-ultralytics From bcff411cc7f42bb2b578826cff8e32920deb5e86 Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Mon, 15 Jul 2024 20:47:56 +0200 Subject: [PATCH 07/20] feat: update slurm file --- YOLO-detect-buoys/Job.slurm | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/YOLO-detect-buoys/Job.slurm b/YOLO-detect-buoys/Job.slurm index df75fea..e3af20b 100644 --- a/YOLO-detect-buoys/Job.slurm +++ b/YOLO-detect-buoys/Job.slurm @@ -13,6 +13,10 @@ module load foss/2022a module load PyTorch/1.12.0-foss-2022a-CUDA-11.7.0 module load torchvision/0.13.1-foss-2022a-CUDA-11.7.0 module load CUDA/12.0.0 -pip install tqdm pip install -r requirements.txt +pip uninstall protobuf +pip install protobuf==4.21.4 +pip install -U ultralytics +pip install --force-reinstall ultralytics +pip install --upgrade protobuf python3 . 
From b0604d320a4fdd8c74f9eddaa42d85299ed237d4 Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Mon, 15 Jul 2024 21:02:20 +0200 Subject: [PATCH 08/20] =?UTF-8?q?=E2=9C=A8=20using=20the=20pt=20format=20f?= =?UTF-8?q?or=20model=20storage?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/__main__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/YOLO-detect-buoys/__main__.py b/YOLO-detect-buoys/__main__.py index 3560dcc..7d431ff 100644 --- a/YOLO-detect-buoys/__main__.py +++ b/YOLO-detect-buoys/__main__.py @@ -25,7 +25,7 @@ model.val() - path = model.export(format="onnx") # export to onnx + path = model.export() # export to onnx print("Model exported to: " + path) print(path) From c1d49c3ffc71a6b312c24f1f687b02ebd41d5a0c Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Mon, 15 Jul 2024 21:13:58 +0200 Subject: [PATCH 09/20] =?UTF-8?q?=F0=9F=94=A7=20remove=20redundant=20steps?= =?UTF-8?q?=20from=20slurm=20file?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/Job.slurm | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/YOLO-detect-buoys/Job.slurm b/YOLO-detect-buoys/Job.slurm index e3af20b..b91b2d1 100644 --- a/YOLO-detect-buoys/Job.slurm +++ b/YOLO-detect-buoys/Job.slurm @@ -15,8 +15,5 @@ module load torchvision/0.13.1-foss-2022a-CUDA-11.7.0 module load CUDA/12.0.0 pip install -r requirements.txt pip uninstall protobuf -pip install protobuf==4.21.4 -pip install -U ultralytics -pip install --force-reinstall ultralytics -pip install --upgrade protobuf +pip install protobuf python3 . From 3f545ba701fcfb3ec3e5ee609fac1c20da2d9102 Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Thu, 22 Aug 2024 18:48:08 +0200 Subject: [PATCH 10/20] =?UTF-8?q?=E2=9C=A8=20feat:=20Update=20YOLO=20model?= =?UTF-8?q?=20training=20parameters?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/__main__.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/YOLO-detect-buoys/__main__.py b/YOLO-detect-buoys/__main__.py index 7d431ff..fc911e1 100644 --- a/YOLO-detect-buoys/__main__.py +++ b/YOLO-detect-buoys/__main__.py @@ -14,18 +14,24 @@ load_dotenv() device = get_device() + print("device", device) model = YOLO("yolov8n.pt") file_path = path.abspath(getcwd()) dataset = get_data() result = model.train( - data=dataset.location + "/data.yaml", epochs=50, imgsz=640, device=device + data=dataset.location + "/data.yaml", + epochs=100, + imgsz=640, + device=device, + batch=4, + cache=False, ) model.val() - path = model.export() # export to onnx + path = model.export(format="onnx") # export to onnx print("Model exported to: " + path) print(path) From ad41066e53ed7ef61d5d8b066846970d849d3a66 Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Thu, 22 Aug 2024 18:51:13 +0200 Subject: [PATCH 11/20] =?UTF-8?q?=E2=9E=96=20Update=20requirements.txt=20t?= =?UTF-8?q?o=20remove=20unused=20dependencies?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/requirements.txt | 2 -- 1 file changed, 2 deletions(-) diff --git a/YOLO-detect-buoys/requirements.txt b/YOLO-detect-buoys/requirements.txt index e7d7edf..bcbb79a 100644 --- a/YOLO-detect-buoys/requirements.txt +++ b/YOLO-detect-buoys/requirements.txt @@ -1,4 +1,3 @@ -huggingface_hub==0.21.4 huggingface_hub==0.23.3 numpy==1.26.4 opencv_contrib_python==4.9.0.80 opencv_python==4.9.0.80 pafy==0.5.5 @@ -8,5 
+7,4 @@ python-dotenv==1.0.1 roboflow==1.1.24 torch==2.1.2+cu121 torch==2.2.1 -ultralytics==8.1.29 ultralytics==8.0.196 From 79c728dfade7c42f40a94eda08754777f74a407f Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Thu, 22 Aug 2024 18:52:53 +0200 Subject: [PATCH 12/20] =?UTF-8?q?=E2=9E=96=20Update=20requirements.txt=20t?= =?UTF-8?q?o=20remove=20unused=20dependencies?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/YOLO-detect-buoys/requirements.txt b/YOLO-detect-buoys/requirements.txt index bcbb79a..b3e182e 100644 --- a/YOLO-detect-buoys/requirements.txt +++ b/YOLO-detect-buoys/requirements.txt @@ -5,6 +5,5 @@ opencv_python==4.9.0.80 pafy==0.5.5 python-dotenv==1.0.1 roboflow==1.1.24 -torch==2.1.2+cu121 torch==2.2.1 ultralytics==8.0.196 From ade662654aac5ee9d4ea685538b684b9d3b930e5 Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Thu, 22 Aug 2024 18:55:25 +0200 Subject: [PATCH 13/20] =?UTF-8?q?=F0=9F=94=A5=20Update=20Job.slurm=20to=20?= =?UTF-8?q?install=20protobuf=20package?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/Job.slurm | 2 -- 1 file changed, 2 deletions(-) diff --git a/YOLO-detect-buoys/Job.slurm b/YOLO-detect-buoys/Job.slurm index b91b2d1..78766cc 100644 --- a/YOLO-detect-buoys/Job.slurm +++ b/YOLO-detect-buoys/Job.slurm @@ -14,6 +14,4 @@ module load PyTorch/1.12.0-foss-2022a-CUDA-11.7.0 module load torchvision/0.13.1-foss-2022a-CUDA-11.7.0 module load CUDA/12.0.0 pip install -r requirements.txt -pip uninstall protobuf -pip install protobuf python3 . From 793a88229ce637fd93c292f47274fb9d7b2bc623 Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Thu, 22 Aug 2024 19:05:55 +0200 Subject: [PATCH 14/20] =?UTF-8?q?=E2=9E=95=20Update=20protobuf=20package?= =?UTF-8?q?=20version=20in=20requirements.txt?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/YOLO-detect-buoys/requirements.txt b/YOLO-detect-buoys/requirements.txt index b3e182e..1774ae9 100644 --- a/YOLO-detect-buoys/requirements.txt +++ b/YOLO-detect-buoys/requirements.txt @@ -6,4 +6,5 @@ pafy==0.5.5 python-dotenv==1.0.1 roboflow==1.1.24 torch==2.2.1 +protobuf==4.24.0 ultralytics==8.0.196 From 4cfe24f7bf48fc98ce3d512c8b4c30fb08e3063a Mon Sep 17 00:00:00 2001 From: Yauhen Yavorski Date: Mon, 20 Jan 2025 17:40:22 +0100 Subject: [PATCH 15/20] =?UTF-8?q?=E2=9C=A8=20feat:=20Enhance=20Job.slurm?= =?UTF-8?q?=20for=20improved=20environment=20setup=20and=20package=20insta?= =?UTF-8?q?llation?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- YOLO-detect-buoys/Job.slurm | 51 ++++++++++++++++++++++++++++++------- 1 file changed, 42 insertions(+), 9 deletions(-) diff --git a/YOLO-detect-buoys/Job.slurm b/YOLO-detect-buoys/Job.slurm index 78766cc..1b69697 100644 --- a/YOLO-detect-buoys/Job.slurm +++ b/YOLO-detect-buoys/Job.slurm @@ -1,17 +1,50 @@ #!/bin/bash #SBATCH --partition=GPUQ #SBATCH --account=ie-idi -#SBATCH --time=01:05:00 +#SBATCH --time=999:99:99 #SBATCH --nodes=1 #SBATCH --ntasks-per-node=4 #SBATCH --gres=gpu:a100:4 #SBATCH --constraint="gpu40g|gpu80g|gpu32g" -#SBATCH --mem=12000 # Default units are megabytes. 
-#SBATCH --job-name="train_vortex_model_1" -#SBATCH --output=train_model_logs.out -module load foss/2022a -module load PyTorch/1.12.0-foss-2022a-CUDA-11.7.0 -module load torchvision/0.13.1-foss-2022a-CUDA-11.7.0 -module load CUDA/12.0.0 +#SBATCH --job-name="vortex-img-process" +#SBATCH --output=vortex_img_process_log.out +#SBATCH --mem=32G + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/cluster/apps/eb/software/Python/3.10.4-GCCcore-11.3.0/lib/ + +set -e + +module purge +module --ignore_cache load foss/2022a +module --ignore_cache load Python/3.10.4-GCCcore-11.3.0 + +pip cache purge + +# makes sure that the pip is up to date +python3 -m pip install --upgrade pip + +# Create a temporary virtual environment +VENV_DIR=$(mktemp -d -t env-repaint-XXXXXXXXXX) +python3 -m venv $VENV_DIR +source $VENV_DIR/bin/activate + +pip install --upgrade pip + +# install the required packages pip install -r requirements.txt -python3 . +#pip install pyyaml # used to read the configuration file +#pip install blobfile # install blobfile to download the dataset +#pip install kagglehub # install kagglehub to download the dataset +pip install --force-reinstall torch -U +pip install torchvision torchaudio +#pip install diffusers transformers accelerate --user + +# Mixing expandable_segments:True with max_split_size doesn't make sense because the expandable segment is the size of RAM and so it could never be split with max_split_size. +# export PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True,max_split_size_mb:128" +export PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True" + +python3 train.py + +# Deactivate and remove the virtual environment +deactivate +rm -rf $VENV_DIR From 0a2897504c3e9c44de8e7e24755cfab2c8ce7b89 Mon Sep 17 00:00:00 2001 From: vortex-orin Date: Tue, 21 Jan 2025 16:17:29 +0100 Subject: [PATCH 16/20] added yolo roboflow training script --- yolo_roboflow_training/Job.slurm | 50 ++++++++++++++++++++ yolo_roboflow_training/requirements.txt | 10 ++++ yolo_roboflow_training/train.py | 62 +++++++++++++++++++++++++ 3 files changed, 122 insertions(+) create mode 100644 yolo_roboflow_training/Job.slurm create mode 100644 yolo_roboflow_training/requirements.txt create mode 100644 yolo_roboflow_training/train.py diff --git a/yolo_roboflow_training/Job.slurm b/yolo_roboflow_training/Job.slurm new file mode 100644 index 0000000..1b69697 --- /dev/null +++ b/yolo_roboflow_training/Job.slurm @@ -0,0 +1,50 @@ +#!/bin/bash +#SBATCH --partition=GPUQ +#SBATCH --account=ie-idi +#SBATCH --time=999:99:99 +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=4 +#SBATCH --gres=gpu:a100:4 +#SBATCH --constraint="gpu40g|gpu80g|gpu32g" +#SBATCH --job-name="vortex-img-process" +#SBATCH --output=vortex_img_process_log.out +#SBATCH --mem=32G + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/cluster/apps/eb/software/Python/3.10.4-GCCcore-11.3.0/lib/ + +set -e + +module purge +module --ignore_cache load foss/2022a +module --ignore_cache load Python/3.10.4-GCCcore-11.3.0 + +pip cache purge + +# makes sure that the pip is up to date +python3 -m pip install --upgrade pip + +# Create a temporary virtual environment +VENV_DIR=$(mktemp -d -t env-repaint-XXXXXXXXXX) +python3 -m venv $VENV_DIR +source $VENV_DIR/bin/activate + +pip install --upgrade pip + +# install the required packages +pip install -r requirements.txt +#pip install pyyaml # used to read the configuration file +#pip install blobfile # install blobfile to download the dataset +#pip install kagglehub # install kagglehub to download the dataset +pip install --force-reinstall torch -U +pip 
install torchvision torchaudio +#pip install diffusers transformers accelerate --user + +# Mixing expandable_segments:True with max_split_size doesn't make sense because the expandable segment is the size of RAM and so it could never be split with max_split_size. +# export PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True,max_split_size_mb:128" +export PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True" + +python3 train.py + +# Deactivate and remove the virtual environment +deactivate +rm -rf $VENV_DIR diff --git a/yolo_roboflow_training/requirements.txt b/yolo_roboflow_training/requirements.txt new file mode 100644 index 0000000..1774ae9 --- /dev/null +++ b/yolo_roboflow_training/requirements.txt @@ -0,0 +1,10 @@ +huggingface_hub==0.23.3 +numpy==1.26.4 +opencv_contrib_python==4.9.0.80 +opencv_python==4.9.0.80 +pafy==0.5.5 +python-dotenv==1.0.1 +roboflow==1.1.24 +torch==2.2.1 +protobuf==4.24.0 +ultralytics==8.0.196 diff --git a/yolo_roboflow_training/train.py b/yolo_roboflow_training/train.py new file mode 100644 index 0000000..ddfb055 --- /dev/null +++ b/yolo_roboflow_training/train.py @@ -0,0 +1,62 @@ +import os +# !which python +# !pip show ultralytics +# !pip show urllib3 +# !pip show requests +# !pip show requests-toolbelt +# Check GPU availability +import torch +print("CUDA available:", torch.cuda.is_available()) +print("CUDA version:", torch.version.cuda) +print("PyTorch built with CUDA:", torch.backends.cudnn.version()) + + +from ultralytics import YOLO +from roboflow import Roboflow + +# Step 1: Download Dataset from Roboflow +ROBOFLOW_API_KEY = "" # Replace with your Roboflow API Key +PROJECT_NAME = "" # Replace with your project id +VERSION = "" # Replace with your dataset version number exluding the 'v' + +rf = Roboflow(api_key=ROBOFLOW_API_KEY) +project = rf.workspace().project(PROJECT_NAME) +# List all available versions +versions = project.versions() +# print("Available versions:", [v['id'] for v in versions]) +dataset = project.version(VERSION).download("yolov8") + + + +# Step 2: Set up training configuration +model = YOLO("yolov8m.pt") # Use the smallest YOLOv8 model to start. Change to 'yolov8s.pt', etc., for larger models. + +# Define paths +data_yaml_path = os.path.join(dataset.location, "data.yaml") # Path to the dataset's data.yaml file +results_dir = "results" # Directory to save training results + + +if not torch.cuda.is_available(): + raise RuntimeError("CUDA is not available. Ensure your environment supports GPU acceleration.") + +print("Using GPU:", torch.cuda.get_device_name(0)) + +# Step 3: Train the YOLOv8 model +model.train( + data=data_yaml_path, # Path to dataset YAML file + epochs=200, # Number of training epochs + imgsz=640, # Image size + batch=16, # Batch size + device=0, # Use the first GPU (0). 
For multiple GPUs, use device="0,1,2" + project=results_dir, # Directory for saving results + name="custom_yolov8" # Subdirectory for this training run +) + +# Step 4: Evaluate the model +metrics = model.val(data=data_yaml_path) +print("Validation metrics:", metrics) + +# Step 5: Export the trained model +export_formats = ["onnx"] # Export formats for deployment +for fmt in export_formats: + model.export(format=fmt, device=0) # Ensure GPU is used during export if applicable From d07aa1abe74f4dfac24716c38836a756f743674c Mon Sep 17 00:00:00 2001 From: vortex-orin Date: Sat, 1 Feb 2025 17:50:38 +0100 Subject: [PATCH 17/20] unet training script --- unet_roboflow_training/Job.slurm | 50 +++++ unet_roboflow_training/requirements.txt | 10 + unet_roboflow_training/test.py | 159 ++++++++++++++ unet_roboflow_training/train.py | 266 ++++++++++++++++++++++++ 4 files changed, 485 insertions(+) create mode 100644 unet_roboflow_training/Job.slurm create mode 100644 unet_roboflow_training/requirements.txt create mode 100644 unet_roboflow_training/test.py create mode 100644 unet_roboflow_training/train.py diff --git a/unet_roboflow_training/Job.slurm b/unet_roboflow_training/Job.slurm new file mode 100644 index 0000000..1b69697 --- /dev/null +++ b/unet_roboflow_training/Job.slurm @@ -0,0 +1,50 @@ +#!/bin/bash +#SBATCH --partition=GPUQ +#SBATCH --account=ie-idi +#SBATCH --time=999:99:99 +#SBATCH --nodes=1 +#SBATCH --ntasks-per-node=4 +#SBATCH --gres=gpu:a100:4 +#SBATCH --constraint="gpu40g|gpu80g|gpu32g" +#SBATCH --job-name="vortex-img-process" +#SBATCH --output=vortex_img_process_log.out +#SBATCH --mem=32G + +export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/cluster/apps/eb/software/Python/3.10.4-GCCcore-11.3.0/lib/ + +set -e + +module purge +module --ignore_cache load foss/2022a +module --ignore_cache load Python/3.10.4-GCCcore-11.3.0 + +pip cache purge + +# makes sure that the pip is up to date +python3 -m pip install --upgrade pip + +# Create a temporary virtual environment +VENV_DIR=$(mktemp -d -t env-repaint-XXXXXXXXXX) +python3 -m venv $VENV_DIR +source $VENV_DIR/bin/activate + +pip install --upgrade pip + +# install the required packages +pip install -r requirements.txt +#pip install pyyaml # used to read the configuration file +#pip install blobfile # install blobfile to download the dataset +#pip install kagglehub # install kagglehub to download the dataset +pip install --force-reinstall torch -U +pip install torchvision torchaudio +#pip install diffusers transformers accelerate --user + +# Mixing expandable_segments:True with max_split_size doesn't make sense because the expandable segment is the size of RAM and so it could never be split with max_split_size. 
+# export PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True,max_split_size_mb:128" +export PYTORCH_CUDA_ALLOC_CONF="expandable_segments:True" + +python3 train.py + +# Deactivate and remove the virtual environment +deactivate +rm -rf $VENV_DIR diff --git a/unet_roboflow_training/requirements.txt b/unet_roboflow_training/requirements.txt new file mode 100644 index 0000000..1774ae9 --- /dev/null +++ b/unet_roboflow_training/requirements.txt @@ -0,0 +1,10 @@ +huggingface_hub==0.23.3 +numpy==1.26.4 +opencv_contrib_python==4.9.0.80 +opencv_python==4.9.0.80 +pafy==0.5.5 +python-dotenv==1.0.1 +roboflow==1.1.24 +torch==2.2.1 +protobuf==4.24.0 +ultralytics==8.0.196 diff --git a/unet_roboflow_training/test.py b/unet_roboflow_training/test.py new file mode 100644 index 0000000..d002ef9 --- /dev/null +++ b/unet_roboflow_training/test.py @@ -0,0 +1,159 @@ +#!/usr/bin/env python3 +import torch +import torch.nn as nn +from torchvision import transforms +from PIL import Image +import matplotlib.pyplot as plt +import numpy as np + +############################################## +# 1. Define the U-Net model (same as during training) +############################################## +class DoubleConv(nn.Module): + """ + A block with two consecutive convolution layers each followed by + batch normalization and ReLU activation. + """ + def __init__(self, in_channels, out_channels): + super(DoubleConv, self).__init__() + self.double_conv = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True), + nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True) + ) + def forward(self, x): + return self.double_conv(x) + +class UNet(nn.Module): + def __init__(self, in_channels=3, out_channels=1): + """ + For binary segmentation the model outputs 1 channel per pixel. + """ + super(UNet, self).__init__() + # Down-sampling path + self.down1 = DoubleConv(in_channels, 64) + self.pool1 = nn.MaxPool2d(2) + self.down2 = DoubleConv(64, 128) + self.pool2 = nn.MaxPool2d(2) + self.down3 = DoubleConv(128, 256) + self.pool3 = nn.MaxPool2d(2) + self.down4 = DoubleConv(256, 512) + self.pool4 = nn.MaxPool2d(2) + + # Bottleneck + self.bottleneck = DoubleConv(512, 1024) + + # Up-sampling path + self.up4 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2) + self.conv4 = DoubleConv(1024, 512) + self.up3 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2) + self.conv3 = DoubleConv(512, 256) + self.up2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2) + self.conv2 = DoubleConv(256, 128) + self.up1 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2) + self.conv1 = DoubleConv(128, 64) + + self.final_conv = nn.Conv2d(64, out_channels, kernel_size=1) + + def forward(self, x): + # Down path + c1 = self.down1(x) + p1 = self.pool1(c1) + c2 = self.down2(p1) + p2 = self.pool2(c2) + c3 = self.down3(p2) + p3 = self.pool3(c3) + c4 = self.down4(p3) + p4 = self.pool4(c4) + + # Bottleneck + bn = self.bottleneck(p4) + + # Up path + u4 = self.up4(bn) + merge4 = torch.cat([u4, c4], dim=1) + c5 = self.conv4(merge4) + u3 = self.up3(c5) + merge3 = torch.cat([u3, c3], dim=1) + c6 = self.conv3(merge3) + u2 = self.up2(c6) + merge2 = torch.cat([u2, c2], dim=1) + c7 = self.conv2(merge2) + u1 = self.up1(c7) + merge1 = torch.cat([u1, c1], dim=1) + c8 = self.conv1(merge1) + output = self.final_conv(c8) + return output + +############################################## +# 2. 
Load the saved model +############################################## +# Set device to CUDA if available, else CPU +device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + +# Initialize the model and load the saved weights +model = UNet(in_channels=3, out_channels=1) +model_path = "unet_segmentation.pth" # path to your saved model weights +model.load_state_dict(torch.load(model_path, map_location=device)) +model.to(device) +model.eval() # set model to evaluation mode + +############################################## +# 3. Prepare the test image +############################################## +# Define the transformation (should match the training transform) +transform = transforms.Compose([ + transforms.Resize((256, 256)), + transforms.ToTensor(), +]) + +# Path to the test image (update this path to your test image) +test_image_path = "test.jpg" +image = Image.open(test_image_path).convert("RGB") +input_tensor = transform(image).unsqueeze(0) # add batch dimension +input_tensor = input_tensor.to(device) + +############################################## +# 4. Run inference +############################################## +with torch.no_grad(): + output = model(input_tensor) + +# Apply sigmoid to convert logits to probabilities and then threshold for binary mask +output_prob = torch.sigmoid(output) +threshold = 0.2 +predicted_mask = (output_prob > threshold).float() + +# Remove batch and channel dimensions, and convert to NumPy array for visualization +mask_np = predicted_mask.squeeze().cpu().numpy() + +############################################## +# 5. Visualize the results +############################################## +plt.figure(figsize=(12, 6)) +unique_values = np.unique(mask_np) +print("Unique mask values:", unique_values) + +mask_uint8 = (mask_np * 255).astype("uint8") + +plt.imsave("predicted_mask.png", mask_uint8, cmap="gray") +print("Saved predicted mask to predicted_mask.png") + +# Display the original image +plt.subplot(1, 2, 1) +plt.imshow(image) +plt.title("Original Image") +plt.axis("off") + +# Display the predicted mask +plt.subplot(1, 2, 2) +plt.imshow(mask_uint8, cmap='gray') +plt.title("Predicted Mask") +plt.axis("off") + +plt.savefig("test_output.png") +print("Output saved to test_output.png") + diff --git a/unet_roboflow_training/train.py b/unet_roboflow_training/train.py new file mode 100644 index 0000000..46c8d69 --- /dev/null +++ b/unet_roboflow_training/train.py @@ -0,0 +1,266 @@ +#!/usr/bin/env python3 +import os +import copy +import torch +import torch.nn as nn +import torch.optim as optim +from torch.utils.data import Dataset, DataLoader +from torchvision import transforms +from PIL import Image + +# Import Roboflow and download your dataset. +# Make sure you have installed it via: pip install roboflow +from roboflow import Roboflow + +############################################## +# 1. Define the U-Net model (with a simple UNet) +############################################## +class DoubleConv(nn.Module): + """ + A block with two consecutive convolution layers each followed by + batch normalization and ReLU activation. 
+ """ + def __init__(self, in_channels, out_channels): + super(DoubleConv, self).__init__() + self.double_conv = nn.Sequential( + nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True), + nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True) + ) + def forward(self, x): + return self.double_conv(x) + +class UNet(nn.Module): + def __init__(self, in_channels=3, out_channels=1): + """ + For binary segmentation the model outputs 1 channel per pixel. + """ + super(UNet, self).__init__() + # Down-sampling path + self.down1 = DoubleConv(in_channels, 64) + self.pool1 = nn.MaxPool2d(2) + self.down2 = DoubleConv(64, 128) + self.pool2 = nn.MaxPool2d(2) + self.down3 = DoubleConv(128, 256) + self.pool3 = nn.MaxPool2d(2) + self.down4 = DoubleConv(256, 512) + self.pool4 = nn.MaxPool2d(2) + + # Bottleneck + self.bottleneck = DoubleConv(512, 1024) + + # Up-sampling path + self.up4 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2) + self.conv4 = DoubleConv(1024, 512) + self.up3 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2) + self.conv3 = DoubleConv(512, 256) + self.up2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2) + self.conv2 = DoubleConv(256, 128) + self.up1 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2) + self.conv1 = DoubleConv(128, 64) + + self.final_conv = nn.Conv2d(64, out_channels, kernel_size=1) + + def forward(self, x): + # Down path + c1 = self.down1(x) + p1 = self.pool1(c1) + c2 = self.down2(p1) + p2 = self.pool2(c2) + c3 = self.down3(p2) + p3 = self.pool3(c3) + c4 = self.down4(p3) + p4 = self.pool4(c4) + + # Bottleneck + bn = self.bottleneck(p4) + + # Up path + u4 = self.up4(bn) + merge4 = torch.cat([u4, c4], dim=1) + c5 = self.conv4(merge4) + u3 = self.up3(c5) + merge3 = torch.cat([u3, c3], dim=1) + c6 = self.conv3(merge3) + u2 = self.up2(c6) + merge2 = torch.cat([u2, c2], dim=1) + c7 = self.conv2(merge2) + u1 = self.up1(c7) + merge1 = torch.cat([u1, c1], dim=1) + c8 = self.conv1(merge1) + output = self.final_conv(c8) + return output + +############################################## +# 2. Create a custom Dataset class for segmentation +# (Assuming images are .jpg and masks are .png with names like: +# "frame_0444.jpg" and "frame_0444_mask.png") +############################################## +class SingleFolderSegmentationDataset(Dataset): + def __init__(self, data_dir, transform=None, mask_transform=None): + """ + data_dir: directory containing both images and their masks. + transform: torchvision transforms for the image. + mask_transform: transforms for the mask. 
+ """ + self.data_dir = data_dir + # List only image files (assuming images are .jpg and masks are not included) + self.image_files = sorted([f for f in os.listdir(data_dir) + if f.endswith('.jpg') and '_mask' not in f]) + self.transform = transform + self.mask_transform = mask_transform + + def __len__(self): + return len(self.image_files) + + def __getitem__(self, idx): + image_file = self.image_files[idx] + image_path = os.path.join(self.data_dir, image_file) + + # Derive the mask filename using a fixed .png extension + base, _ = os.path.splitext(image_file) + mask_file = base + "_mask.png" + mask_path = os.path.join(self.data_dir, mask_file) + + if not os.path.exists(mask_path): + raise FileNotFoundError(f"Mask file {mask_path} does not exist for image {image_file}") + + image = Image.open(image_path).convert("RGB") + mask = Image.open(mask_path).convert("L") # load mask as grayscale + + if self.transform: + image = self.transform(image) + else: + image = transforms.ToTensor()(image) + + if self.mask_transform: + mask = self.mask_transform(mask) + else: + mask = transforms.ToTensor()(mask) + + # Binarize the mask (assumes mask pixel values are 0 and 255) + mask = (mask > 0.5).float() + return image, mask + +############################################## +# 3. Define the training loop +############################################## +def train_model(model, dataloaders, criterion, optimizer, device, num_epochs=25): + best_model_wts = copy.deepcopy(model.state_dict()) + best_loss = float('inf') + + for epoch in range(num_epochs): + print(f"Epoch {epoch+1}/{num_epochs}") + print("-" * 20) + + # Each epoch has a training and validation phase + for phase in ['train', 'valid']: + if phase == 'train': + model.train() # Set model to training mode + else: + model.eval() # Set model to evaluate mode + + running_loss = 0.0 + + # Iterate over data. + for inputs, masks in dataloaders[phase]: + inputs = inputs.to(device) + masks = masks.to(device) + + optimizer.zero_grad() + + # Forward pass + with torch.set_grad_enabled(phase == 'train'): + outputs = model(inputs) + loss = criterion(outputs, masks) + # Backward pass and optimize only if in training phase + if phase == 'train': + loss.backward() + optimizer.step() + + running_loss += loss.item() * inputs.size(0) + + epoch_loss = running_loss / len(dataloaders[phase].dataset) + print(f"{phase} Loss: {epoch_loss:.4f}") + + # Deep copy the model if the validation loss improved + if phase == 'valid' and epoch_loss < best_loss: + best_loss = epoch_loss + best_model_wts = copy.deepcopy(model.state_dict()) + + print() + + print("Best validation Loss: {:.4f}".format(best_loss)) + model.load_state_dict(best_model_wts) + return model + +############################################## +# 4. Main function: download dataset, create dataloaders, and train +############################################## +def main(): + # ===== Retrieve dataset from Roboflow ===== + # Replace with your actual Roboflow API key, workspace, project, and version. 
+ rf = Roboflow(api_key="") # Add your Roboflow API key here + project = rf.workspace("pipe-92at4").project("pipeline-detection-2") + version = project.version(5) + dataset = version.download("png-mask-semantic") + + # With the current dataset, the folder structure is expected as follows: + # dataset.location/Pipeline-Detection-2-5/train/ --> contains images (.jpg) and masks (.png) + # dataset.location/Pipeline-Detection-2-5/valid/ --> contains images (.jpg) and masks (.png) + # + # Update the directory paths accordingly: + train_dir = os.path.join(dataset.location, "train") + valid_dir = os.path.join(dataset.location, "valid") + + # ===== Define transforms ===== + # Resize images and masks to a fixed size (adjust as needed) + transform = transforms.Compose([ + transforms.Resize((256, 256)), + transforms.ToTensor() + ]) + mask_transform = transforms.Compose([ + transforms.Resize((256, 256)), + transforms.ToTensor() + ]) + + # ===== Create datasets ===== + train_dataset = SingleFolderSegmentationDataset(train_dir, + transform=transform, + mask_transform=mask_transform) + valid_dataset = SingleFolderSegmentationDataset(valid_dir, + transform=transform, + mask_transform=mask_transform) + + # ===== Create dataloaders ===== + batch_size = 4 # adjust batch size as needed + train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4) + valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, num_workers=4) + dataloaders = {'train': train_loader, 'valid': valid_loader} + + # ===== Set device ===== + device = torch.device("cuda" if torch.cuda.is_available() else "cpu") + print("Using device:", device) + + # ===== Initialize the model ===== + model = UNet(in_channels=3, out_channels=1) + model = model.to(device) + + # ===== Define loss function and optimizer ===== + # For binary segmentation, BCEWithLogitsLoss is common. 
+ criterion = nn.BCEWithLogitsLoss() + optimizer = optim.Adam(model.parameters(), lr=1e-4) + + # ===== Train the model ===== + num_epochs = 25 # adjust the number of epochs as needed + trained_model = train_model(model, dataloaders, criterion, optimizer, device, num_epochs=num_epochs) + + # ===== Save the trained model ===== + torch.save(trained_model.state_dict(), "unet_segmentation.pth") + print("Model saved as unet_segmentation.pth") + +if __name__ == "__main__": + main() From 7a7cfaaeaa7e431b4c02379a5dc2d60ba0ee0fe2 Mon Sep 17 00:00:00 2001 From: Vegard Johansen Date: Sun, 16 Feb 2025 11:26:36 +0100 Subject: [PATCH 18/20] update: added correct account name and time --- yolo_roboflow_training/Job.slurm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yolo_roboflow_training/Job.slurm b/yolo_roboflow_training/Job.slurm index 1b69697..7ff5bb5 100644 --- a/yolo_roboflow_training/Job.slurm +++ b/yolo_roboflow_training/Job.slurm @@ -1,7 +1,7 @@ #!/bin/bash #SBATCH --partition=GPUQ -#SBATCH --account=ie-idi -#SBATCH --time=999:99:99 +#SBATCH --account=studiegrupper-vortex +#SBATCH --time=2:00:00 #SBATCH --nodes=1 #SBATCH --ntasks-per-node=4 #SBATCH --gres=gpu:a100:4 From 5b186dbfaa43b0a32ae9980d994182d379816429 Mon Sep 17 00:00:00 2001 From: Andreas Kluge Svendsrud <89779148+kluge7@users.noreply.github.com> Date: Sun, 26 Oct 2025 19:31:43 +0100 Subject: [PATCH 19/20] Delete .github/workflows/pylint.yml --- .github/workflows/pylint.yml | 66 ------------------------------------ 1 file changed, 66 deletions(-) delete mode 100644 .github/workflows/pylint.yml diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml deleted file mode 100644 index 7713f61..0000000 --- a/.github/workflows/pylint.yml +++ /dev/null @@ -1,66 +0,0 @@ -name: Python CI - -on: [push, workflow_dispatch] - -jobs: - lint: - runs-on: ubuntu-latest - strategy: - matrix: - python-version: ["3.11"] - os: [ubuntu-latest, windows-latest] - steps: - - uses: actions/checkout@v4 - - - name: Lint Code - shell: bash - run: | - pip install -r YOLO-detect-buoys/requirements.txt - pip install -U pylint - pylint $(git ls-files '*.py') - - test: - runs-on: ubuntu-latest - env: - ROBOFLOW_API_KEY: ${{ secrets.ROBOFLOW_API_KEY }} - ROBOFLOW_PROJECT_ID: ${{ vars.ROBOFLOW_PROJECT_ID }} - WORKSPACE: ${{ vars.WORKSPACE }} - ROBOFLOW_PROJECT_VERSION: ${{ vars.ROBOFLOW_PROJECT_VERSION }} - DATASET_FORMAT: ${{ vars.DATASET_FORMAT }} - strategy: - matrix: - python-version: ["3.11"] - os: [ubuntu-latest, windows-latest] - steps: - - uses: actions/checkout@v4 - - - name: Run tests - shell: bash - run: | - pip install -r YOLO-detect-buoys/requirements.txt - pip install -U pytest - pytest --capture=sys --disable-warnings -v - - codecov: - runs-on: ubuntu-latest - needs: test - env: - ROBOFLOW_API_KEY: ${{ secrets.ROBOFLOW_API_KEY }} - ROBOFLOW_PROJECT_ID: ${{ vars.ROBOFLOW_PROJECT_ID }} - WORKSPACE: ${{ vars.WORKSPACE }} - ROBOFLOW_PROJECT_VERSION: ${{ vars.ROBOFLOW_PROJECT_VERSION }} - DATASET_FORMAT: ${{ vars.DATASET_FORMAT }} - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-python@v2 - with: - python-version: "3.11" - - name: Install requirements - run: pip install -r YOLO-detect-buoys/requirements.txt - - name: Run tests and collect coverage - run: pytest --capture=sys --cov --disable-warnings -v --cov-report=xml - - name: Upload coverage reports to Codecov with GitHub Action - uses: codecov/codecov-action@v4.0.1 - with: - token: ${{ secrets.CODECOV_TOKEN }} - slug: ${{ github.repository }} From 
89102ab76ded63fc05c509c530753b683171cfb7 Mon Sep 17 00:00:00 2001 From: Andreas Kluge Svendsrud <89779148+kluge7@users.noreply.github.com> Date: Sun, 26 Oct 2025 19:32:22 +0100 Subject: [PATCH 20/20] Delete .gitignore --- .gitignore | 169 ----------------------------------------------------- 1 file changed, 169 deletions(-) delete mode 100644 .gitignore diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 9128793..0000000 --- a/.gitignore +++ /dev/null @@ -1,169 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -#poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. -#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml - -# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. -#.idea/ - -runs/ -*.pt -*jpg - -# data -data/ -*.out -*/*.out
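To close the series out, a hedged usage sketch for the training jobs it adds. The directory, script, and log file names come from the patches above; sbatch, squeue, and tail are standard Slurm and coreutils commands rather than part of the series, and the empty Roboflow API key placeholders in each train.py must be filled in before submitting:

cd yolo_roboflow_training            # or unet_roboflow_training
sbatch Job.slurm                     # submit the batch script added above
squeue -u $USER                      # watch the job's state in the queue
tail -f vortex_img_process_log.out   # follow the file named by #SBATCH --output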