Yolov8n pose tutorial2 (#1100)
* Minor fixes to YOLOv8n pose estimation tutorial and README update
Idan-BenAmi authored Jun 9, 2024
1 parent 26044fd commit 12ba094
Showing 3 changed files with 20 additions and 14 deletions.
5 changes: 3 additions & 2 deletions tutorials/mct_model_garden/models_pytorch/yolov8/yolov8.py
@@ -34,11 +34,12 @@
import yaml
from torch import Tensor
from huggingface_hub import PyTorchModelHubMixin
import importlib

from model_compression_toolkit.core.pytorch.pytorch_device_config import get_working_device
from sony_custom_layers.pytorch.object_detection.nms import multiclass_nms

from tutorials.mct_model_garden.models_pytorch.yolov8.yolov8_postprocess import postprocess_yolov8_keypoints
if importlib.util.find_spec("sony_custom_layers"):
from sony_custom_layers.pytorch.object_detection.nms import multiclass_nms


def yaml_load(file: str = 'data.yaml', append_filename: bool = False) -> Dict[str, any]:
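For context, a minimal sketch of the optional-import pattern this hunk adopts, so the custom NMS layer is only imported when `sony_custom_layers` is installed. The fallback branch and the helper below are illustrative, not part of the commit:

```python
import importlib.util

# Guard the optional dependency: find_spec returns None when the package is absent.
if importlib.util.find_spec("sony_custom_layers"):
    from sony_custom_layers.pytorch.object_detection.nms import multiclass_nms
else:
    multiclass_nms = None  # illustrative fallback; flows without on-device NMS still work


def has_custom_nms() -> bool:
    """Illustrative check a caller could make before attaching on-device NMS."""
    return multiclass_nms is not None
```

Using `import importlib.util` (rather than `import importlib` alone) guarantees the `util` submodule is loaded before `find_spec` is called.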
4 changes: 2 additions & 2 deletions tutorials/notebooks/imx500_notebooks/README.md
@@ -53,13 +53,13 @@ deployment performance.
</tr>
<tr>
<td >Instance Segmentation</td>
<td>YOLOv8n-seg</td>
<td> <a href="pytorch/pytorch_yolov8n_seg_for_imx500.ipynb">YOLOv8n-seg</a></td>
<td>PyTorch</td>
<td>COCO</td>
</tr>
<tr>
<td>Pose Estimation</td>
<td>YOLOv8n-pose</td>
<td> <a href="pytorch/pytorch_yolov8n_pose_for_imx500.ipynb">YOLOv8n-pose</a></td>
<td>PyTorch</td>
<td>COCO</td>
</tr>
25 changes: 15 additions & 10 deletions tutorials/notebooks/imx500_notebooks/pytorch/pytorch_yolov8n_pose_for_imx500.ipynb
@@ -50,8 +50,7 @@
"!pip install -q torch\n",
"!pip install onnx\n",
"!pip install -q pycocotools\n",
"!pip install 'huggingface-hub>=0.21.0'\n",
"!pip install --pre 'sony-custom-layers-dev>=0.2.0.dev5'"
"!pip install 'huggingface-hub>=0.21.0'"
]
},
{
@@ -130,9 +129,14 @@
"metadata": {},
"outputs": [],
"source": [
"from tutorials.mct_model_garden.models_pytorch.yolov8.yolov8 import ModelPyTorch, yaml_load, model_predict\n",
"from tutorials.mct_model_garden.models_pytorch.yolov8.yolov8 import ModelPyTorch, yaml_load\n",
"\n",
"# Load the adjusted model from hugging-face\n",
"cfg_dict = yaml_load(\"tutorials/mct_model_garden/models_pytorch/yolov8/yolov8n-pose.yaml\", append_filename=True)\n",
"model = ModelPyTorch.from_pretrained(\"SSI-DNN/pytorch_yolov8n_640x640\", cfg=cfg_dict)"
"model = ModelPyTorch.from_pretrained(\"SSI-DNN/pytorch_yolov8n_640x640\", cfg=cfg_dict)\n",
"\n",
"# Ensure the model is in evaluation mode\n",
"model = model.eval()"
]
},
{
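A short aside on the `model.eval()` call added in this cell: evaluation mode makes forward passes deterministic before calibration and accuracy measurement. A minimal sketch with generic modules (not the tutorial's model) shows the difference:

```python
import torch
import torch.nn as nn

# Dropout masks and BatchNorm running-stat updates only apply in train mode,
# which would make calibration statistics and metrics non-deterministic.
block = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.Dropout(0.5))
x = torch.randn(1, 3, 32, 32)

block.train()
y1, y2 = block(x), block(x)
print(torch.allclose(y1, y2))  # False: dropout samples a new mask each call

block.eval()
y1, y2 = block(x), block(x)
print(torch.allclose(y1, y2))  # True: deterministic forward passes
```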
@@ -170,7 +174,7 @@
" annotation_file=REPRESENTATIVE_DATASET_ANNOTATION_FILE,\n",
" preprocess=yolov8_preprocess_chw_transpose)\n",
"\n",
"gptq_representative_dataset = DataLoader(representative_dataset, BATCH_SIZE, shuffle=True)\n",
"representative_dataset_gen = DataLoader(representative_dataset, BATCH_SIZE, shuffle=True)\n",
"\n",
"# Define representative dataset generator\n",
"def get_representative_dataset(n_iter: int, dataset_loader: Iterator[Tuple]):\n",
@@ -191,15 +195,15 @@
"\n",
"# Get representative dataset generator\n",
"representative_dataset_gen = get_representative_dataset(n_iter=n_iters,\n",
" dataset_loader=representative_dataset)\n",
" dataset_loader=representative_dataset_gen)\n",
"\n",
"# Set IMX500-v1 TPC\n",
"tpc = mct.get_target_platform_capabilities(fw_name=\"pytorch\",\n",
" target_platform_name='imx500',\n",
" target_platform_version='v1')\n",
"\n",
"# Specify the necessary configuration for mixed precision quantization. To keep the tutorial brief, we'll use a small set of images and omit the hessian metric for mixed precision calculations. It's important to be aware that this choice may impact the resulting accuracy. \n",
"mp_config = mct.core.MixedPrecisionQuantizationConfig(num_of_images=64)\n",
"mp_config = mct.core.MixedPrecisionQuantizationConfig(num_of_images=5)\n",
"config = mct.core.CoreConfig(mixed_precision_config=mp_config,\n",
" quantization_config=mct.core.QuantizationConfig(shift_negative_activation_correction=True,\n",
" concat_threshold_update=True))\n",
@@ -371,6 +375,7 @@
"source": [
"from tutorials.mct_model_garden.models_pytorch.yolov8.yolov8 import keypoints_model_predict\n",
"from tutorials.mct_model_garden.evaluation_metrics.coco_evaluation import coco_evaluate\n",
"from model_compression_toolkit.core.pytorch.pytorch_device_config import get_working_device\n",
"\n",
"EVAL_DATASET_FOLDER = './coco/val2017'\n",
"EVAL_DATASET_ANNOTATION_FILE = './coco/annotations/person_keypoints_val2017.json'\n",
@@ -380,7 +385,7 @@
"output_resize = {'shape': (INPUT_RESOLUTION, INPUT_RESOLUTION), 'aspect_ratio_preservation': True}\n",
"\n",
"# Evaluate the model on coco\n",
"eval_results = coco_evaluate(model=model,\n",
"eval_results = coco_evaluate(model=model.to(get_working_device()),\n",
" dataset_folder=EVAL_DATASET_FOLDER,\n",
" annotation_file=EVAL_DATASET_ANNOTATION_FILE,\n",
" preprocess=yolov8_preprocess_chw_transpose,\n",
@@ -410,7 +415,7 @@
"outputs": [],
"source": [
"# Evaluate the quantized model with PostProcess on coco\n",
"eval_results = coco_evaluate(model=quant_model,\n",
"eval_results = coco_evaluate(model=quant_model.to(get_working_device()),\n",
" dataset_folder=EVAL_DATASET_FOLDER,\n",
" annotation_file=EVAL_DATASET_ANNOTATION_FILE,\n",
" preprocess=yolov8_preprocess_chw_transpose,\n",
@@ -439,7 +444,7 @@
"outputs": [],
"source": [
"# Evaluate the quantized using GPTQ model with PostProcess on coco\n",
"eval_results = coco_evaluate(model=gptq_quant_model,\n",
"eval_results = coco_evaluate(model=gptq_quant_model.to(get_working_device()),\n",
" dataset_folder=EVAL_DATASET_FOLDER,\n",
" annotation_file=EVAL_DATASET_ANNOTATION_FILE,\n",
" preprocess=yolov8_preprocess_chw_transpose,\n",
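The `.to(get_working_device())` calls added to the three evaluation cells keep the model on the same device as the preprocessed inputs, avoiding device-mismatch errors inside `coco_evaluate`. A hedged, self-contained stand-in for such a helper (illustrative only, not the MCT API) would be:

```python
import torch


def resolve_working_device() -> torch.device:
    """Illustrative stand-in for a working-device helper: prefer CUDA when available."""
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Move the (quantized) model once before evaluation, e.g.:
# model = model.to(resolve_working_device())
```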
