-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathoystersize.py
1865 lines (1617 loc) · 79.1 KB
/
oystersize.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""
oystersize.py provides size metrics of triploid Crassostrea
virginica oysters from images of population samples for all stages of
aquaculture production. With the user interacting through a tkinter GUI,
the program detects and classifies oysters and size standard objects in
an image. Once the size standard's known radius is entered, oyster sizes,
mean, median, and range are reported, along with an annotated image.
Results can be saved to file. Detection is based on a YOLOv8n model from
Ultralytics using transfer learning. The oyster sizing model was trained
on two custom classes: oyster images at various stages of growth, and
real and synthetic disk-like images as size standards.
Quit the program with Esc key, Ctrl-Q key, the close window icon of the
report window or File menubar. From the command line, use Ctrl-C.
See "Requirements" and "Usage" in the README.md file for more information.
Developed using Python 3.9 through 3.12.7.
"""
# Copyright (C) 2024 C.S. Echt, under GNU General Public License
# No warranty. Use at your own risk.
# Standard library imports.
from pathlib import Path
from signal import signal, SIGINT
from statistics import mean, median
from sys import exit as sys_exit
from time import time
from typing import Union, List, Tuple
# Third party imports.
# tkinter(Tk/Tcl) is included with most Python3 distributions,
# but may sometimes need to be regarded as third-party.
try:
import cv2
import numpy as np
import tkinter as tk
from tkinter import ttk, messagebox, filedialog, Event
from torch import device, cuda
from torch.backends import mps # used for macOS M1+ chips
from ultralytics import YOLO
from ultralytics.utils.ops import xywh2xyxy
except (ImportError, ModuleNotFoundError) as import_err:
sys_exit(
'*** One or more required Python packages were not found'
' or need an update:\n'
'OpenCV-Python, NumPy, tkinter (Tk/Tcl), PyTorch, Ultralytics.\n'
'To install: from the current folder, run this command'
' for the Python package installer (PIP):\n'
' python3 -m pip install -r requirements.txt\n'
'Alternative command formats (system dependent):\n'
' py -m pip install -r requirements.txt (Windows)\n'
' pip install -r requirements.txt\n'
'You may also install directly using, for example, this command,'
' for the Python package installer (PIP):\n'
' python3 -m pip install ultralytics\n'
'On Linux, if tkinter is the problem, then you may need:\n'
' sudo apt-get install python3-tk\n'
'See also: https://numpy.org/install/\n'
' https://tkdocs.com/tutorial/install.html\n'
' https://docs.opencv2.org/4.6.0/d5/de5/tutorial_py_setup_in_windows.html\n'
'Consider running this app and installing missing packages in a virtual environment.\n'
f'Error message:\n{import_err}')
# Local application imports.
# To ensure exit messaging, place local imports after try...except imports.
from utility_modules import (vcheck,
utils,
manage,
constants as const,
to_precision as to_p)
MY_OS = const.MY_OS
PROGRAM_NAME = utils.program_name()
class ProcessImage(tk.Tk):
    """
    Apply YOLO object detection for tkinter and OpenCV image processing.

    The root Tk window; subclassed by ViewImage. Holds the input and
    annotated image arrays plus the most recent model detections.
    """

    def __init__(self):
        super().__init__()

        # Arrays of images to be processed. When used within a method,
        # the purpose of self.tkimg[*] as an instance attribute is to
        # retain the attribute reference and thus prevent garbage collection.
        # Dict values will be defined for panels of PIL ImageTk.PhotoImage
        # as Label images displayed in their respective tkimg_window Toplevel.
        # The cvimg images are numpy arrays.
        self.tkimg: dict = {}
        self.cvimg: dict = {}
        for _name in const.WINDOW_TITLES:
            self.tkimg[_name] = tk.PhotoImage()
            self.cvimg[_name] = const.STUB_ARRAY

        # Note: The matching selector widgets for the following
        # control variables are in ViewImage __init__.
        # Dictionary attribute allows for easy addition of new sliders.
        self.confidence_slide_val = tk.IntVar()

        # Most recent detections; both arrays are (re)defined in prediction().
        self.predicted_boxes = np.array([])  # x-ctr, y-ctr, width, height
        self.predicted_class_distribution = np.array([])  # 0 is 'disk', 1 is 'oyster'

    def prediction(self):
        """
        Run YOLO inference on the current input image, with size
        standard object, and store detected bounding boxes (xywh, int)
        and their class labels (int) as instance attributes.
        """
        # Initialize model.
        # The pytorch best.pt model is quite fast using a CPU for prediction.
        # While the ONNX best.onnx model is faster, the perceived difference is
        #  not noticeable in most cases.
        model_to_use = utils.valid_path_to(f"models/{const.MODEL_NAME}/weights/best.pt")
        # model_to_use = utils.valid_path_to(f"models/{const.MODEL_NAME}/weights/best.onnx")
        # model_to_use = utils.valid_path_to(f"models/{const.MODEL_NAME}/weights/best.mlpackage")
        model = YOLO(model_to_use, task='detect')

        # Run inference, on CPU if no GPU available.
        # available_device = ('mps' if MY_OS == 'dar' and mps.is_available()
        #                     else device('cuda' if cuda.is_available() else 'cpu')
        #                     )
        # Slider percentage converted to the 0-1 confidence fraction.
        confidence: float = self.confidence_slide_val.get() / 100

        # self.cvimg['input'] defined in open_input() from self.input_file_path
        #  via cv2.imread() of filedialog.askopenfilename().
        # Use a copy of input image to avoid overwriting the original.
        # The original is displayed as the 'sized' image when no obj are found.
        # Use 'cpu' device for onnx model. cpu also works well for pytorch and coreml.
        # half=True does not work for onnx model or on macOS and is slower for pt model.
        results = model.predict(
            source=self.cvimg['input'].copy(),
            imgsz=const.PREDICT_IMGSZ,
            conf=confidence,
            device='cpu',
            iou=const.PREDICT_IOU,
            max_det=const.PREDICT_MAX_DET,
            half=False,
            augment=False,
            verbose=False,
        )

        # Device-agnostic conversion of tensors to numpy arrays.
        box_data = results[0].boxes
        self.predicted_boxes = box_data.xywh.numpy(force=True).astype(int)
        self.predicted_class_distribution = box_data.cls.numpy(force=True).astype(int)
        results.clear()  # Clear the results tensor to free memory. Useful?
class ViewImage(ProcessImage):
"""
A suite of methods to display images, YOLO model object detections,
and results text.
Methods:
set_auto_scale_factor
show_info_message
widget_control
update_image
is_interior
find_interior_objects
find_true_pos_objects
get_standard_sizes
validate_size_entry
determine_mean_standard_size
get_sig_fig
convert_bbox_data
get_text_position_offsets
annotate_object
display_all_objects
display_metrics_in_image
report_results
display_processing_info
process_sizes
process_prediction
"""
def __init__(self):
    super().__init__()

    # Toggles first-run vs. update messaging in display_processing_info().
    self.first_run: bool = True

    # Main-window frames for the results report and settings selectors.
    self.report_frame = tk.Frame()
    self.selectors_frame = tk.Frame()
    # self.configure(bg='green')  # for development.

    # Size-standard Entry widget, its StringVar value, and labels.
    self.entry = {
        'size_entry': tk.Entry(master=self.selectors_frame),
        'size_std_val': tk.StringVar(master=self.selectors_frame),
        'size_std_lbl': tk.Label(master=self.selectors_frame),
        'size_std_lbl2': tk.Label(master=self.selectors_frame),
    }

    self.button = {
        'update': ttk.Button(master=self),
        'save_results': ttk.Button(master=self),
        'new_input': ttk.Button(master=self),
    }

    self.slider = {
        'confidence': tk.Scale(master=self.selectors_frame),
        'confidence_lbl': tk.Label(master=self.selectors_frame),
    }

    # img_label dictionary is set up in SetupApp.setup_image_windows(),
    #  but is used in Class methods here.
    self.img_label: dict = {}

    # metrics dict is populated in SetupApp.open_input().
    self.metrics: dict = {}

    # Annotation styling; values are set from input image metrics.
    self.line_thickness: int = 0
    self.font_scale: float = 0

    # Processing-timer values reported by display_processing_info().
    self.time_start: float = 0
    self.elapsed: Union[float, int, str] = 0

    self.screen_width: int = 0
    self.scale_factor = tk.DoubleVar()
    self.color_val = tk.StringVar()

    # Info label is gridded in configure_main_window().
    self.info_txt = tk.StringVar()
    self.info_label = tk.Label(textvariable=self.info_txt)

    # Defined in widget_control() to reset values that user may have
    #  tried to change during prolonged processing times.
    self.slider_val_saved: str = ''

    # The following group of attributes is set in SetupApp.open_input().
    self.input_file_path: str = ''
    self.input_file_name: str = ''
    self.input_folder_name: str = ''
    self.input_ht: int = 0
    self.input_w: int = 0

    # Attributes used for filtering, sizing, and reporting.
    self.interior_standards = np.array([])
    self.interior_oysters = np.array([])
    self.true_pos_standards = np.array([])
    self.true_pos_oysters = np.array([])
    # NOTE(review): standards_mean_px_size is rebound to a str by
    #  determine_mean_standard_size(); downstream code expects the str.
    self.standards_mean_px_size: float = 0
    self.unit_per_px: float = 0
    self.standards_mean_measured_size = tk.DoubleVar()
    self.bbox_ratio_mean: float = 0
    self.oyster_sizes: List[float] = []
    self.report_txt: str = ''
def set_auto_scale_factor(self) -> None:
    """
    Set a default display scale factor, as a user convenience, so the
    image easily fits on screen: ~1/3 of screen pixel width for
    landscape input, ~2/3 of screen pixel height for portrait input.
    Called from open_input() and call_cmd().apply_default_settings().
    Returns: None
    """
    landscape_oriented = self.input_w >= self.input_ht
    if landscape_oriented:
        fitted_scale = (self.screen_width * 0.33) / self.input_w
    else:
        fitted_scale = (self.winfo_screenheight() * 0.66) / self.input_ht
    self.scale_factor.set(round(fitted_scale, 2))
def show_info_message(self, info: str, color: str) -> None:
    """
    Update the informational message shown in the report and settings
    window, in the requested font color.

    Args:
        info: The text string of the message to display.
        color: Either a key of the const.COLORS_TK dictionary or any
            Tk-compatible fg color string (hex code or X11 name).
    Returns: None
    """
    # Fall back to the raw color string when it is not a COLORS_TK key.
    fg_color = const.COLORS_TK.get(color, color)
    self.info_label.config(fg=fg_color)
    self.info_txt.set(info)
def widget_control(self, action: str) -> None:
    """
    Used to disable settings widgets when processing is running.
    Provides a watch cursor while widgets are disabled. Also, gets
    Scale() values at time of disabling and resets them upon
    enabling, thus preventing user click events, which are retained
    in memory during processing, from changing slider position
    post-processing. Called from process_prediction().

    Args:
        action: Either 'off' to disable widgets, or 'on' to enable.
    Returns:
        None
    """
    if action == 'off':
        self.slider['confidence'].configure(state=tk.DISABLED)

        # Grab the current slider values, in case user tries to change.
        # NOTE(review): stores the IntVar's int in slider_val_saved,
        #  which is annotated str for its '' reset sentinel — works,
        #  but confirm before tightening the annotation.
        self.slider_val_saved = self.confidence_slide_val.get()
        for _, _w in self.button.items():
            _w.grid_remove()
        self.show_info_message(info='\nProcessing...\n\n', color='black')
        for _, _w in self.entry.items():
            # StringVar members have no widget state; skip them.
            if not isinstance(_w, tk.StringVar):
                _w.configure(state=tk.DISABLED)
        self.config(cursor='watch')
    else:  # is 'on'
        self.slider['confidence'].configure(state=tk.NORMAL)

        # Restore the slider values to overwrite any changes.
        self.confidence_slide_val.set(self.slider_val_saved)
        for _, _w in self.button.items():
            _w.grid()
        for _, _w in self.entry.items():
            if not isinstance(_w, tk.StringVar):
                _w.configure(state=tk.NORMAL)
        self.config(cursor='')
        self.slider_val_saved = ''

    # Use update(), not update_idletasks, here to speed up windows'
    #  response.
    self.update()
def update_image(self, img_name: str) -> None:
    """
    Convert a cv2 image array to a scaled tk PhotoImage and refresh
    its window Label for immediate display.
    Calls module manage.tk_image(). Called from all methods that
    display an image.

    Args:
        img_name: An item name used in the image_name tuple, for
            use as key in tkimg, cvimg, and img_label dictionaries.
    Returns:
        None
    """
    scaled_photo = manage.tk_image(
        image=self.cvimg[img_name],
        scale_factor=self.scale_factor.get(),
    )
    # Retain the reference on self to prevent garbage collection.
    self.tkimg[img_name] = scaled_photo
    self.img_label[img_name].configure(image=scaled_photo)
def is_interior(self, xywh_bbox) -> bool:
    """
    Report whether an object's bounding box lies fully inside the
    input image, i.e. no edge is within const.EDGE_PROXIMITY pixels
    of an image border.
    Called from find_interior_objects().

    Args:
        xywh_bbox: A numpy array of YOLO bounding box coordinates;
            x_center, y_center, width, height.
    Returns:
        True if the box is completely within the input image,
        False if not.
    """
    # Convert YOLO centered-box format to corner coordinates.
    x_center, y_center, box_w, box_h = xywh_bbox
    left = x_center - box_w / 2
    top = y_center - box_h / 2
    right = x_center + box_w / 2
    bottom = y_center + box_h / 2

    # All four edges must clear the proximity margin of the image border.
    return (left > const.EDGE_PROXIMITY
            and top > const.EDGE_PROXIMITY
            and right < self.input_w - const.EDGE_PROXIMITY
            and bottom < self.input_ht - const.EDGE_PROXIMITY)
def find_interior_objects(self) -> None:
    """
    Split predictions by class, then keep only objects whose boxes
    are not on or near the image border.
    Popup window warns user if no objects are found.
    Calls is_interior(), utils.no_objects_found_msg().
    Called from process_sizes().
    Returns: None
    """
    # NOTE: predicted_boxes and predicted_class_distribution are
    #  defined in prediction(), called from process_prediction().
    # Training class indexes: 0 is 'disk' (standard), 1 is 'oyster'.
    standards: np.ndarray = self.predicted_boxes[self.predicted_class_distribution == 0]
    oysters: np.ndarray = self.predicted_boxes[self.predicted_class_distribution == 1]

    def _keep_interior(boxes: np.ndarray) -> np.ndarray:
        """Return only the boxes that pass the is_interior() test."""
        if not boxes.size:
            return np.array([])
        interior_mask = np.array([self.is_interior(box) for box in boxes], dtype=bool)
        return boxes[interior_mask]

    self.interior_standards = _keep_interior(standards)
    self.interior_oysters = _keep_interior(oysters)

    # Warn when a class has no detections at all, or none interior.
    if not standards.size:
        utils.no_objects_found_msg(caller='std_objects')
    elif not self.interior_standards.size:
        utils.no_objects_found_msg(caller='interior_std')
    if not oysters.size:
        utils.no_objects_found_msg(caller='oyster_objects')
    elif not self.interior_oysters.size:
        utils.no_objects_found_msg(caller='interior_oyster')
def find_true_pos_objects(self) -> None:
    """
    Filter out objects of one class that were also detected as the
    other class, that is, keep true positive class detections.
    Defines true_pos_standards and true_pos_oysters.
    Need to call after find_interior_objects() and before
    determine_mean_standard_size(). Called from process_sizes().
    Calls utils.box_is_very_close_inarray().
    Returns: None
    """
    # Note: may need to adjust closeness threshold tolerance of
    #  utils.centered_boxes_very_close(); 0.1 is a good starting point.
    oyster_keep_flags = np.array(
        [not utils.box_is_very_close_inarray(box, self.interior_standards, 0.1)
         for box in self.interior_oysters]
    )
    standard_keep_flags = np.array(
        [not utils.box_is_very_close_inarray(box, self.interior_oysters, 0.1)
         for box in self.interior_standards]
    )

    # Boolean-mask indexing keeps only detections that are not close
    #  to any detection of the other class; empty masks mean the
    #  source class had no interior detections.
    if standard_keep_flags.size:
        self.true_pos_standards = self.interior_standards[standard_keep_flags]
    else:
        self.true_pos_standards = np.array([])

    if oyster_keep_flags.size:
        self.true_pos_oysters = self.interior_oysters[oyster_keep_flags]
    else:
        self.true_pos_oysters = np.array([])
def set_bbox_ratio_mean(self, bbox_ary) -> None:
"""
Calculate the width and height bounding box ratios of objects
and set the mean for setting a size correction factor. Generally
used for oysters, but can be used for standards as well.
Called from process_sizes().
Args:
bbox_ary: A numpy array of bounding boxes in xywh format.
Returns: None
"""
if bbox_ary.size:
wh_max = bbox_ary[:, 2:].max(axis=1)
wh_min = bbox_ary[:, 2:].min(axis=1)
self.bbox_ratio_mean = (wh_max / wh_min).mean()
else:
self.bbox_ratio_mean = 0
def get_standard_sizes(self) -> np.ndarray:
"""
Return the maximum of pixel widths and heights of valid size
standards from a ndarray of one or more xywh bounding boxes.
Need to confirm truth of true_pos_standards.size before calling.
Called from determine_mean_standard_size(), display_processing_info().
Returns: A numpy array of standards' maximum pixel dimension.
"""
return self.true_pos_standards[:, 2:].max(axis=1)
def validate_size_entry(self) -> None:
    """
    Check whether the custom size Entry() value is a positive real
    number. Strips separator characters, posts a popup message and
    resets the entry to '1' when the value is not valid.
    Called from process_sizes().
    Returns: None
    """
    # Keep only characters that can form an int or float literal;
    #  commas and other separators are silently dropped.
    raw_entry = self.entry['size_std_val'].get()
    cleaned = ''.join(filter(lambda _c: _c.isdigit() or _c in {'.', '_'}, raw_entry))
    try:
        # float() raises ValueError when the cleaned entry is not a
        #  number; a zero value is rejected the same way.
        if float(cleaned) == 0:
            raise ValueError
    except ValueError:
        messagebox.showinfo(title='Custom size',
                            detail='Enter a number > 0.\n'
                                   'Accepted types:\n'
                                   ' integer: 26, 2651, 2_651 or 2,651\n'
                                   ' decimal: 26.5, 0.265, .2\n'
                            )
        self.entry['size_std_val'].set('1')
    else:
        self.entry['size_std_val'].set(cleaned)
def determine_mean_standard_size(self) -> None:
    """
    Calculate the mean pixel size of detected size standards and,
    with the custom size standard value in the Entry widget, the
    unit_per_px conversion factor.
    If no size standard found, the entry resets to '1' (pixel units).
    Called from process_sizes().
    Returns: None
    """
    # Note: no standard objects found warn msg is in find_interior_objects().
    if not self.true_pos_standards.size:
        self.entry['size_std_val'].set('1')
        return

    # Flag from display_processing_info() if standards' sizes are non-concordant.
    # Note: keep mean as string for proper SF evaluation in get_sig_fig().
    # NOTE(review): this rebinds standards_mean_px_size (initialized as
    #  float 0 in __init__) to a str; get_sig_fig() and the division
    #  below both rely on the str form — confirm before changing.
    std_sizes = self.get_standard_sizes()
    self.standards_mean_px_size: str = to_p.to_precision(
        value=std_sizes.mean(),
        precision=utils.count_sig_fig(std_sizes.min())
    )

    # Get the entered standard size value and calculate the mean size.
    # Note: standards_mean_measured_size is used only for reporting and
    #  is converted to correct sig. fig. in report_results().
    # unit_per_px is used in convert_bbox_data() to calculate
    #  object sizes.
    size_std_val = float(self.entry['size_std_val'].get())
    self.unit_per_px = size_std_val / float(self.standards_mean_px_size)
    std_calc_sizes: np.ndarray = std_sizes * self.unit_per_px
    self.standards_mean_measured_size.set(std_calc_sizes.mean())
def get_sig_fig(self) -> int:
    """
    Determine the number of significant figures to display: the
    lesser of the entered custom size standard value's sig figs and
    those of the standards' mean pixel size.
    Called from convert_bbox_data(), display_metrics_in_image(),
    report_results(), process_sizes().
    Calls utils.count_sig_fig().
    Returns: The number of significant figures to use.
    """
    # Note: standards_mean_px_size is defined in determine_mean_standard_size().
    entered_sf = utils.count_sig_fig(self.entry['size_std_val'].get())
    standard_sf = utils.count_sig_fig(self.standards_mean_px_size)
    return min(entered_sf, standard_sf)
def convert_bbox_data(self, bbox: np.ndarray) -> tuple:
    """
    Convert a bounding box from xywh to xyxy format for drawing a cv2
    rectangle, and measure the object's size from its longest box
    dimension multiplied by the unit_per_px factor.
    Called from display_all_objects().
    Calls to_p.to_precision() to set the number of significant figures.

    Args:
        bbox: A numpy array for a single bounding box element, in
            xywh centered-box format.
    Returns: tuple of data converted to use in cv2.rectangle, as
        (x1, y1, x2, y2), and the object length, as text.
    """
    corner1_x, corner1_y, corner2_x, corner2_y = xywh2xyxy(bbox)
    longest_side = bbox[2:].max()  # pixels; max of box width and height.
    measured_size: float = longest_side * self.unit_per_px

    # Apply sig. fig. for sizes in annotated image and report; when the
    #  size standard is the '1' placeholder, report raw pixels instead.
    size_as_text: str = to_p.to_precision(value=measured_size,
                                          precision=self.get_sig_fig())
    if self.entry['size_std_val'].get() == '1':
        size_as_text = f'{longest_side}px'
    return corner1_x, corner1_y, corner2_x, corner2_y, size_as_text
def get_text_position_offsets(self, txt_string: str) -> Tuple[float, int]:
    """
    Calculate the x and y position correction factors that help
    center *txt_string* in cv2.putText() when annotating objects.
    Called from annotate_object().

    Args:
        txt_string: A string of the object's size to display.
    Returns:
        A tuple of (half the rendered text pixel width, baseline).
    """
    rendered_size, text_baseline = cv2.getTextSize(
        text=txt_string,
        fontFace=const.FONT_TYPE,
        fontScale=self.font_scale,
        thickness=self.line_thickness)
    half_width = rendered_size[0] / 2
    return half_width, text_baseline
def annotate_object(self,
                    point1: tuple,
                    point2: tuple,
                    object_size: str,
                    object_name: str) -> None:
    """
    Draw a rectangle around the object and annotate its size.
    Called from display_all_objects().
    Calls get_text_position_offsets().

    Args:
        point1: An x,y tuple of the top-left corner of the bounding box.
        point2: An x,y tuple of the bottom-right corner of the bounding box.
        object_size: A string of the object's size to display.
        object_name: A string of the predicted object, either
            'standard' for size standard or 'oyster' for oyster; used
            to specify the annotation color.
    Returns: None
    """
    # Oysters use the user-selected color; standards are always purple.
    color_selection: tuple = const.COLORS_CV.get(self.color_val.get(), 'green')
    if object_name == 'standard':
        color_selection = const.COLORS_CV['DarkOrchid1']

    # Use 'Cv' for oyster annotation instead of its measured size.
    # if object_name == 'oyster':
    #     object_size = 'Cv'

    # Draw the bounding box rectangle around the object.
    arr = cv2.rectangle(img=self.cvimg['sized'],
                        pt1=point1,
                        pt2=point2,
                        color=color_selection,
                        thickness=self.line_thickness,
                        )

    # Use the given center percentage of the box area to determine
    #  the best text color contrast in the annotation area.
    # Keep in mind that the center rect of small objects may be smaller
    #  than the annotation text, so text contrast may not be optimal.
    #  Adjusting the center_pct value may help.
    box = arr[point1[1]:point2[1], point1[0]:point2[0], :]
    text_contrast = utils.auto_text_contrast(box_area=box, center_pct=0.3)

    # Center the size text in the bounding box rectangle with org param.
    # org: bottom-left corner of the text annotation for an object.
    offset_x, offset_y = self.get_text_position_offsets(object_size)
    center_x = (point1[0] + point2[0]) // 2
    center_y = (point1[1] + point2[1]) // 2
    cv2.putText(img=self.cvimg['sized'],
                text=object_size,
                org=(round(center_x - offset_x),
                     round(center_y + offset_y)),
                fontFace=const.FONT_TYPE,
                fontScale=self.font_scale,
                color=text_contrast,
                thickness=self.line_thickness,
                lineType=cv2.LINE_AA,
                )
def display_all_objects(self, event=None) -> Event:
    """
    Draw all annotated objects in the image, with their size, in a
    bounding box rectangle, and record oyster sizes for metrics.
    Called from process_prediction().
    Calls convert_bbox_data(), annotate_object(), update_image().

    Args:
        event: Used for any implicit tkinter event.
    Returns:
        Event, a formality to pass IDE inspections.
    """
    self.cvimg['sized'] = self.cvimg['input'].copy()

    # Clear the list of sized oysters used in reporting metrics for
    #  when a new image or a new confidence limit is processed.
    # Clearing is not needed for calls from view-related methods,
    #  but doing so keeps things simple w/o affecting performance.
    self.oyster_sizes.clear()

    def _display_objects(objects: np.ndarray, name: str) -> None:
        """Annotate each box; record sizes for the oyster class."""
        for bbox in objects:
            x1, y1, x2, y2, display_size = self.convert_bbox_data(bbox)
            if name == 'oyster':
                # Remove the 'px' unit suffix, if present, before the
                #  float conversion. Bug fix: removesuffix(), not
                #  rstrip('px'), which strips any trailing run of 'p'
                #  and 'x' characters rather than the exact suffix.
                oyster_size = display_size.removesuffix('px')
                self.oyster_sizes.append(float(oyster_size))
            self.annotate_object(point1=(x1, y1),
                                 point2=(x2, y2),
                                 object_size=display_size,
                                 object_name=name)

    _display_objects(self.true_pos_oysters, name='oyster')
    _display_objects(self.true_pos_standards, name='standard')
    self.update_image('sized')
    return event
def display_metrics_in_image(self) -> None:
    """
    Display the image metrics (file name, count, average oyster size)
    in a translucent text box at the top left of the Sized Objects
    image.
    Called from start_now(), setup_main_menu(), bind_functions(),
    process_prediction(), config_entries().
    Calls display_all_objects(), get_text_position_offsets(),
    to_precision(), update_image().
    Returns: None
    """
    # Call display_all_objects() to ensure that the inserted text box
    #  is redrawn from the original image, not from the last annotated.
    # This prevents the alpha overlay from being applied multiple times.
    self.display_all_objects()
    _sf: int = self.get_sig_fig()
    _cf: float = utils.get_correction_factor(self.bbox_ratio_mean)
    if self.oyster_sizes:
        # Average of all recorded sizes; apply the correction factor
        #  only when it enlarges, as in report_results().
        # Bug fix: the uncorrected branch previously used only the
        #  first oyster's size instead of the mean.
        avg_size: float = mean(self.oyster_sizes)
        mean_size: float = avg_size * _cf if _cf > 1.0 else avg_size
        if self.entry['size_std_val'].get() == '1':
            mean_oyster_size = str(int(mean_size))  # pixel units
        else:
            mean_oyster_size = (f'{to_p.to_precision(value=mean_size, precision=_sf)};'
                                f' {_sf} sig figs')
    else:
        mean_oyster_size = 'n/a'

    display_metrics = (
        f'Image: {self.input_file_name}\n'
        f'Counted: {len(self.oyster_sizes)}\n'
        f'Avg Size: {mean_oyster_size}\n'
    )

    # Size the text box to fit the longest metrics line.
    longest_line: str = max(display_metrics.split('\n'), key=len)
    x_offset, y_offset = self.get_text_position_offsets(longest_line)
    textbox_px_width = round(x_offset * 2.2)
    # Bug fix: numpy image arrays are (height, width, channels), so
    #  image height is shape[0]; shape[1] returned the width.
    img_height = self.cvimg['input'].shape[0]
    ht_coefficient = 11  # Used for rectangle height and line spacing.

    # Template for transparent white text box:
    #  https://pyimagesearch.com/2016/03/07/transparent-overlays-with-opencv/
    overlay = self.cvimg['sized'].copy()
    cv2.rectangle(img=overlay,
                  pt1=(5, 5),
                  pt2=(textbox_px_width, img_height // ht_coefficient),
                  color=const.COLORS_CV['white'],
                  thickness=cv2.FILLED,
                  )
    cv2.addWeighted(src1=overlay,
                    alpha=const.ALPHA,
                    src2=self.cvimg['sized'].copy(),  # another copy, to avoid overwriting.
                    beta=1 - const.ALPHA,
                    gamma=0.0,
                    dst=self.cvimg['sized']
                    )

    # Need to put one line at a time to avoid overlapping text.
    # org: bottom-left corner of the text annotation.
    for i, line in enumerate(display_metrics.split('\n'), start=1):
        _y = i * img_height // (ht_coefficient * 4) + y_offset
        cv2.putText(img=self.cvimg['sized'],
                    org=(10, round(_y)),  # add 5 to the x indent of cv2.rectangle pt1.
                    text=line,
                    fontFace=const.FONT_TYPE,
                    fontScale=self.font_scale,
                    color=const.COLORS_CV['black'],
                    thickness=self.line_thickness,
                    lineType=cv2.LINE_AA,
                    )
    self.update_image('sized')
def report_results(self) -> None:
    """
    Write the current settings and cv metrics in a Text widget of
    the report_frame. Same text is printed in Terminal from "Save"
    button.
    Called from start_now(), process_prediction(), process_sizes().
    Calls get_sig_fig(), to_precision(), utils.display_report().
    Returns: None
    """
    size_std_dia = ('1, sizes are in pixels'
                    if self.entry['size_std_val'].get() == '1'
                    else self.entry['size_std_val'].get()
                    )
    num_std_objects = len(self.true_pos_standards)
    num_oysters = len(self.true_pos_oysters)
    sig_fig: int = self.get_sig_fig()
    avg_std_size: str = to_p.to_precision(
        value=str(self.standards_mean_measured_size.get()),
        precision=sig_fig)

    # Need this hack when a new image is opened, but instead of clicking
    #  "Process or Update", the user again clicks "New input"
    #  followed by 'Cancel' before clicking "Process or Update".
    # This is a rare case, but results in input_file_path being empty.
    # The processed input_file_name is retained, so report that.
    input_file = self.input_file_name if not self.input_file_path else self.input_file_path

    # Work up some summary metrics with correct number of sig. fig.
    #  and estimated corrected oyster size metrics.
    # When displaying sizes as pixels, don't apply sig. fig.
    if self.oyster_sizes and self.interior_standards.size:
        _cf: float = utils.get_correction_factor(self.bbox_ratio_mean)
        if num_oysters > 1:
            mean_size: float = mean(self.oyster_sizes) * _cf
            median_size: float = median(self.oyster_sizes) * _cf
        else:  # there is only 1 oyster, no stats needed.
            mean_size = median_size = self.oyster_sizes[0] * _cf
        if self.entry['size_std_val'].get() == '1':
            # Pixel units: report whole pixels, no sig. fig. applied.
            mean_oyster_size = str(int(mean_size))
            median_oyster_size = str(int(median_size))
            smallest = str(int(min(self.oyster_sizes)))
            biggest = str(int(max(self.oyster_sizes)))
        else:
            mean_oyster_size: str = to_p.to_precision(value=mean_size, precision=sig_fig)
            median_oyster_size: str = to_p.to_precision(value=median_size, precision=sig_fig)
            smallest: str = to_p.to_precision(value=min(self.oyster_sizes), precision=sig_fig)
            biggest: str = to_p.to_precision(value=max(self.oyster_sizes), precision=sig_fig)
        median_oyster_txt = (f'{median_oyster_size} (corrected: {"+" if _cf > 1.0 else ""}'
                             f'{round((_cf - 1) * 100, 1)}%)')
        size_range: str = f'{smallest}--{biggest}'
    elif not self.interior_standards.size and self.entry['size_std_val'].get() != '1':
        # No standards and a real size was entered: nothing to report.
        mean_oyster_size = median_oyster_txt = size_range = avg_std_size = 'n/a'
    # Oysters found, but no standards found and size std is '1', calculate for pixels.
    elif not self.interior_standards.size and self.entry['size_std_val'].get() == '1':
        mean_size = mean(self.oyster_sizes)
        median_size = median(self.oyster_sizes)
        mean_oyster_size = str(int(mean_size))
        median_oyster_size = str(int(median_size))
        smallest = str(int(min(self.oyster_sizes)))
        biggest = str(int(max(self.oyster_sizes)))
        median_oyster_txt = f'{median_oyster_size} (pixels)'
        size_range = f'{smallest}--{biggest}'
    else:  # standards found, but no oysters found.
        mean_oyster_size = median_oyster_txt = size_range = 'n/a'

    # Text is formatted for clarity in window, terminal, and saved file.
    # Divider symbol is Box Drawings Double Horizontal from https://coolsymbol.com/
    # Divider's unicode_escape: u'\u2550\'.
    # Report oyster mean and median size as corrected for box ratio mean,
    #  but report range as uncorrected to match object annotations in image.
    space = 26
    tab = " " * space
    divider = "═" * 20
    self.report_txt = (
        f'\nImage: {input_file}\n'
        f'Image size, pixels (w x h): {self.input_w}x{self.input_ht}\n'
        f'{divider}\n'
        f'{"Confidence level (%):".ljust(space)}{self.confidence_slide_val.get()}\n'
        f'{"# oysters:".ljust(space)}{num_oysters}\n'
        f'{"# standards:".ljust(space)}{num_std_objects}\n'
        f'{"Entered standard size:".ljust(space)}diameter = {size_std_dia}\n'
        f'{"Avg. standard size used:".ljust(space)}{avg_std_size} ({sig_fig} sig. fig. used)\n'
        f'{"Oyster sizes:".ljust(space)}average = {mean_oyster_size},'
        f' median = {median_oyster_txt},\n'
        f'{tab}range = {size_range} (uncorrected)'
    )
    utils.display_report(frame=self.report_frame,
                         report=self.report_txt)
def display_processing_info(self) -> None:
"""
Display an informational or warning message in the main window
for sizing and processing operations.
Called from process_prediction().
Calls show_info_message(), get_standard_sizes().
"""
# Elements are in order of condition priority. Use tuple instead of dict
# to ensure order of processing. The first true condition will break
# the loop and be the one displayed in the info_label. The color for
# message text in_info_message() is the second element of the message tuple.
processing_info_messages = (
(self.true_pos_standards.size and (self.get_standard_sizes()).std() > 10, (
'Detected standards (purple box) are different sizes.\n'
'Sizing results may be inaccurate.\n'
'Consider adjusting the Confidence level.\n',
"vermilion")),
(len(self.predicted_boxes) >= const.PREDICT_MAX_DET, (
f'DETECTION LIMIT of {const.PREDICT_MAX_DET} WAS MET.\n'
'Valid objects may have been excluded.\n'
'Sizing results may be inaccurate.\n'
'Consider increasing the Confidence level.',
"vermilion")),
(len(self.true_pos_oysters) < len(self.interior_oysters), (
'Overlapping false positives were found and removed.\n'
'Increase Confidence level if no size standard detected.\n\n',
"vermilion")),
(len(self.true_pos_standards) < len(self.interior_standards), (
'Overlapping false positives were found and removed.\n'
'Increasing Confidence level may improve results.\n\n',
"vermilion")),
(self.first_run, (
f'Initial processing time elapsed: {self.elapsed}\n'
'Identified size standard have a purple box.\n'
'Adjust Confidence level if any oysters have a purple box.\n',
"black")),
(not self.first_run, (
'Object detections completed.\n'
f'{self.elapsed} processing seconds elapsed.\n'
'Identified size standard have a purple box.\n'
'Adjust Confidence level if any oysters have a purple box.\n',
"blue")),
)
for condition, message in processing_info_messages:
if condition:
self.show_info_message(info=message[0], color=message[1])
break
def process_sizes(self) -> None:
"""
Process the sizes of oysters the image, for when a size standard
value is entered. Display and report the results.
Called from process_prediction() and from config_entries() as
an Entry() binding.
Returns: None
"""
self.find_interior_objects()
self.find_true_pos_objects()
self.set_bbox_ratio_mean(bbox_ary=self.true_pos_oysters)
self.validate_size_entry()
self.determine_mean_standard_size()
self.get_sig_fig()
self.display_all_objects()
self.report_results()
def process_prediction(self, event=None) -> Event:
"""
Calls methods Process_image.prediction() and process_sizes(),
which in turn calls methods for filtering, measuring, annotating,
and reporting.
Called from start_now() and various callbacks and bindings.
Args:
event: Used for any implicit tkinter event.
Returns:
Event, a formality to pass IDE inspections.
"""
# If no objects found, then no need to update beyond prediction().
# Record processing time to display in info_txt. When no objects
# are found, the elapsed time is considered n/a.
# The oyster_sizes list is cleared when no objects are found,
# otherwise it would retain the last run's sizes, which is
# normally cleared in display_all_objects() via process_sizes().
self.widget_control(action='off')
self.time_start: float = time()
self.prediction()
if self.predicted_boxes.size:
self.process_sizes()
self.elapsed =(round(time() - self.time_start, 3)
if (self.interior_standards.size and
self.interior_oysters.size)
else 'n/a')
else:
utils.no_objects_found_msg(caller='predicted_boxes')
self.oyster_sizes.clear()
self.report_results()
self.cvimg['sized'] = self.cvimg['input'].copy()
self.update_image('sized')
self.elapsed = 'n/a'
self.widget_control(action='on')
self.display_metrics_in_image()
self.display_processing_info()
return event # a formality to pass IDE inspections
class SetupApp(ViewImage):
"""
The mainloop Class for file handling and configuring windows and widgets.
Methods:
call_cmd
setup_main_window
setup_main_menu
start_now
open_input
close_window_message
setup_image_windows
configure_main_window
configure_buttons
config_entries
config_sliders
bind_focus_actions
bind_functions
set_defaults
grid_widgets
grid_img_labels
display_images
"""
def __init__(self):
super().__init__()
# Dictionary items are populated in setup_image_windows(), with