# -*- coding: utf-8 -*-
"""Number plate detection
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1n5xyrDWy07HZPNke-F2WzQ3sRFssYWbW
# Step 1: Install Requirements
"""
# Commented out IPython magic to ensure Python compatibility.
# clone YOLOv5 and install dependencies
!git clone https://github.com/ultralytics/yolov5 # clone repo
# %cd yolov5
# %pip install -qr requirements.txt # install dependencies
# %pip install -q roboflow
import torch
import os
from IPython.display import Image, clear_output # to display images
print(f"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})")
"""# Step 2: Download Our Dataset from Roboflow"""
from roboflow import Roboflow
rf = Roboflow(model_format="yolov5", notebook="ultralytics")
# set up environment
os.environ["DATASET_DIRECTORY"] = "/content/datasets"
rf = Roboflow(api_key="cVEtmQca6JMsH0rWK9rV")
project = rf.workspace("sudips-workspace").project("license-plate-detection-x9ngg")
dataset = project.version(1).download("yolov5")
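# Not in the original notebook: a quick sanity check (a minimal sketch) that the
# Roboflow download produced the expected YOLOv5 layout before we train on it.
import os
print("Dataset downloaded to:", dataset.location)
print(os.listdir(dataset.location))  # expect data.yaml plus train/valid/test folders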
"""# Step 3: Train Our Custom YOLOv5 model
Here, we are able to pass a number of arguments:
- **img:** define input image size
- **batch:** determine batch size
- **epochs:** define the number of training epochs. (Note: 3000+ epochs are common here!)
- **data:** our dataset location, saved in `dataset.location`
- **weights:** specify a path to weights to start transfer learning from. Here we choose the generic COCO pretrained checkpoint.
- **cache:** cache images for faster training
"""
!python train.py --img 416 --batch 16 --epochs 150 --data {dataset.location}/data.yaml --weights yolov5s.pt --cache
"""# Evaluate Custom YOLOv5 Detector Performance
Training losses and performance metrics are saved to Tensorboard and also to a logfile.
If you are new to these metrics, the one you want to focus on is `mAP_0.5` - learn more about mean average precision [here](https://blog.roboflow.com/mean-average-precision/).
"""
# Start tensorboard
# Launch after you have started training
# logs save in the folder "runs"
#%tensorboard --logdir runs
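# Not in the original notebook: two hedged ways to inspect mAP_0.5 after training.
# A minimal sketch assuming the default YOLOv5 run directory (runs/train/exp);
# adjust the paths if your run landed in a different experiment folder.
# Option 1: re-run validation with the best checkpoint (val.py ships with YOLOv5)
!python val.py --weights runs/train/exp/weights/best.pt --data {dataset.location}/data.yaml --img 416
# Option 2: read the per-epoch metrics that train.py logs to results.csv
import pandas as pd
results = pd.read_csv('runs/train/exp/results.csv')
results.columns = results.columns.str.strip()  # YOLOv5 pads its column names with spaces
print('best mAP_0.5:', results['metrics/mAP_0.5'].max())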
"""#Run Inference With Trained Weights
Run inference with a pretrained checkpoint on contents of `test/images` folder downloaded from Roboflow.
"""
!python detect.py --weights runs/train/exp/weights/best.pt --img 416 --conf 0.1 --source /content/image.jpg
# display the annotated detection result that detect.py saved under runs/detect/
import glob
from IPython.display import Image, display
for imageName in glob.glob('/content/yolov5/runs/detect/exp3/image.jpg'):  # assuming JPG
    display(Image(filename=imageName))
    print("\n")
#export your model's weights for future use
from google.colab import files
files.download('./runs/train/exp/weights/best.pt')
!pip install requests pillow requests-toolbelt
import io
import cv2
import requests
from PIL import Image
from requests_toolbelt.multipart.encoder import MultipartEncoder
# Load the image with OpenCV, convert BGR to RGB, and wrap it in a PIL Image
img = cv2.imread("/content/image.jpg")
image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
pilImage = Image.fromarray(image)
# Convert to JPEG Buffer
buffered = io.BytesIO()
pilImage.save(buffered, quality=100, format="JPEG")
# Build multipart form and post request
m = MultipartEncoder(fields={'file': ("imageToUpload", buffered.getvalue(), "image/jpeg")})
response = requests.post("https://detect.roboflow.com/license-plate-detection-x9ngg/1?api_key=cVEtmQca6JMsH0rWK9rV", data=m, headers={'Content-Type': m.content_type})
print(response)
print(response.json())
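# Not in the original notebook: fail fast on HTTP errors before using the body
# (raise_for_status() is plain `requests` behavior, nothing Roboflow-specific)
response.raise_for_status()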
#cv2_imshow(img)
# Parse the predictions; Roboflow returns each box as a center point (x, y)
# plus width and height, not corner coordinates
out = response.json()['predictions']
for i in out:
    cls = i['class']
    confidence = i['confidence']
    x = float(i['x'])
    y = float(i['y'])
    w = float(i['width'])
    h = float(i['height'])
    print('x=', x, 'y=', y, 'w=', w, 'h=', h)
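# Not in the original notebook: a minimal sketch that draws the last predicted box
# on the image, to visually confirm the center-based coordinate convention above.
import cv2
from google.colab.patches import cv2_imshow
vis = cv2.imread("/content/image.jpg")
x1, y1 = int(x - w / 2), int(y - h / 2)  # center + size -> top-left corner
x2, y2 = int(x + w / 2), int(y + h / 2)  # center + size -> bottom-right corner
cv2.rectangle(vis, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.putText(vis, f"{cls} {confidence:.2f}", (x1, max(y1 - 5, 0)),
            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
cv2_imshow(vis)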
import cv2
from google.colab.patches import cv2_imshow
img = cv2.imread("/content/image.jpg")
print(type(img))
# Shape of the image
print("Shape of the image", img.shape)
# [rows, columns]; Roboflow's (x, y) is the box center, so convert to corner coordinates
x1, y1 = int(x - w / 2), int(y - h / 2)
x2, y2 = int(x + w / 2), int(y + h / 2)
crop = img[y1:y2, x1:x2]
cv2_imshow(img)
print('Cropped Image')
cv2_imshow(crop)
#display(Image(crop))
print("Shape of the crop", crop.shape)
def zoom(img, zoom_factor=1.5):
    # enlarge both axes by zoom_factor; cubic interpolation keeps upscaled text smoother
    return cv2.resize(img, None, fx=zoom_factor, fy=zoom_factor, interpolation=cv2.INTER_CUBIC)
zoomed = zoom(crop, 5)
cv2.imwrite('zoomed.jpg',zoomed)
cv2_imshow(zoomed)
files.download('zoomed.jpg')
# Commented out IPython magic to ensure Python compatibility.
# Clone Real-ESRGAN and enter the Real-ESRGAN
!git clone https://github.com/xinntao/Real-ESRGAN.git
# %cd Real-ESRGAN
# Set up the environment
!pip install basicsr
!pip install facexlib
!pip install gfpgan
!pip install -r requirements.txt
!python setup.py develop
# Download the pre-trained model
!wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P experiments/pretrained_models
import os
from google.colab import files
import shutil
upload_folder = 'upload'
result_folder = 'results'
if os.path.isdir(upload_folder):
    shutil.rmtree(upload_folder)
if os.path.isdir(result_folder):
    shutil.rmtree(result_folder)
os.mkdir(upload_folder)
os.mkdir(result_folder)
# upload images
uploaded = files.upload()
for filename in uploaded.keys():
    dst_path = os.path.join(upload_folder, filename)
    print(f'move {filename} to {dst_path}')
    shutil.move(filename, dst_path)
!python inference_realesrgan.py -n RealESRGAN_x4plus -i upload --outscale 3.5 --face_enhance
# utils for visualization
import cv2
import matplotlib.pyplot as plt
# note: this helper shadows IPython.display's display() imported earlier
def display(img1, img2):
    fig = plt.figure(figsize=(25, 10))
    ax1 = fig.add_subplot(1, 2, 1)
    plt.title('Input image', fontsize=16)
    ax1.axis('off')
    ax2 = fig.add_subplot(1, 2, 2)
    plt.title('Real-ESRGAN output', fontsize=16)
    ax2.axis('off')
    ax1.imshow(img1)
    ax2.imshow(img2)

def imread(img_path):
    img = cv2.imread(img_path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # matplotlib expects RGB
    return img
# display each image in the upload folder
import os
import glob
input_folder = 'upload'
result_folder = 'results'
input_list = sorted(glob.glob(os.path.join(input_folder, '*')))
output_list = sorted(glob.glob(os.path.join(result_folder, '*')))
for input_path, output_path in zip(input_list, output_list):
    img_input = imread(input_path)
    img_output = imread(output_path)
    display(img_input, img_output)
# imread() above converted to RGB, so convert back to BGR before cv2.imwrite
cv2.imwrite('esroutput.jpg', cv2.cvtColor(img_output, cv2.COLOR_RGB2BGR))
files.download('esroutput.jpg')
!sudo apt install -y tesseract-ocr
!pip install pytesseract
import pytesseract
import shutil
import os
import random
try:
    from PIL import Image
except ImportError:
    import Image
from google.colab import files
# optional: upload esroutput.jpg here if it is not already in this runtime
uploaded = files.upload()
extractedInformation = pytesseract.image_to_string(Image.open('esroutput.jpg'))
print(extractedInformation)
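# Not in the original notebook: a hedged preprocessing sketch that often improves
# Tesseract accuracy on license plates. --psm 7 (treat the image as a single text
# line) and the character whitelist are standard Tesseract options; Otsu picks the
# binarization threshold automatically, but results still vary per image.
import cv2
import pytesseract
plate = cv2.imread('esroutput.jpg')
gray = cv2.cvtColor(plate, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
text = pytesseract.image_to_string(
    binary,
    config='--psm 7 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
)
print(text.strip())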