Commit
Added object detection and advanced lane detection files.
1 parent 99e8b8f · commit 1af4554
Showing 14 changed files with 999 additions and 0 deletions.
@@ -0,0 +1,4 @@
non-vehicles_smallset/
vehicles_smallset/
__pycache__/
.ipynb_checkpoints/
@@ -0,0 +1,182 @@
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pickle
import cv2
from scipy.ndimage import label
from moviepy.editor import VideoFileClip
from IPython.display import HTML
from skimage.feature import hog


def convert_color(img, conv='RGB2YCrCb'):
    if conv == 'RGB2YCrCb':
        return cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    if conv == 'BGR2YCrCb':
        return cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    if conv == 'RGB2LUV':
        return cv2.cvtColor(img, cv2.COLOR_RGB2LUV)

def get_hog_features(img, orient, pix_per_cell, cell_per_block,
                     vis=False, feature_vec=True):
    # Call with two outputs if vis==True
    if vis == True:
        features, hog_image = hog(img, orientations=orient,
                                  pixels_per_cell=(pix_per_cell, pix_per_cell),
                                  cells_per_block=(cell_per_block, cell_per_block),
                                  block_norm='L2-Hys',
                                  transform_sqrt=False,
                                  visualize=vis, feature_vector=feature_vec)
        return features, hog_image
    # Otherwise call with one output
    else:
        features = hog(img, orientations=orient,
                       pixels_per_cell=(pix_per_cell, pix_per_cell),
                       cells_per_block=(cell_per_block, cell_per_block),
                       block_norm='L2-Hys',
                       transform_sqrt=False,
                       visualize=vis, feature_vector=feature_vec)
        return features

def bin_spatial(img, size=(32, 32)):
    color1 = cv2.resize(img[:,:,0], size).ravel()
    color2 = cv2.resize(img[:,:,1], size).ravel()
    color3 = cv2.resize(img[:,:,2], size).ravel()
    return np.hstack((color1, color2, color3))


def color_hist(img, nbins=32):  # bins_range=(0, 256)
    # Compute the histogram of the color channels separately
    channel1_hist = np.histogram(img[:,:,0], bins=nbins)
    channel2_hist = np.histogram(img[:,:,1], bins=nbins)
    channel3_hist = np.histogram(img[:,:,2], bins=nbins)
    # Concatenate the histograms into a single feature vector
    hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
    # Return the feature vector
    return hist_features

# Load the trained classifier, the feature scaler, and the feature parameters
svc = pickle.load(open("svc_pickle.p", "rb"))
X_scaler = pickle.load(open("X_scaler.p", "rb"))
print(X_scaler)
orient = 8
pix_per_cell = 8
cell_per_block = 2
spatial_size = (16, 16)
hist_bins = 32  # number of color-histogram bins per channel

# Define a single function that can extract features using hog sub-sampling and make predictions
def find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins):

    draw_img = np.copy(img)
    box_list = []  # bounding boxes of positive detections
    img = img.astype(np.float32)/255

    img_tosearch = img[ystart:ystop,:,:]
    ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')
    if scale != 1:
        imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(ctrans_tosearch, (int(imshape[1]/scale), int(imshape[0]/scale)))

    ch1 = ctrans_tosearch[:,:,0]
    ch2 = ctrans_tosearch[:,:,1]
    ch3 = ctrans_tosearch[:,:,2]

    # Define blocks and steps as above
    nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1
    nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1
    nfeat_per_block = orient*cell_per_block**2

    # 64 was the original sampling rate, with 8 cells and 8 pix per cell
    window = 64
    nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1
    cells_per_step = 2  # Instead of overlap, define how many cells to step
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1

    # Compute individual channel HOG features for the entire image
    hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, feature_vec=False)

    for xb in range(nxsteps):
        for yb in range(nysteps):
            ypos = yb*cells_per_step
            xpos = xb*cells_per_step
            # Extract HOG for this patch
            hog_feat1 = hog1[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_feat2 = hog2[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_feat3 = hog3[ypos:ypos+nblocks_per_window, xpos:xpos+nblocks_per_window].ravel()
            hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))

            xleft = xpos*pix_per_cell
            ytop = ypos*pix_per_cell

            # Extract the image patch
            subimg = cv2.resize(ctrans_tosearch[ytop:ytop+window, xleft:xleft+window], (64,64))

            # Get color features
            spatial_features = bin_spatial(subimg, size=spatial_size)
            hist_features = color_hist(subimg, nbins=hist_bins)

            # Scale features and make a prediction
            test_features = X_scaler.transform(np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1))
            # test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1))
            test_prediction = svc.predict(test_features)

            if test_prediction == 1:
                xbox_left = int(xleft*scale)
                ytop_draw = int(ytop*scale)
                win_draw = int(window*scale)
                cv2.rectangle(draw_img, (xbox_left, ytop_draw+ystart), (xbox_left+win_draw, ytop_draw+win_draw+ystart), (0,0,255), 6)
                box_list.append(((xbox_left, ytop_draw+ystart), (xbox_left+win_draw, ytop_draw+win_draw+ystart)))

    return draw_img, box_list

def add_heat(heatmap, bbox_list):
    # Iterate through list of bboxes
    for box in bbox_list:
        # Add += 1 for all pixels inside each bbox
        # Assuming each "box" takes the form ((x1, y1), (x2, y2))
        heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += 1

    # Return updated heatmap
    return heatmap


def apply_threshold(heatmap, threshold):
    # Zero out pixels below the threshold
    heatmap[heatmap <= threshold] = 0
    # Return thresholded map
    return heatmap

def draw_labeled_bboxes(img, labels):
    # Iterate through all detected cars
    for car_number in range(1, labels[1]+1):
        # Find pixels with each car_number label value
        nonzero = (labels[0] == car_number).nonzero()
        # Identify x and y values of those pixels
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        # Define a bounding box based on min/max x and y
        bbox = ((np.min(nonzerox), np.min(nonzeroy)), (np.max(nonzerox), np.max(nonzeroy)))
        # Draw the box on the image
        cv2.rectangle(img, bbox[0], bbox[1], (0,0,255), 6)
    # Return the image
    return img

def process_image(img):
    ystart = 400
    ystop = 656
    scale = 1.5
    out_img, box_list = find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, cell_per_block, spatial_size, hist_bins)
    # Build a heatmap from the detected boxes, threshold it, and draw one box per labeled blob
    heat = np.zeros_like(img[:,:,0]).astype(np.float64)
    heat = add_heat(heat, box_list)
    heat = apply_threshold(heat, 1)
    heatmap = np.clip(heat, 0, 255)
    labels = label(heatmap)
    draw_img = draw_labeled_bboxes(np.copy(img), labels)
    return draw_img


output_vid = 'final_tracker.mp4'
input_vid = 'lane_tracker.mp4'

clip1 = VideoFileClip(input_vid)
video_clip = clip1.fl_image(process_image)
video_clip.write_videofile(output_vid, audio=False)
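
The script above loads svc_pickle.p and X_scaler.p, but this commit does not include the code that produces them. A minimal training sketch follows, reusing the feature functions and hyperparameters defined above; the LinearSVC/StandardScaler choice, the .jpeg extension, and the exact layout under vehicles_smallset/ and non-vehicles_smallset/ (directory names taken from the ignore list above) are assumptions, not something this commit confirms.

# Minimal training sketch (assumption, not part of this commit): one plausible way
# to produce the pickled classifier and scaler that the script above expects.
import glob
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler

def single_img_features(img):
    # Match find_cars: work on a [0, 1] float image and stack
    # spatial + color-histogram + 3-channel HOG features in that order.
    img = img.astype(np.float32) / 255
    feat_img = convert_color(img, conv='RGB2YCrCb')
    spatial_features = bin_spatial(feat_img, size=spatial_size)
    hist_features = color_hist(feat_img, nbins=hist_bins)
    hog_features = np.hstack([get_hog_features(feat_img[:, :, ch], orient, pix_per_cell,
                                               cell_per_block, feature_vec=True)
                              for ch in range(3)])
    return np.concatenate((spatial_features, hist_features, hog_features))

# Assumed image locations and extension, based on the ignored directories above.
cars = glob.glob('vehicles_smallset/**/*.jpeg', recursive=True)
notcars = glob.glob('non-vehicles_smallset/**/*.jpeg', recursive=True)

X = np.vstack([single_img_features(mpimg.imread(f)) for f in cars + notcars]).astype(np.float64)
y = np.hstack((np.ones(len(cars)), np.zeros(len(notcars))))

# Fit the per-feature scaler, train a linear SVM on the scaled features,
# and pickle both under the filenames the detection script loads.
X_scaler = StandardScaler().fit(X)
svc = LinearSVC()
svc.fit(X_scaler.transform(X), y)

pickle.dump(svc, open("svc_pickle.p", "wb"))
pickle.dump(X_scaler, open("X_scaler.p", "wb"))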