Skip to content

Commit

Permalink
Update the visual perception code
Browse files Browse the repository at this point in the history
Update consists of the KLT Tracker
  • Loading branch information
eroniki committed Apr 6, 2016
1 parent 93adaae commit adfd57b
Show file tree
Hide file tree
Showing 19 changed files with 1,422 additions and 8 deletions.
2 changes: 1 addition & 1 deletion src/Bayesian_Robotics/ar_drone_vision/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ project(ar_drone_vision)
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
# cv_bridge
cv_bridge
roscpp
rospy
)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
<launch>
<node pkg="ar_drone_vision" name="landing_zone_detector" type="landing_zone_detector.py" output="screen">

<remap from="input_rgb_image" to="/ardrone/bottom/image_raw" />

<rosparam>
show_text: True
feature_size: 1
</rosparam>

</node>
</launch>
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
<launch>
<node pkg="ar_drone_vision" name="landing_zone_tracker" type="landing_zone_tracker.py" output="screen">

<remap from="input_rgb_image" to="/ardrone/bottom/image_raw" />

<rosparam>
show_text: True
feature_size: 1
drop_feature_point_interval: 160
</rosparam>

</node>
</launch>
8 changes: 4 additions & 4 deletions src/Bayesian_Robotics/ar_drone_vision/nodes/base_detector.py
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def __init__(self, node_name):
self.feature_size = rospy.get_param("~feature_size", 1)

# Initialize Basler Camera Tf Frame
self.basler_tf_frame = rospy.get_param("~basler_tf_frame", "ur5_arm_camera_arm_link" )
self.basler_tf_frame = rospy.get_param("~basler_tf_frame", "/ardrone/bottom/image_raw" )

# Initialize the Point of Interest and its publisher
self.POI = PointStamped()
Expand All @@ -83,7 +83,7 @@ def __init__(self, node_name):
self.cps_n_values = 20
self.resize_window_width = 0
self.resize_window_height = 0
self.undistort_image = True
self.undistort_image = False
self.cameraMatrix = np.array([(2529.3016912669586, 0.0, 1007.0532160786125), (0.0, 2524.6309899852313, 650.2969085717225) , (0.0, 0.0, 1.0)])
self.distCoeffs = np.array([-0.006795069030464255, -0.5045652004390003, 0.004947680741251182, 0.005813011948658337, 0.0])
self.projectionMatrix = np.array([(2494.93408203125, 0.0, 1015.7040773447079, 0.0), (0.0, 2516.773681640625, 652.354580721294, 0.0), (0.0, 0.0, 1.0, 0.0)])
Expand Down Expand Up @@ -204,8 +204,8 @@ def image_callback(self, data):
# Publish the display image
self.publish_display_image(self.display_image)

# # Update the image display
# cv2.imshow(self.node_name, self.display_image)
# Update the image display
cv2.imshow(self.node_name, self.display_image)


def depth_callback(self, data):
Expand Down
Binary file not shown.
Empty file modified src/Bayesian_Robotics/ar_drone_vision/nodes/circle_detector.py
100644 → 100755
Empty file.
Empty file modified src/Bayesian_Robotics/ar_drone_vision/nodes/circle_tracker.py
100644 → 100755
Empty file.
Empty file modified src/Bayesian_Robotics/ar_drone_vision/nodes/corner_detector.py
100644 → 100755
Empty file.
Empty file modified src/Bayesian_Robotics/ar_drone_vision/nodes/corner_tracker.py
100644 → 100755
Empty file.
116 changes: 116 additions & 0 deletions src/Bayesian_Robotics/ar_drone_vision/nodes/landing_zone_detector.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
#!/usr/bin/env python

""" circle_detector.py - Version 1.0 2016-02-23
Based on the R. Patrick Goebel's good_feature.py demo code
Locate HoughCircles feature closest to center of the image to track in a video stream.
Created for the CCAM Project
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""

import rospy
import cv2
import cv2.cv as cv
import numpy as np
from cv2 import BORDER_DEFAULT
from cv2.cv import CV_HOUGH_GRADIENT
from base_detector import BaseDetector

class LandingZoneDetector(BaseDetector):
    """Detect a landing zone in the drone's bottom camera stream.

    The landing zone is assumed to appear as pure white: pixels whose HSV
    value is exactly (H=0, S=0, V=255) are masked, the mask is dilated to
    close small gaps, and a Hough circle transform is run on the masked
    image.  The detected circle centre is stored in ``self.tracked_point``
    so the base class can publish it on the /poi topic.
    """

    def __init__(self, node_name):
        super(LandingZoneDetector, self).__init__(node_name)

        # Should text overlays be drawn on the display image?
        self.show_text = rospy.get_param("~show_text", True)

        # Radius (in pixels) of the drawn feature points.
        self.feature_size = rospy.get_param("~feature_size", 3)

        # Initialize key variables.
        self.feature_point = list()
        self.tracked_point = list()
        self.mask = None

    def process_image(self, cv_image):
        """Mask the white landing zone and detect its bounding circle.

        Returns the input image unchanged; the detection result is stored
        in ``self.tracked_point`` as an (x, y) Numpy array.
        """
        try:
            # Convert the image to HSV so pure white is easy to isolate.
            hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)

            # Build a binary mask of exactly-white pixels (H=0, S=0, V=255).
            self.mask = np.zeros_like(cv_image)
            h = hsv[:, :, 0] == 0
            s = hsv[:, :, 1] == 0
            v = hsv[:, :, 2] == 255
            self.mask[h & s & v] = 255
            self.mask = self.mask.astype(np.uint8)

            # Dilate so small holes in the white blob are closed.
            kernel = np.ones((20, 20), np.uint8)
            self.mask = cv2.dilate(self.mask, kernel, iterations=1)

            # BUGFIX: the original guard "if np.where(self.mask == 255):" was
            # always true (np.where returns a non-empty tuple of arrays), so
            # circle detection ran even on an empty mask.  Only proceed when
            # the mask actually contains white pixels.  The unused
            # row/col min/max computations were removed.
            if np.any(self.mask == 255):
                masked_image = cv2.bitwise_and(cv_image, self.mask)
                masked_image = cv2.cvtColor(masked_image, cv2.COLOR_BGR2GRAY)
                cv2.imshow('masked_image', masked_image)

                # Get the HoughCircles feature closest to the image's center.
                feature_point = self.get_feature_point(masked_image)

                if feature_point is not None and len(feature_point) > 0:
                    # Draw the center of the circle.
                    cv2.circle(self.marker_image,
                               (feature_point[0], feature_point[1]),
                               self.feature_size, (0, 0, 255, 0),
                               cv.CV_FILLED, 8, 0)
                    # Draw the outer circle.
                    cv2.circle(self.marker_image,
                               (feature_point[0], feature_point[1]),
                               feature_point[2], (0, 255, 0, 0),
                               self.feature_size, 8, 0)

                    # Reduce (x, y, radius) to a bare (x, y) image coordinate.
                    feature_point = np.array((feature_point[0], feature_point[1]))

                    # publish_poi (in the base class) publishes this on /poi.
                    self.tracked_point = feature_point
        except Exception as e:
            # Best effort per frame: never let one bad frame kill the node,
            # but log the failure instead of silently swallowing it
            # (the original bare "except: pass" hid real bugs).
            rospy.logwarn("landing_zone_detector: process_image failed: %s", e)

        return cv_image

    def get_feature_point(self, input_image):
        """Return the first Hough circle found as (x, y, radius), or [].

        ``minDist`` of half the frame width effectively limits detection
        to a single dominant circle.
        """
        feature_point = list()
        frame_height, frame_width = input_image.shape

        # Compute the x, y, and radius of viable circle features.
        fp = cv2.HoughCircles(input_image, method=CV_HOUGH_GRADIENT, dp=1,
                              minDist=frame_width / 2)

        if fp is not None and len(fp) > 0:
            # Flatten to an (N, 3) float32 array and keep the first circle.
            feature_point = np.float32(fp).reshape(-1, 3)[0]

        return feature_point

# Node entry point: construct the detector and spin until shutdown.
if __name__ == '__main__':
    try:
        node_name = "landing_zone_detector"
        LandingZoneDetector(node_name)
        rospy.spin()
    except KeyboardInterrupt:
        # Python 2 print statement -- this file targets the cv2.cv API of
        # OpenCV 2, which is Python 2 only.
        print "Shutting down the Landing Zone Detector node."
        cv.DestroyAllWindows()
Binary file not shown.
190 changes: 190 additions & 0 deletions src/Bayesian_Robotics/ar_drone_vision/nodes/landing_zone_tracker.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,190 @@
#!/usr/bin/env python

""" circle_tracker.py - Version 1.0 2016-02-23
Based on the R. Patrick Goebel's lk_tracker.py demo code
Tracks the HoughCircles feature closest to the center of the image in a video stream
Created for the CCAM Project
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""

import rospy
import cv2
import cv2.cv as cv
import numpy as np
from cv2 import BORDER_DEFAULT
from landing_zone_detector import LandingZoneDetector

class LandingZoneTracker(LandingZoneDetector):
    """Track the detected landing-zone circle with a KLT (LK) tracker.

    The circle is detected once via ``LandingZoneDetector.get_feature_point``
    and then followed frame-to-frame with pyramidal Lucas-Kanade optical
    flow, using a forward/backward consistency check to reject bad tracks.
    """

    def __init__(self, node_name):
        super(LandingZoneTracker, self).__init__(node_name)

        # Should text overlays be drawn on the display image?
        self.show_text = rospy.get_param("~show_text", True)

        # Radius (in pixels) of the drawn feature points.
        self.feature_size = rospy.get_param("~feature_size", 1)

        # How often (in frames) to force a re-detection of the feature.
        # NOTE: the periodic re-detection step is currently disabled below.
        self.drop_feature_point_interval = rospy.get_param("~drop_feature_point_interval", 160)

        # Lucas-Kanade optical-flow parameters.
        self.lk_winSize = rospy.get_param("~lk_winSize", (10, 10))
        self.lk_maxLevel = rospy.get_param("~lk_maxLevel", 2)
        self.lk_criteria = rospy.get_param("~lk_criteria", (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.01))

        # Bundle the LK parameters for cv2.calcOpticalFlowPyrLK().
        self.lk_params = dict(winSize=self.lk_winSize,
                              maxLevel=self.lk_maxLevel,
                              criteria=self.lk_criteria)

        # Initialize key variables.
        self.feature_point = None
        self.tracked_point = None
        self.mask = None
        self.grey = None
        self.prev_grey = None
        self.frame_index = 0

    def process_image(self, cv_image):
        """Detect the landing zone once, then track it with optical flow.

        Returns the input image unchanged; the tracking result is stored
        in ``self.tracked_point`` as an (x, y) Numpy array (or None).
        """
        try:
            # Step 1: image processing -- isolate the pure-white landing
            # zone (pixels with HSV exactly (0, 0, 255)).
            hsv = cv2.cvtColor(cv_image, cv2.COLOR_BGR2HSV)

            self.mask = np.zeros_like(cv_image)
            h = hsv[:, :, 0] == 0
            s = hsv[:, :, 1] == 0
            v = hsv[:, :, 2] == 255
            self.mask[h & s & v] = 255
            self.mask = self.mask.astype(np.uint8)

            # Dilate so small holes in the white blob are closed.
            kernel = np.ones((20, 20), np.uint8)
            self.mask = cv2.dilate(self.mask, kernel, iterations=1)

            # BUGFIX: the original guard "if np.where(self.mask == 255):" was
            # always true (np.where returns a non-empty tuple of arrays).
            # Only detect/track when the mask actually has white pixels.
            # The unused row/col min/max computations were removed.
            if np.any(self.mask == 255):
                masked_image = cv2.bitwise_and(cv_image, self.mask)
                self.grey = cv2.cvtColor(masked_image, cv2.COLOR_BGR2GRAY)
                cv2.imshow('masked_image', self.grey)

                # Step 2: if we are not yet tracking a point, extract a
                # circle feature from the masked image.
                if self.tracked_point is None:
                    self.feature_point = self.get_feature_point(self.grey)

                # Step 3: if we have a feature point, track it using LK flow.
                # BUGFIX: guard against feature_point being None before
                # calling len() on it (the TypeError was previously masked
                # by the bare except).
                if self.feature_point is not None and len(self.feature_point) > 0:
                    # Store a copy of the grey image used for LK tracking.
                    if self.prev_grey is None:
                        self.prev_grey = self.grey

                    self.tracked_point = self.track_feature_point(self.grey, self.prev_grey)
                else:
                    # Lost the feature: force re-detection on the next frame.
                    self.tracked_point = None

                # TODO: CHANGE COMMITTED BY MURAT TO STOP REINITIALIZATION
                # Step 4 (disabled): re-detect the circle every
                # drop_feature_point_interval frames:
                # if self.frame_index % self.drop_feature_point_interval == 0 and len(self.feature_point) > 0:
                #     self.tracked_point = None
                #     self.frame_index = 0
                # self.frame_index += 1

            # Process any special keyboard commands for this module.
            if self.keystroke != -1:
                try:
                    cc = chr(self.keystroke & 255).lower()
                    if cc == 'c':
                        # Clear the current feature point; re-detect next frame.
                        self.feature_point = None
                        self.tracked_point = None
                except ValueError:
                    # chr() can fail for out-of-range keycodes; ignore them.
                    pass

            self.prev_grey = self.grey
        except Exception as e:
            # Best effort per frame: never let one bad frame kill the node,
            # but log the failure instead of silently swallowing it
            # (the original bare "except: pass" hid real bugs).
            rospy.logwarn("landing_zone_tracker: process_image failed: %s", e)

        return cv_image

    def track_feature_point(self, grey, prev_grey):
        """Track ``self.feature_point`` from prev_grey to grey with LK flow.

        Applies a forward/backward consistency check: the new position is
        accepted only if tracking it back to the previous frame lands
        within one pixel of the starting point.  Returns the new (x, y)
        point, or None when tracking failed.
        """
        # We are tracking points between the previous and current frames.
        img0, img1 = prev_grey, grey

        # calcOpticalFlowPyrLK() requires an (N, 1, 2) array of points.
        p0 = self.feature_point[:2].reshape(-1, 1, 2)

        # Forward flow: previous frame -> current frame.
        p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **self.lk_params)

        try:
            # Backward flow: current frame -> previous frame.
            p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **self.lk_params)

            # Distance between each original point and its round trip.
            d = abs(p0 - p0r).reshape(-1, 2).max(-1)

            # Accept only round-trip errors of less than one pixel.
            good = d < 1

            # Keep only the tracked points that satisfy the "good" check.
            for (x, y), good_flag in zip(p1.reshape(-1, 2), good):
                if not good_flag:
                    continue
                # Keep the original radius; update only the centre.
                new_feature_point = np.array((x, y, self.feature_point[2]))

                # Draw the center of the circle.
                cv2.circle(self.marker_image,
                           (new_feature_point[0], new_feature_point[1]),
                           self.feature_size, (0, 0, 255, 0),
                           cv.CV_FILLED, 8, 0)
                # Draw the outer circle.
                cv2.circle(self.marker_image,
                           (new_feature_point[0], new_feature_point[1]),
                           new_feature_point[2], (0, 255, 0, 0),
                           self.feature_size, 8, 0)

                # Remember the accepted point for the next frame.
                self.feature_point = new_feature_point

                # publish_poi (in the base class) publishes this on /poi.
                self.tracked_point = np.array((new_feature_point[0], new_feature_point[1]))
        except Exception as e:
            # Tracking failed (e.g. the point left the frame): log it and
            # trigger re-detection on the next frame instead of crashing.
            rospy.logwarn("landing_zone_tracker: optical flow failed: %s", e)
            self.tracked_point = None

        return self.tracked_point

# Node entry point: construct the tracker and spin until shutdown.
if __name__ == '__main__':
    try:
        node_name = "landing_zone_tracker"
        LandingZoneTracker(node_name)
        rospy.spin()
    except KeyboardInterrupt:
        # Python 2 print statement -- this file targets the cv2.cv API of
        # OpenCV 2, which is Python 2 only.
        print "Shutting down Landing Zone Tracking node."
        cv.DestroyAllWindows()
Empty file.
Loading

0 comments on commit adfd57b

Please sign in to comment.