diff --git a/README.md b/README.md index 20ea970..df5d894 100644 --- a/README.md +++ b/README.md @@ -25,6 +25,8 @@ OpenPTrack is led by UCLA REMAP and Open Perception. Key collaborators include t If you use this code, please cite: +M. Munaro, F. Basso and E. Menegatti. OpenPTrack: Open Source Multi-Camera Calibration and People Tracking for RGB-D Camera Networks. Journal on Robotics and Autonomous Systems, Elsevier, vol. 75, part B, pp. 525-538, ISSN: 0921-8890, January 2016. + M. Munaro, A. Horn, R. Illum, J. Burke and R. B. Rusu. OpenPTrack: People Tracking for Heterogeneous Networks of Color-Depth Cameras. In IAS-13 Workshop Proceedings: 1st Intl. Workshop on 3D Robot Perception with Point Cloud Library, pp. 235-247, Padova, Italy, 2014. M. Munaro and E. Menegatti. Fast RGB-D people tracking for service robots. Journal on Autonomous Robots, vol. 37(3), pp. 227-242, Springer, 2014. diff --git a/bayes/CMakeLists.txt b/bayes/CMakeLists.txt index 2afec8d..3f8e710 100644 --- a/bayes/CMakeLists.txt +++ b/bayes/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 2.8.3) project(bayes) -set(CMAKE_BUILD_TYPE RelWithDebInfo) +set(CMAKE_BUILD_TYPE Release)#RelWithDebInfo) find_package(catkin REQUIRED COMPONENTS roscpp ) link_libraries(/usr/lib/liblapack.so.3gf) diff --git a/detection/CMakeLists.txt b/detection/CMakeLists.txt index b030e6a..442a16b 100644 --- a/detection/CMakeLists.txt +++ b/detection/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 2.8.3) project(detection) -set(CMAKE_BUILD_TYPE RelWithDebInfo) +set(CMAKE_BUILD_TYPE Release) find_package(catkin REQUIRED COMPONENTS cmake_modules roscpp rospy pcl_ros pcl_conversions nodelet rosconsole image_transport opt_utils opt_msgs dynamic_reconfigure) find_package(VTK REQUIRED) diff --git a/detection/apps/ground_based_people_detector_node.cpp b/detection/apps/ground_based_people_detector_node.cpp index ef96564..c6857ae 100644 --- a/detection/apps/ground_based_people_detector_node.cpp +++ b/detection/apps/ground_based_people_detector_node.cpp @@ -153,12 +153,9 @@ computeBackgroundCloud (int frames, float voxel_size, std::string frame_id, ros: background_cloud->points.clear(); for (unsigned int i = 0; i < frames; i++) { - // Voxel grid filtering: + // Point cloud pre-processing (downsampling and filtering): PointCloudT::Ptr cloud_filtered(new PointCloudT); - pcl::VoxelGrid<PointT> voxel_grid_filter_object; - voxel_grid_filter_object.setInputCloud(cloud); - voxel_grid_filter_object.setLeafSize (voxel_size, voxel_size, voxel_size); - voxel_grid_filter_object.filter (*cloud_filtered); + cloud_filtered = people_detector.preprocessCloud (cloud); *background_cloud += *cloud_filtered; ros::spinOnce(); @@ -265,6 +262,7 @@ main (int argc, char** argv) nh.param("minimum_person_height", min_height, 1.3); double max_height; nh.param("maximum_person_height", max_height, 2.3); + // Point cloud sampling factor: int sampling_factor; nh.param("sampling_factor", sampling_factor, 1); std::string pointcloud_topic; @@ -291,11 +289,16 @@ main (int argc, char** argv) nh.param("voxel_size", voxel_size, 0.06); bool read_ground_from_file; // Flag stating if the ground should be read from file, if present nh.param("read_ground_from_file", read_ground_from_file, false); - bool apply_denoising; // Denoising flag. If true, a statistical filter is applied to the point cloud to remove noise + bool remote_ground_selection; // Flag enabling manual ground selection via ssh: + nh.param("remote_ground_selection", remote_ground_selection, false); + // Denoising flag.
If true, a statistical filter is applied to the point cloud to remove noise + bool apply_denoising; nh.param("apply_denoising", apply_denoising, false); - int mean_k_denoising; // MeanK for denoising (the higher it is, the stronger is the filtering) + // MeanK for denoising (the higher it is, the stronger is the filtering) + int mean_k_denoising; nh.param("mean_k_denoising", mean_k_denoising, 5); - double std_dev_denoising; // Standard deviation for denoising (the lower it is, the stronger is the filtering) + // Standard deviation for denoising (the lower it is, the stronger is the filtering) + double std_dev_denoising; nh.param("std_dev_denoising", std_dev_denoising, 0.3); // Eigen::Matrix3f intrinsics_matrix; @@ -347,7 +350,7 @@ main (int argc, char** argv) reconfigure_server_->setCallback(f); // Loop until a valid point cloud is found - open_ptrack::detection::GroundplaneEstimation<PointT> ground_estimator(ground_estimation_mode); + open_ptrack::detection::GroundplaneEstimation<PointT> ground_estimator(ground_estimation_mode, remote_ground_selection); bool first_valid_frame = false; int no_valid_frame_counter = 0; while (!first_valid_frame) diff --git a/detection/apps/ground_based_people_detector_node_sr.cpp b/detection/apps/ground_based_people_detector_node_sr.cpp index 1d532a9..fda9863 100644 --- a/detection/apps/ground_based_people_detector_node_sr.cpp +++ b/detection/apps/ground_based_people_detector_node_sr.cpp @@ -406,6 +406,8 @@ main (int argc, char** argv) nh.param("voxel_size", voxel_size, 0.06); bool read_ground_from_file; // Flag stating if the ground should be read from file, if present nh.param("read_ground_from_file", read_ground_from_file, false); + bool remote_ground_selection; // Flag enabling manual ground selection via ssh: + nh.param("remote_ground_selection", remote_ground_selection, false); // Eigen::Matrix3f intrinsics_matrix; //horizontal field of view = 2 atan(0.5 width / focallength) @@ -475,7 +477,7 @@ main (int argc, char** argv) reconfigure_server_->setCallback(f); // Loop until a valid point cloud is found - open_ptrack::detection::GroundplaneEstimation<PointT> ground_estimator(ground_estimation_mode); + open_ptrack::detection::GroundplaneEstimation<PointT> ground_estimator(ground_estimation_mode, remote_ground_selection); bool first_valid_frame = false; while (!first_valid_frame) { diff --git a/detection/conf/ground_based_people_detector_kinect2.yaml b/detection/conf/ground_based_people_detector_kinect2.yaml index af5daf2..996c1ad 100644 --- a/detection/conf/ground_based_people_detector_kinect2.yaml +++ b/detection/conf/ground_based_people_detector_kinect2.yaml @@ -3,6 +3,8 @@ ####################### # Ground estimation mode - 0:manual, 1:semi-auto, 2:auto with user validation, 3:fully automatic (default 0) ground_estimation_mode: 3 +# Flag enabling manual ground selection via ssh: +remote_ground_selection: true # Flag stating if the ground should be read from file, if present: read_ground_from_file: false # Flag that locks the ground plane update: @@ -14,11 +16,11 @@ valid_points_threshold: 0.3 ## Background subtraction ## ############################ # Flag enabling/disabling background subtraction: -background_subtraction: false +background_subtraction: true # Resolution of the octree representing the background: background_resolution: 0.3 # Seconds to use to learn the background: -background_seconds: 3.0 +background_seconds: 0.4 ############################################## ## Ground based people detection parameters ## diff --git
a/detection/include/open_ptrack/detection/ground_based_people_detection_app.h b/detection/include/open_ptrack/detection/ground_based_people_detection_app.h index 9fa81b8..f91c1b0 100644 --- a/detection/include/open_ptrack/detection/ground_based_people_detection_app.h +++ b/detection/include/open_ptrack/detection/ground_based_people_detection_app.h @@ -309,6 +309,16 @@ namespace open_ptrack Eigen::VectorXf rotateGround (Eigen::VectorXf ground_coeffs, Eigen::Affine3f transform); + /** + * \brief Perform pre-processing operations on the input cloud (downsampling, filtering). + * + * \param[in] input_cloud Input cloud. + * + * \return The cloud after pre-processing. + */ + PointCloudPtr + preprocessCloud (PointCloudPtr& input_cloud); + /** * \brief Perform people detection on the input data and return people clusters information. * diff --git a/detection/include/open_ptrack/detection/ground_segmentation.h b/detection/include/open_ptrack/detection/ground_segmentation.h index e76707a..38481a0 100644 --- a/detection/include/open_ptrack/detection/ground_segmentation.h +++ b/detection/include/open_ptrack/detection/ground_segmentation.h @@ -32,7 +32,7 @@ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * - * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Nicola Ristè + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Nicola Ristè * */ @@ -50,6 +50,7 @@ #include #include #include +#include <opencv2/opencv.hpp> #include #include @@ -73,7 +74,7 @@ namespace open_ptrack typedef boost::shared_ptr<const PointCloud> PointCloudConstPtr; /** \brief Constructor. */ - GroundplaneEstimation (int ground_estimation_mode); + GroundplaneEstimation (int ground_estimation_mode, bool remote_ground_selection); /** \brief Destructor. */ virtual ~GroundplaneEstimation (); @@ -184,6 +185,12 @@ namespace open_ptrack static void pp_callback (const pcl::visualization::PointPickingEvent& event, void* args); + /** + * \brief Mouse clicking callback on OpenCV images. + */ + static void + click_cb (int event, int x, int y, int flags, void* args); + /** * \brief States which planar region is lower.
* @@ -211,6 +218,9 @@ namespace open_ptrack /** \brief Flag stating if ground should be estimated manually (0), semi-automatically (1) or automatically with validation (2) or fully automatically (3) */ int ground_estimation_mode_; + /** \brief Flag enabling manual ground selection via ssh */ + bool remote_ground_selection_; + /** \brief pointer to the input cloud */ PointCloudPtr cloud_; @@ -225,6 +235,12 @@ namespace open_ptrack pcl::PointCloud<pcl::PointXYZRGB>::Ptr clicked_points_3d; pcl::visualization::PCLVisualizer* viewerPtr; }; + + /** \brief structure used to pass arguments to the callback function associated to an image */ + struct callback_args_image{ + std::vector<cv::Point> clicked_points_2d; + bool selection_finished; + }; }; } /* namespace detection */ } /* namespace open_ptrack */ diff --git a/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp b/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp index 51ceed5..25b2f7b 100644 --- a/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp +++ b/detection/include/open_ptrack/detection/impl/ground_based_people_detection_app.hpp @@ -318,6 +318,74 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::rotateGround(Eige return ground_coeffs_new; } +template <typename PointT> typename open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::PointCloudPtr +open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::preprocessCloud (PointCloudPtr& input_cloud) +{ + // Downsample of sampling_factor in every dimension: + PointCloudPtr cloud_downsampled(new PointCloud); + PointCloudPtr cloud_denoised(new PointCloud); + if (sampling_factor_ != 1) + { + cloud_downsampled->width = (input_cloud->width)/sampling_factor_; + cloud_downsampled->height = (input_cloud->height)/sampling_factor_; + cloud_downsampled->points.resize(cloud_downsampled->height*cloud_downsampled->width); + cloud_downsampled->is_dense = input_cloud->is_dense; + cloud_downsampled->header = input_cloud->header; + for (int j = 0; j < cloud_downsampled->width; j++) + { + for (int i = 0; i < cloud_downsampled->height; i++) + { + (*cloud_downsampled)(j,i) = (*input_cloud)(sampling_factor_*j,sampling_factor_*i); + } + } + } + + if (apply_denoising_) + { + // Denoising with statistical filtering: + pcl::StatisticalOutlierRemoval<PointT> sor; + if (sampling_factor_ != 1) + sor.setInputCloud (cloud_downsampled); + else + sor.setInputCloud (input_cloud); + sor.setMeanK (mean_k_denoising_); + sor.setStddevMulThresh (std_dev_denoising_); + sor.filter (*cloud_denoised); + } + + // // Denoising viewer + // int v1(0); + // int v2(0); + // denoising_viewer_->removeAllPointClouds(v1); + // denoising_viewer_->removeAllPointClouds(v2); + // denoising_viewer_->createViewPort (0.0, 0.0, 0.5, 1.0, v1); + // pcl::visualization::PointCloudColorHandlerRGBField rgb(input_cloud); + // denoising_viewer_->addPointCloud (input_cloud, rgb, "original", v1); + // denoising_viewer_->createViewPort (0.5, 0.0, 1.0, 1.0, v2); + // pcl::visualization::PointCloudColorHandlerRGBField rgb2(cloud_denoised); + // denoising_viewer_->addPointCloud (cloud_denoised, rgb2, "denoised", v2); + // denoising_viewer_->spinOnce(); + + // Voxel grid filtering: + PointCloudPtr cloud_filtered(new PointCloud); + pcl::VoxelGrid<PointT> voxel_grid_filter_object; + if (apply_denoising_) + voxel_grid_filter_object.setInputCloud(cloud_denoised); + else + { + if (sampling_factor_ != 1) + voxel_grid_filter_object.setInputCloud(cloud_downsampled); + else + voxel_grid_filter_object.setInputCloud(input_cloud); + } +
voxel_grid_filter_object.setLeafSize (voxel_size_, voxel_size_, voxel_size_); + voxel_grid_filter_object.setFilterFieldName("z"); + voxel_grid_filter_object.setFilterLimits(0.0, max_distance_); + voxel_grid_filter_object.filter (*cloud_filtered); + + return cloud_filtered; +} + template <typename PointT> bool open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::compute (std::vector >& clusters) { @@ -367,80 +435,28 @@ open_ptrack::detection::GroundBasedPeopleDetectionApp<PointT>::compute (std::vec rgb_image_->points.clear(); // clear RGB pointcloud extractRGBFromPointCloud(cloud_, rgb_image_); // fill RGB pointcloud - // Downsample of sampling_factor in every dimension: - if (sampling_factor_ != 1) - { - PointCloudPtr cloud_downsampled(new PointCloud); - cloud_downsampled->width = (cloud_->width)/sampling_factor_; - cloud_downsampled->height = (cloud_->height)/sampling_factor_; - cloud_downsampled->points.resize(cloud_downsampled->height*cloud_downsampled->width); - cloud_downsampled->is_dense = cloud_->is_dense; - cloud_downsampled->header = cloud_->header; - for (int j = 0; j < cloud_downsampled->width; j++) - { - for (int i = 0; i < cloud_downsampled->height; i++) - { - (*cloud_downsampled)(j,i) = (*cloud_)(sampling_factor_*j,sampling_factor_*i); - } - } - (*cloud_) = (*cloud_downsampled); - } + // Point cloud pre-processing (downsampling and filtering): + PointCloudPtr cloud_filtered(new PointCloud); + cloud_filtered = preprocessCloud (cloud_); if (use_rgb_) { // Compute mean luminance: - int n_points = cloud_->points.size(); + int n_points = cloud_filtered->points.size(); double sumR, sumG, sumB = 0.0; - for (int j = 0; j < cloud_->width; j++) { - for (int i = 0; i < cloud_->height; i++) + for (int j = 0; j < cloud_filtered->width; j++) { + for (int i = 0; i < cloud_filtered->height; i++) { - sumR += (*cloud_)(j,i).r; - sumG += (*cloud_)(j,i).g; - sumB += (*cloud_)(j,i).b; + sumR += (*cloud_filtered)(j,i).r; + sumG += (*cloud_filtered)(j,i).g; + sumB += (*cloud_filtered)(j,i).b; } } mean_luminance_ = 0.3 * sumR/n_points + 0.59 * sumG/n_points + 0.11 * sumB/n_points; // mean_luminance_ = 0.2126 * sumR/n_points + 0.7152 * sumG/n_points + 0.0722 * sumB/n_points; } - // Denoising with statistical filtering: - PointCloudPtr cloud_denoised(new PointCloud); - if (apply_denoising_) - { - pcl::StatisticalOutlierRemoval<PointT> sor; - sor.setInputCloud (cloud_); - sor.setMeanK (mean_k_denoising_); - sor.setStddevMulThresh (std_dev_denoising_); - sor.filter (*cloud_denoised); - } - else - { - cloud_denoised = cloud_; - } -// // Denoising viewer -// int v1(0); -// int v2(0); -// denoising_viewer_->removeAllPointClouds(v1); -// denoising_viewer_->removeAllPointClouds(v2); -// denoising_viewer_->createViewPort (0.0, 0.0, 0.5, 1.0, v1); -// pcl::visualization::PointCloudColorHandlerRGBField rgb(cloud_); -// denoising_viewer_->addPointCloud (cloud_, rgb, "original", v1); -// denoising_viewer_->createViewPort (0.5, 0.0, 1.0, 1.0, v2); -// pcl::visualization::PointCloudColorHandlerRGBField rgb2(cloud_denoised); -// denoising_viewer_->addPointCloud (cloud_denoised, rgb2, "denoised", v2); -// denoising_viewer_->spinOnce(); - - // Voxel grid filtering: - PointCloudPtr cloud_filtered(new PointCloud); - pcl::VoxelGrid<PointT> voxel_grid_filter_object; - voxel_grid_filter_object.setInputCloud(cloud_denoised); - voxel_grid_filter_object.setLeafSize (voxel_size_, voxel_size_, voxel_size_); - voxel_grid_filter_object.setFilterFieldName("z"); - voxel_grid_filter_object.setFilterLimits(0.0, max_distance_); - voxel_grid_filter_object.filter
(*cloud_filtered); - // Ground removal and update: pcl::IndicesPtr inliers(new std::vector<int>); boost::shared_ptr<pcl::SampleConsensusModelPlane<PointT> > ground_model(new pcl::SampleConsensusModelPlane<PointT>(cloud_filtered)); diff --git a/detection/include/open_ptrack/detection/impl/ground_segmentation.hpp b/detection/include/open_ptrack/detection/impl/ground_segmentation.hpp index 7dd999b..cdc946c 100644 --- a/detection/include/open_ptrack/detection/impl/ground_segmentation.hpp +++ b/detection/include/open_ptrack/detection/impl/ground_segmentation.hpp @@ -32,16 +32,17 @@ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * - * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Nicola Ristè + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Nicola Ristè * */ #include template <typename PointT> -open_ptrack::detection::GroundplaneEstimation<PointT>::GroundplaneEstimation (int ground_estimation_mode) +open_ptrack::detection::GroundplaneEstimation<PointT>::GroundplaneEstimation (int ground_estimation_mode, bool remote_ground_selection) { ground_estimation_mode_ = ground_estimation_mode; + remote_ground_selection_ = remote_ground_selection; if ((ground_estimation_mode > 3) || (ground_estimation_mode < 0)) { @@ -222,9 +223,6 @@ open_ptrack::detection::GroundplaneEstimation<PointT>::compute () { std::cout << "Manual mode for ground plane estimation." << std::endl; - // Initialize viewer: - pcl::visualization::PCLVisualizer viewer("Pick 3 points"); - // Create XYZ cloud: pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_xyzrgb(new pcl::PointCloud<pcl::PointXYZRGB>); pcl::PointXYZRGB xyzrgb_point; @@ -242,37 +240,92 @@ open_ptrack::detection::GroundplaneEstimation<PointT>::compute () } } -//#if (XYZRGB_CLOUDS) -// pcl::visualization::PointCloudColorHandlerRGBField rgb(cloud_); -// viewer.addPointCloud (cloud_, rgb, "input_cloud"); -//#else -// viewer.addPointCloud (cloud_, "input_cloud"); -//#endif + pcl::PointCloud<pcl::PointXYZRGB>::Ptr clicked_points_3d (new pcl::PointCloud<pcl::PointXYZRGB>); + + if (remote_ground_selection_ == false) + { + // Initialize viewer: + pcl::visualization::PCLVisualizer viewer("Pick 3 points"); + + //#if (XYZRGB_CLOUDS) + // pcl::visualization::PointCloudColorHandlerRGBField rgb(cloud_); + // viewer.addPointCloud (cloud_, rgb, "input_cloud"); + //#else + // viewer.addPointCloud (cloud_, "input_cloud"); + //#endif + + pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZRGB> rgb(cloud_xyzrgb, 255, 255, 255); + viewer.addPointCloud (cloud_xyzrgb, rgb, "input_cloud"); + viewer.setCameraPosition(0,0,-2,0,-1,0,0); - pcl::visualization::PointCloudColorHandlerCustom<pcl::PointXYZRGB> rgb(cloud_xyzrgb, 255, 255, 255); - viewer.addPointCloud (cloud_xyzrgb, rgb, "input_cloud"); - viewer.setCameraPosition(0,0,-2,0,-1,0,0); + // Add point picking callback to viewer: + struct callback_args_color cb_args; - // Add point picking callback to viewer: - struct callback_args_color cb_args; -//#if (XYZRGB_CLOUDS) -// PointCloudPtr clicked_points_3d (new PointCloud); -//#else -// pcl::PointCloud::Ptr clicked_points_3d (new pcl::PointCloud); -//#endif + //#if (XYZRGB_CLOUDS) + // PointCloudPtr clicked_points_3d (new PointCloud); + //#else + // pcl::PointCloud::Ptr clicked_points_3d (new pcl::PointCloud); + //#endif + + cb_args.clicked_points_3d = clicked_points_3d; + cb_args.viewerPtr = &viewer; + viewer.registerPointPickingCallback (GroundplaneEstimation::pp_callback, (void*)&cb_args); - pcl::PointCloud<pcl::PointXYZRGB>::Ptr clicked_points_3d (new pcl::PointCloud<pcl::PointXYZRGB>); - cb_args.clicked_points_3d = clicked_points_3d; - cb_args.viewerPtr = &viewer; - viewer.registerPointPickingCallback (GroundplaneEstimation::pp_callback, (void*)&cb_args); + // Spin until
'Q' is pressed: + viewer.spin(); + viewer.setSize(1,1); // resize viewer in order to make it disappear + viewer.spinOnce(); + viewer.close(); // close method does not work + std::cout << "done." << std::endl; + } + else + { + cv::Mat curr_image (cloud_xyzrgb->height, cloud_xyzrgb->width, CV_8UC1); + for (int i=0;i<cloud_xyzrgb->height;i++) + { + for (int j=0;j<cloud_xyzrgb->width;j++) + { + curr_image.at<unsigned char>(i,j) = cloud_->at(j,i).r; + } + } + + // Image equalization + cv::equalizeHist(curr_image, curr_image); + + // Image conversion + cv::cvtColor(curr_image, curr_image, CV_GRAY2BGR); + + // Add point picking callback to viewer: + std::vector<cv::Point> clicked_points_2d; + bool selection_finished = false; + struct callback_args_image cb_args; + cb_args.clicked_points_2d = clicked_points_2d; + cb_args.selection_finished = selection_finished; + cv::namedWindow("Pick 3 points"); + cv::setMouseCallback("Pick 3 points", click_cb, (void*)&cb_args); + cv::startWindowThread(); + // Select three points from the image: + while(!cb_args.selection_finished) + { + for(unsigned int i = 0; i < cb_args.clicked_points_2d.size(); i++) + { + cv::Point p = cb_args.clicked_points_2d[i]; + cv::circle(curr_image, p, 5, cv::Scalar(0, 0, 255), -1); + } + cv::imshow("Pick 3 points", curr_image); + cv::waitKey(1); + } + cv::destroyWindow("Pick 3 points"); + cv::waitKey(1); // if the window does not have focus, + // its closure is not guaranteed - // Spin until 'Q' is pressed: - viewer.spin(); - viewer.setSize(1,1); // resize viewer in order to make it disappear - viewer.spinOnce(); - viewer.close(); // close method does not work - std::cout << "done." << std::endl; + // Select the corresponding 3D points from the point cloud: + for(unsigned int i = 0; i < cb_args.clicked_points_2d.size(); i++) + { + cv::Point p = cb_args.clicked_points_2d[i]; + clicked_points_3d->points.push_back (cloud_->at(p.x,p.y)); + } + } // Keep only the last three clicked points: while(clicked_points_3d->points.size()>3) @@ -625,6 +678,28 @@ open_ptrack::detection::GroundplaneEstimation<PointT>::pp_callback (const pcl::v std::cout << current_point.x << " " << current_point.y << " " << current_point.z << std::endl; } +template <typename PointT> void +open_ptrack::detection::GroundplaneEstimation<PointT>::click_cb(int event, int x, int y, int flags, void* args) +{ + struct callback_args_image* data = (struct callback_args_image *)args; + + switch(event) + { + case CV_EVENT_LBUTTONUP: + { + // TODO: check whether the depth at the clicked pixel is NaN + cv::Point p(x, y); + data->clicked_points_2d.push_back(p); + break; + } + case CV_EVENT_RBUTTONUP: + { + data->selection_finished = true; + break; + } + } +} + template <typename PointT> bool open_ptrack::detection::GroundplaneEstimation<PointT>::planeHeightComparator (pcl::PlanarRegion<PointT> region1, pcl::PlanarRegion<PointT> region2) { diff --git a/docs/JETSON_HOW-TO.txt b/docs/JETSON_HOW-TO.txt new file mode 100644 index 0000000..2f72a4f --- /dev/null +++ b/docs/JETSON_HOW-TO.txt @@ -0,0 +1,195 @@ +JETSON HOW-TO + +This guide is a superset of https://github.com/xlz/libfreenect2/wiki/Jetson-TK1-HOWTO + +All lines representing code start with an initial tab. + +FLASH THE DEVICE: + +Use JetPack, downloadable here: https://github.com/OpenPTrack/open_ptrack/tree/jetson-dev/jetpack +Follow the figures in the Jetson flashing doc (in particular, pay attention to the figure in which you put the device in FORCE RECOVERY mode; the instructions shown in the terminal window are buggy, so follow the ones in this guide!).
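+As an extra check (an addition to this guide), you can confirm from the host PC that the board really is in FORCE RECOVERY mode before flashing; a Jetson TK1 in recovery mode is listed by lsusb with the same 0955:7140 ID that the imaging guide below looks for: + + lsusb | grep 0955:7140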
+ +After the flashing phase you should be able to ssh into the device if it is powered on and attached to your Local Area Network. Try to connect to it with this command: + + ssh -X ubuntu@tegra-ubuntu.local + +The default password is: +ubuntu + +If the above command does not work, check your network with IP scanning tools and see whether the device appears. +If you change the default username & password, the following commands should be changed accordingly. + + + +PRELIMINARY STEPS: + +1) Enable full USB 3.0 performance +USB 3.0 must be enabled for full performance (at 30 FPS the JPEG stream consumes 20 MB/s and the depth stream 85 MB/s). Edit /boot/extlinux/extlinux.conf to change usb_port_owner_info=0 to usb_port_owner_info=2 to enable USB 3.0. +Also add usbcore.autosuspend=-1 at the end of the same boot command line to disable USB autosuspend. + +2) Add udev rule for Kinect v2: create /etc/udev/rules.d/90-kinect2.rules with the following content: + # ATTR{product}=="Kinect2" + SUBSYSTEM=="usb", ATTR{idVendor}=="045e", ATTR{idProduct}=="02c4", MODE="0666" + SUBSYSTEM=="usb", ATTR{idVendor}=="045e", ATTR{idProduct}=="02d8", MODE="0666" + SUBSYSTEM=="usb", ATTR{idVendor}=="045e", ATTR{idProduct}=="02d9", MODE="0666" + + + +INSTALL UTILITIES & ENABLE UNIVERSE+MULTIVERSE + + sudo apt-add-repository universe + sudo apt-add-repository multiverse + sudo apt-get update + sudo apt-get install -y bash-completion command-not-found locate git gitg vim + + + +INSTALL & CONFIGURE ROS BASE SYSTEM & POINT CLOUD LIBRARY + + sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' + sudo apt-key adv --keyserver hkp://pool.sks-keyservers.net --recv-key 0xB01FA116 + sudo apt-get update + sudo apt-get install -y ros-indigo-desktop + sudo apt-get install -y python-rosinstall + sudo add-apt-repository -y ppa:v-launchpad-jochen-sprickerhof-de/pcl + sudo apt-get update + sudo apt-get install -y libpcl-1.7-all + sudo apt-get install -y ros-indigo-compressed-depth-image-transport ros-indigo-compressed-image-transport ros-indigo-pcl-ros ros-indigo-camera-info-manager ros-indigo-driver-base ros-indigo-calibration + sudo rosdep init + rosdep update + echo "source /opt/ros/indigo/setup.bash" >> ~/.bashrc + source ~/.bashrc + mkdir -p ~/workspace/ros/catkin/src + cd ~/workspace/ros/catkin + catkin_make --force-cmake + echo "export KINECT_DRIVER=openni" >> ~/.bashrc + echo "export LC_ALL=C" >> ~/.bashrc + echo "source /home/ubuntu/workspace/ros/catkin/devel/setup.bash" >> ~/.bashrc + + + +INSTALL & CONFIGURE NTP + +Follow https://github.com/OpenPTrack/open_ptrack/wiki/Time%20Synchronization + + + +INSTALL CUDA + + wget http://developer.download.nvidia.com/compute/cuda/6_5/rel/installers/cuda-repo-l4t-r21.2-6-5-prod_6.5-34_armhf.deb + sudo dpkg -i cuda-repo-l4t-r21.2-6-5-prod_6.5-34_armhf.deb + sudo apt-get update + sudo apt-get install -y cuda-toolkit-6-5 + sudo usermod -a -G video $USER + echo "# Add CUDA bin & library paths:" >> ~/.bashrc + echo "export PATH=/usr/local/cuda/bin:$PATH" >> ~/.bashrc + echo "export LD_LIBRARY_PATH=/usr/local/cuda/lib:$LD_LIBRARY_PATH" >> ~/.bashrc + source ~/.bashrc + + + +INSTALL LIBFREENECT2 & ADD THE SCRIPTS FOR MAX PERFORMANCE + +Reboot your Jetson: + sudo reboot + +By default, only one core of the Jetson is active and it runs at the minimum frequency. The memory bus, GPU frequency and GPU memory frequency also need to be raised for maximum performance.
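+As a rough illustration (an addition to this guide; the sysfs paths are assumed from the Tegra TK1 layout, and the scripts shipped with libfreenect2 introduced below are the authoritative version), enabling max performance boils down to writes like: + + # as root: keep all CPU cores online and pin the frequency governor + echo 0 > /sys/devices/system/cpu/cpuquiet/tegra_cpuquiet/enable + echo userspace > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor + cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq > /sys/devices/system/cpu/cpu0/cpufreq/scaling_setspeed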
+You can do this using the scripts in the libfreenect2 repository. There are scripts for enabling max performance, for checking the status of the board and for disabling USB autosuspend (to be used after having connected the Kinect v2). + + cd + sudo apt-get install -y build-essential libturbojpeg libtool autoconf libudev-dev cmake mesa-common-dev freeglut3-dev libxrandr-dev doxygen libxi-dev libjpeg-turbo8-dev + git clone https://github.com/openptrack/libfreenect2.git + cd libfreenect2 + git checkout jetson-dev + cd depends + ./install_deps.sh + sudo ln -s /usr/lib/arm-linux-gnueabihf/libturbojpeg.so.0.0.0 /usr/lib/arm-linux-gnueabihf/libturbojpeg.so + cd .. + cd jetson_scripts + chmod +x * + sudo cp * /usr/bin +*check Protonect* +Kinect v2 connect instructions: +1) Before connecting the Kinect, open a terminal and launch the following command: + sudo MaxPerformance &> /dev/null +Ignore its output and invoke this command: + sudo Status + +If everything is good, you should see something like this: + +CPUs active (it should be 0-3): +0-3 +CPUs freq (it should be 2032500000): +2320500 +2320500 +2320500 +2320500 +CPU governor (it should be userspace): +userspace +userspace +userspace +userspace +GPU clock (it should be 852000000): +852000000 +GPU memory clock (it should be 924000000): +924000000 + +If the output does not look like this, try rebooting the Jetson and re-checking the steps above. +2) Connect the cable of the already powered-on Kinect v2 to the Jetson's USB port. +3) Wait 3 seconds. +4) Invoke + sudo Status +At the end of the Status output you should see the autosuspend states all at -1 (no 2). If this is not the case, something went wrong: probably the Kinect v2 is powered off and not listed, or the PRELIMINARY STEPS were not completed correctly. Redo the steps and/or reboot the device, and mind the order of the instructions! +5) The Kinect v2 is now correctly connected; next, check that Protonect works: + cd ../examples/protonect/ + mkdir build && cd build + cmake .. + make -j4 + ../bin/Protonect +You should see, with some delay (a lot of data is being pushed over ssh), three windows open: one with the depth stream, one with the color stream and one with the IR stream. Don’t worry about the very low framerate: the stream is being viewed through an encrypted channel. If you don’t see anything and the terminal shows many lines like +[DepthPacketStreamParser::handleNewData] subpacket incomplete (needed X received Y) +then probably the Jetson is not in max performance mode or the Kinect has not been attached correctly (mind the order specified previously!). +If you see USB errors, Protonect was probably linked against the wrong version of libusb, or USB autosuspend is active. Reboot and/or check ldd Protonect. + +If Protonect works, install the libraries system-wide (still from inside the build directory): + sudo make install + + + +INSTALL IAI_KINECT2 + + cd /home/ubuntu/workspace/ros/catkin/src + git clone https://github.com/OpenPTrack/iai_kinect2 + cd iai_kinect2 + git checkout jetson-dev + Catkin + (only if you see an internal compiler error) + CatkinJ1 + +You can check the installation by running: + sudo MaxPerformance &> /dev/null + roslaunch kinect2_bridge kinect2_bridge_ir.launch + +You should see output like these lines (after some initial messages): +[CudaDepthPacketProcessor] avg. time: 21.5727ms -> ~46.3548Hz +[TegraJpegRgbPacketProcessor] avg.
time: 17.6418ms -> ~56.6837Hz + +Furthermore, you can check the point cloud generation frequency with this command: + rostopic hz /kinect2_head/depth_ir/points +You should see 20-21 frames per second (which is much more than what the original version of iai_kinect2 provided). + + + +INSTALL OPEN_PTRACK + + cd /home/ubuntu/workspace/ros/catkin/src + git clone https://github.com/OpenPTrack/open_ptrack + cd open_ptrack + git checkout jetson-dev + cd scripts + chmod +x * + ./calibration_toolkit_install.sh + cd /home/ubuntu/workspace/ros/catkin/ + catkin_make --pkg calibration_msgs + catkin_make --pkg opt_msgs + CatkinJ1 diff --git a/docs/Jetson_Flashing_guide.odt b/docs/Jetson_Flashing_guide.odt new file mode 100644 index 0000000..46a2e6c Binary files /dev/null and b/docs/Jetson_Flashing_guide.odt differ diff --git a/docs/jetson_imaging_guide.txt b/docs/jetson_imaging_guide.txt new file mode 100644 index 0000000..96109ff --- /dev/null +++ b/docs/jetson_imaging_guide.txt @@ -0,0 +1,71 @@ +This guide has been taken from http://jetsonhacks.com/2015/08/26/clone-image-nvidia-jetson-tk1/ + + + +Guideline and Instructions + +Note: This demonstration was on a Jetson originally flashed with L4T 21.4 using JetPack 1.2 on the host PC. +This is a demonstration of the simplest way to save the Jetson image. This technique will save what is called the ‘APP’ partition of the Jetson flash memory (which basically is the ‘Root File System’ and includes the working area of your system) to a file on the host PC that you originally used to flash the Jetson with JetPack. + +If you are a Linux expert and have added more partitions, or are working on partitions other than ‘APP’ on the system side, see the Jetson/Cloning page on the Jetson wiki for more information about cloning the Jetson. If you don’t know what any of that gibberish means (like me), then you’re in luck because this article is for you. + +Cloning the Jetson is actually made up of two parts. The first part is saving the current ‘image’ from the Jetson flash memory as a file onto the JetPack PC. The second part is copying that image back to a Jetson. The Jetson may be the same one, or another Jetson TK1. As you might guess, this is useful when you are trying to replicate a Jetson on another Jetson, or multiple Jetsons. +Saving the Image + +In order to save the image, first you will need to navigate to the proper directory on the host PC to begin the process. Open a Terminal and ‘cd’ to the JetPack install directory. + +For example, on a default install, the install directory is ~/JetPackTK1-1.1/ + + $ cd ~/JetPackTK1-1.1 + +Then, switch over to the boot loader directory: + + $ cd Linux_for_Tegra/bootloader + +At this point, you’re almost ready to go. Make sure that the Jetson is attached to the PC using the supplied USB cable that you used to flash the Jetson with originally. Set the Jetson into recovery mode, as you did during the original flash. At this point, you should be able to execute on the PC: + + $ lsusb + +and see the Jetson, which is identified as 0955:7140 Nvidia Corp. +To download the image from the Jetson, execute: + + $ sudo ./nvflash --read APP clone.img --bl ardbeg/fastboot.bin --go + +At this point, you should see magic happen. There will be a counter that is updated as the bytes are downloaded from the Jetson. There are 15 GB of data, so reading them takes a while over USB 2. On my machine it takes about 30 minutes. A PC system running from a virtual machine could take much longer. + +Once the download is complete, you will see a file in the directory named ‘clone.img’ which is about 15 GB in size. This is the downloaded image from the Jetson. You will want to change the permissions of clone.img: + + $ sudo chmod 744 clone.img + +You can move clone.img to another place to save it. Note: the file can be compressed using Gzip to about a third the size, useful for archival purposes.
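+For example (an addition to this article), archiving the clone and getting it back before a restore could look like: + + $ gzip clone.img # produces clone.img.gz, roughly a third of the size + $ gunzip clone.img.gz # gives back clone.img for the restore step below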
+Restoring the Image + +Restoring a saved image consists of flashing the cloned image to a Jetson. This is the same procedure that happens ‘under the covers’ of JetPack, except that instead of a basic ‘empty’ system, everything that was loaded on the cloned device is flashed. + +The file ‘system.img’ in the bootloader directory is the name of the file which represents the disk image of the Jetson. In order to use the cloned image, you will need to copy the file ‘clone.img’ to ‘system.img’. First, you will need to be in the boot loader directory as described before. For example: + + $ cd ~/JetPackTK1-1.1/Linux_for_Tegra/bootloader + +Copy the cloned image to the system image that will be flashed: + + $ sudo cp clone.img system.img + +Notes: The clone.img file must be decompressed if you compressed it to save space. This also assumes that clone.img is still in the bootloader directory. If it was moved or the name changed, you will have to make the necessary adjustments. In any case, the uncompressed cloned image needs to end up in system.img in the bootloader directory. + +Next, make sure that the Jetson is connected to the PC through the micro connector USB cable as during normal flashing, and place the Jetson into recovery mode. At this point, you should be able to execute on the PC: + + $ lsusb + +and see the Jetson, which is identified as 0955:7140 Nvidia Corp. + +At this point, we’re ready to flash. Here’s the magic incantation: + + $ cd ../ + # As an example, you should be in ~/JetPackTK1-1.1/Linux_for_Tegra/ + $ sudo ./flash.sh -r -S 14580MiB jetson-tk1 mmcblk0p1 + +The -r flag tells the flashing program to skip building and reuse the existing system.img file. The partition size (-S 14580MiB) is the default that JetPack uses. + +More magic should happen as the Jetson is flashed. Note that it will take longer than normal to flash the Jetson in comparison to the empty system, as it restores all 15 GB. In the demonstration, the process took about 15 minutes. + +When the system is done flashing, reboot the Jetson. The Jetson will be restored to the state of the original at the time of cloning.
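+As a final check (an addition to this article), the restored Jetson should come back on the network just like a freshly flashed one, e.g. reachable with the same command used in the HOW-TO above: + + $ ssh -X ubuntu@tegra-ubuntu.local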
\ No newline at end of file diff --git a/jetpack/JetPackTK1-1.1-cuda6.5-linux-x64.run b/jetpack/JetPackTK1-1.1-cuda6.5-linux-x64.run new file mode 100755 index 0000000..48a94b6 Binary files /dev/null and b/jetpack/JetPackTK1-1.1-cuda6.5-linux-x64.run differ diff --git a/opt_utils/apps/ros2udp_converter.cpp b/opt_utils/apps/ros2udp_converter.cpp index 8903ee8..8aa0005 100644 --- a/opt_utils/apps/ros2udp_converter.cpp +++ b/opt_utils/apps/ros2udp_converter.cpp @@ -150,7 +150,6 @@ aliveIDsCallback(const opt_msgs::IDArray::ConstPtr& alive_ids_msg) /// Copy string to message buffer: udp_data.si_num_byte_ = json_string.length()+1; char buf[udp_data.si_num_byte_]; - //for (unsigned int i = 0; i < udp_data.si_num_byte_; i++) for (unsigned int i = 0; i < udp_data.si_num_byte_; i++) { buf[i] = 0; diff --git a/scripts/ceres.patch b/scripts/ceres.patch old mode 100644 new mode 100755 diff --git a/scripts/ros_install.sh b/scripts/ros_install.sh index 1a282d0..3cf6c82 100755 --- a/scripts/ros_install.sh +++ b/scripts/ros_install.sh @@ -1,17 +1,12 @@ #!/bin/bash UBUNTU_VERSION=`lsb_release -c -s` -ROS_DISTRO=hydro - -if [ $UBUNTU_VERSION = trusty ] || [ $UBUNTU_VERSION = saucy ] ; then - ROS_DISTRO=indigo -fi +ROS_DISTRO=indigo ROS_PACKAGES="python-rosinstall ros-$ROS_DISTRO-robot-state-publisher ros-$ROS_DISTRO-cmake-modules ros-$ROS_DISTRO-freenect-stack ros-$ROS_DISTRO-openni-launch ros-$ROS_DISTRO-camera-info-manager-py" echo "deb http://packages.ros.org/ros/ubuntu $UBUNTU_VERSION main" | sudo tee /etc/apt/sources.list.d/ros-latest.list wget http://packages.ros.org/ros.key -O - | sudo apt-key add - sudo apt-get update -sudo apt-get upgrade -y --force-yes sudo apt-get install ros-$ROS_DISTRO-desktop-full -y sudo rosdep init rosdep update diff --git a/tracking/src/kalman_filter.cpp b/tracking/src/kalman_filter.cpp index ccde5fd..9efd8d1 100644 --- a/tracking/src/kalman_filter.cpp +++ b/tracking/src/kalman_filter.cpp @@ -127,6 +127,9 @@ namespace open_ptrack this->position_variance_ = orig.position_variance_; this->depth_multiplier_ = orig.depth_multiplier_; this->acceleration_variance_ = orig.acceleration_variance_; + delete this->predict_model_; + delete this->observe_model_; + delete this->filter_; this->predict_model_ = new PredictModel(dt_, acceleration_variance_); this->observe_model_ = new ObserveModel(position_variance_, output_dimension_); this->filter_ = new Bayesian_filter::Unscented_scheme(4, 2); diff --git a/tracking/src/munkres.cpp b/tracking/src/munkres.cpp index 26a4a3f..5445ad6 100644 --- a/tracking/src/munkres.cpp +++ b/tracking/src/munkres.cpp @@ -385,6 +385,23 @@ namespace open_ptrack } } + // Releasing the memory + for(int r = 0; r < max_dim; ++r) + { + delete []matrix[r]; + } + delete []matrix; + delete []rowCover; + delete []colCover; + for (int i = 0; i < rows; i++) { + delete []m[i]; + } + delete []m; + for (int i = 0; i < rows*cols; i++) { + delete []path[i]; + } + delete []path; + return matrix_out; }