diff --git a/detection/CMakeLists.txt b/detection/CMakeLists.txt index b030e6a..f63134c 100644 --- a/detection/CMakeLists.txt +++ b/detection/CMakeLists.txt @@ -1,15 +1,30 @@ cmake_minimum_required(VERSION 2.8.3) project(detection) set(CMAKE_BUILD_TYPE RelWithDebInfo) -find_package(catkin REQUIRED COMPONENTS cmake_modules roscpp rospy pcl_ros pcl_conversions nodelet rosconsole image_transport opt_utils opt_msgs dynamic_reconfigure) + + +# Check for c++11 support +INCLUDE(CheckCXXCompilerFlag) +CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11) +CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X) +IF(COMPILER_SUPPORTS_CXX11) +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +ELSEIF(COMPILER_SUPPORTS_CXX0X) +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") +ELSE() + MESSAGE(ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.") +ENDIF() + +find_package(catkin REQUIRED COMPONENTS cmake_modules roscpp rospy pcl_ros pcl_conversions nodelet rosconsole image_transport opt_utils opt_msgs dynamic_reconfigure std_msgs rostime sensor_msgs message_filters compressed_image_transport compressed_depth_image_transport kinect2_bridge) +find_package(cv_bridge ) find_package(VTK REQUIRED) if(VTK_FOUND) - include_directories(${VTK_INCLUDE_DIRS}) - link_directories(${VTK_LIBRARY_DIRS}) - add_definitions(${VTK_DEFINITIONS}) - include (${VTK_USE_FILE}) -endif() + include_directories(${VTK_INCLUDE_DIRS}) + link_directories(${VTK_LIBRARY_DIRS}) + add_definitions(${VTK_DEFINITIONS}) + include (${VTK_USE_FILE}) +endif() find_package(OpenCV REQUIRED) include_directories(${OpenCV_INCLUDE_DIRS}) @@ -32,8 +47,7 @@ if (NOT PCL_FOUND) endif (NOT PCL_FOUND) # Dynamic reconfigure support -generate_dynamic_reconfigure_options(cfg/HaarDispAdaDetector.cfg cfg/GroundBasedPeopleDetector.cfg) - +generate_dynamic_reconfigure_options(cfg/HaarDispAdaDetector.cfg cfg/GroundBasedPeopleDetector.cfg cfg/multiple_objects_detection.cfg) include_directories(cfg/cpp) catkin_package( @@ -43,10 +57,15 @@ catkin_package( ) -add_library(${PROJECT_NAME} src/detection_source.cpp src/detection.cpp) +add_library(${PROJECT_NAME} src/detection_source.cpp src/detection.cpp src/multiple_objects_detection/object_detector.cpp src/multiple_objects_detection/roi_zz.cpp) add_dependencies(${PROJECT_NAME} ${PROJECT_NAME}_gencfg) target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBS}) + +add_executable(multiple_objects_detection_node apps/multiple_objects_detection_node.cpp) +target_link_libraries(multiple_objects_detection_node ${PROJECT_NAME} ${OpenCV_LIBRARIES} ${PCL_LIBRARIES} ${catkin_LIBRARIES}) + + add_executable(ground_based_people_detector apps/ground_based_people_detector_node.cpp) SET_TARGET_PROPERTIES(ground_based_people_detector PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS}) target_link_libraries(ground_based_people_detector ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering) diff --git a/detection/apps/multiple_objects_detection_node.cpp b/detection/apps/multiple_objects_detection_node.cpp new file mode 100644 index 0000000..3adfd08 --- /dev/null +++ b/detection/apps/multiple_objects_detection_node.cpp @@ -0,0 +1,121 @@ +#include +#include +#include + +#include "ros/ros.h" +#include "std_msgs/String.h" + +#include +#include +#include "open_ptrack/multiple_objects_detection/multiple_objects_detection.h" + +using namespace std; +using namespace cv; + + +//Parms for receiving color and depth image +std::string cameraName; +bool 
useExact = false;
+bool useCompressed = false;
+
+std::string output_detection_topic;
+
+bool set_object_names; // whether to use this detector to set the object names
+
+//Params for use_background_removal
+bool use_background_removal;
+int background_calculate_frames;
+int threshold_4_detecting_foreground; // threshold on the depth difference used to distinguish background from foreground
+
+bool show_2D_tracks; // whether to show the 2D tracks
+
+void configCb(detection::multiple_objects_detectionConfig &config, uint32_t level)
+{
+  Object_Detector::HMin=config.HMin;
+  Object_Detector::SMin=config.SMin;
+  Object_Detector::VMin=config.VMin;
+  Object_Detector::HMax=config.HMax;
+  Object_Detector::SMax=config.SMax;
+  Object_Detector::VMax=config.VMax;
+  Object_Detector::h_bins=config.h_bins;
+  Object_Detector::s_bins=config.s_bins;
+  Object_Detector::d_bins=config.d_bins;
+  Object_Detector::AREA_TOLERANCE=config.AREA_TOLERANCE;
+  Object_Detector::QUALITY_TOLERANCE=config.QUALITY_TOLERANCE;
+  Object_Detector::DENSITY_TOLORENCE=config.DENSITY_TOLORENCE;
+}
+
+int main(int argc, char** argv){
+
+  ros::init(argc, argv, "multiple_objects_detection");
+  ros::NodeHandle nh("~");
+
+  if(!ros::ok())
+  {
+    return 0;
+  }
+
+  nh.param("use_background_removal",use_background_removal,false);
+  nh.param("background_calculate_frames",background_calculate_frames,100);
+  nh.param("threshold_4_detecting_foreground",threshold_4_detecting_foreground,100);
+  nh.param("show_2D_track",show_2D_tracks,false);
+  nh.param("output_detection_topic",output_detection_topic,std::string("/objects_detector/detections"));
+  nh.param("set_object_names",set_object_names,false);
+
+  // set the static variables of the Object_Detector class; each variable is documented in the class header
+  nh.param("HMin",Object_Detector::HMin,0);
+  nh.param("SMin",Object_Detector::SMin,80);
+  nh.param("VMin",Object_Detector::VMin,100);
+  nh.param("HMax",Object_Detector::HMax,360);
+  nh.param("SMax",Object_Detector::SMax,256);
+  nh.param("VMax",Object_Detector::VMax,256);
+  nh.param("h_bins",Object_Detector::h_bins,36);
+  nh.param("s_bins",Object_Detector::s_bins,18);
+  nh.param("d_bins",Object_Detector::d_bins,100);
+
+  nh.param("AREA_TOLERANCE",Object_Detector::AREA_TOLERANCE,20);
+  nh.param("QUALITY_TOLERANCE",Object_Detector::QUALITY_TOLERANCE,20000);
+  nh.param("DENSITY_TOLORENCE",Object_Detector::DENSITY_TOLORENCE,4.0);
+
+  nh.param("Backprojection_Mode",Object_Detector::Backprojection_Mode,std::string("HSD"));
+
+  // std::cout << "output detection topic: " << output_detection_topic << std::endl;
+
+  std::cout << "HSV range:" << std::endl
+            << "H: (" << Object_Detector::HMin << "~" << Object_Detector::HMax << ")" << std::endl
+            << "S: (" << Object_Detector::SMin << "~" << Object_Detector::SMax << ")" << std::endl
+            << "V: (" << Object_Detector::VMin << "~" << Object_Detector::VMax << ")" << std::endl
+            << std::endl;
+
+  std::cout << "Occlusion Params:" << std::endl
+            << "AREA_TOLERANCE: " << Object_Detector::AREA_TOLERANCE << std::endl
+            << "QUALITY_TOLERANCE: " << Object_Detector::QUALITY_TOLERANCE << std::endl
+            << "DENSITY_TOLORENCE: " << Object_Detector::DENSITY_TOLORENCE << std::endl
+            << std::endl;
+
+  // dynamic reconfigure server
+  dynamic_reconfigure::Server<detection::multiple_objects_detectionConfig> server;
+  dynamic_reconfigure::Server<detection::multiple_objects_detectionConfig>::CallbackType f;
+  f = boost::bind(&configCb, _1, _2);
+  server.setCallback(f);
+
+  // main class for detection
+  Multiple_Objects_Detection _multiple_objects_detection(output_detection_topic,set_object_names,useExact, useCompressed,
+                                                         use_background_removal,background_calculate_frames,threshold_4_detecting_foreground,show_2D_tracks);
+
+  std::cout << "start detecting..." << std::endl;
+  _multiple_objects_detection.run_detection();
+
+  ros::shutdown();
+  return 0;
+}
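The node wires the dynamic_reconfigure callback and then hands control to Multiple_Objects_Detection::run_detection(), publishing opt_msgs::DetectionArray messages on output_detection_topic (/objects_detector/detections by default). For orientation, a minimal downstream subscriber could look like the sketch below; it is not part of this patch and assumes only the Detection fields already used in this diff (object_name, box_2D, centroid):

```cpp
#include <ros/ros.h>
#include <opt_msgs/DetectionArray.h>

// Hypothetical consumer of the detections published by multiple_objects_detection_node.
void detectionsCb(const opt_msgs::DetectionArray::ConstPtr& msg)
{
  for (size_t i = 0; i < msg->detections.size(); ++i)
  {
    const opt_msgs::Detection& d = msg->detections[i];
    ROS_INFO_STREAM(d.object_name << ": centroid ("
                    << d.centroid.x << ", " << d.centroid.y << ", " << d.centroid.z
                    << "), box_2D " << d.box_2D.width << "x" << d.box_2D.height);
  }
}

int main(int argc, char** argv)
{
  ros::init(argc, argv, "detections_listener");
  ros::NodeHandle nh;
  ros::Subscriber sub = nh.subscribe("/objects_detector/detections", 3, detectionsCb);
  ros::spin();
  return 0;
}
```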
diff --git a/detection/cfg/multiple_objects_detection.cfg b/detection/cfg/multiple_objects_detection.cfg
new file mode 100755
index 0000000..9753a6f
--- /dev/null
+++ b/detection/cfg/multiple_objects_detection.cfg
@@ -0,0 +1,36 @@
+#! /usr/bin/env python
+
+# Declare the parameters that control multiple-object detection
+
+PACKAGE='detection'
+
+from dynamic_reconfigure.parameter_generator_catkin import *
+
+gen = ParameterGenerator()
+
+##############################################
+## multiple_objects_detection: object detector parameters ##
+##############################################
+
+gen.add("HMax", int_t, 0, "Hue max", 360, 0, 360)
+gen.add("SMax", int_t, 0, "Saturation max", 255, 0, 255)
+gen.add("VMax", int_t, 0, "Value max", 255, 0, 255)
+gen.add("HMin", int_t, 0, "Hue min", 0, 0, 360)
+gen.add("SMin", int_t, 0, "Saturation min", 80, 0, 255)
+gen.add("VMin", int_t, 0, "Value min", 100, 0, 255)
+
+gen.add("h_bins", int_t, 0, "Divide (HMin,HMax) into h_bins parts", 36, 1, 100)
+gen.add("s_bins", int_t, 0, "Divide (SMin,SMax) into s_bins parts", 18, 1, 100)
+gen.add("d_bins", int_t, 0, "Divide (DMin,DMax) into d_bins parts", 100, 1, 255)
+
+gen.add("AREA_TOLERANCE", int_t, 0, "AREA_TOLERANCE", 20, 5, 100)
+gen.add("QUALITY_TOLERANCE", int_t, 0, "QUALITY_TOLERANCE", 20000, 5000, 65535)
+gen.add("DENSITY_TOLORENCE", int_t, 0, "DENSITY_TOLORENCE", 4, 0, 50)
+
+exit(gen.generate(PACKAGE, "detection", "multiple_objects_detection"))
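The sliders above map directly onto the OpenCV histogram calls used by Object_Detector later in this patch. The sketch below illustrates HS mode with the same OpenCV 2.x calls that appear in object_detector.cpp; the function name and signature are illustrative, not part of the patch:

```cpp
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"

// Illustrative HS-mode back-projection driven by the cfg values above.
cv::Mat hs_backprojection(const cv::Mat& bgr, const cv::Rect& selection,
                          int HMin, int SMin, int VMin,
                          int HMax, int SMax, int VMax,
                          int h_bins, int s_bins)
{
  cv::Mat hsv, hsv_mask, hs_hist, backproj;
  cv::cvtColor(bgr, hsv, CV_BGR2HSV);
  // HSV mask built from the min/max values
  cv::inRange(hsv, cv::Scalar(HMin, SMin, VMin),
              cv::Scalar(HMax, SMax, VMax), hsv_mask);

  int hs_size[] = { h_bins, s_bins };           // (HMin,HMax) and (SMin,SMax) split into bins
  float h_ranges[] = { 0, (float)HMax };
  float s_ranges[] = { 0, (float)SMax };
  const float* phs_ranges[] = { h_ranges, s_ranges };
  int hs_channels[] = { 0, 1 };                 // hue and saturation planes

  // histogram of the selected ROI, then back-projection over the whole frame
  cv::Mat roi(hsv, selection), maskroi(hsv_mask, selection);
  cv::calcHist(&roi, 1, hs_channels, maskroi, hs_hist, 2, hs_size, phs_ranges, true, false);
  cv::normalize(hs_hist, hs_hist, 0, 255, CV_MINMAX);
  cv::calcBackProject(&hsv, 1, hs_channels, hs_hist, backproj, phs_ranges, 1, true);
  return backproj;
}
```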
diff --git a/detection/conf/ground_based_people_detector_kinect2.yaml b/detection/conf/ground_based_people_detector_kinect2.yaml
index af5daf2..12be92e 100644
--- a/detection/conf/ground_based_people_detector_kinect2.yaml
+++ b/detection/conf/ground_based_people_detector_kinect2.yaml
@@ -14,7 +14,7 @@ valid_points_threshold: 0.3
 ## Background subtraction ##
 ############################
 # Flag enabling/disabling background subtraction:
-background_subtraction: false
+background_subtraction: true
 # Resolution of the octree representing the background:
 background_resolution: 0.3
 # Seconds to use to learn the background:
@@ -44,7 +44,7 @@ heads_minimum_distance: 0.3
 # Voxel size used to downsample the point cloud (lower: detection slower but more precise; higher: detection faster but less precise):
 voxel_size: 0.06
 # Denoising flag. If true, a statistical filter is applied to the point cloud to remove noise:
-apply_denoising: false
+apply_denoising: true
 # MeanK for denoising (the higher it is, the stronger is the filtering):
 mean_k_denoising: 5
 # Standard deviation for denoising (the lower it is, the stronger is the filtering):
diff --git a/detection/conf/multiple_objects_detection.yaml b/detection/conf/multiple_objects_detection.yaml
new file mode 100644
index 0000000..422e152
--- /dev/null
+++ b/detection/conf/multiple_objects_detection.yaml
@@ -0,0 +1,81 @@
+############################
+## multiple_objects_detection ##
+############################
+
+output_detection_topic: /objects_detector/detections
+
+# if you want to see the track results on the image, set show_2D_track to true
+show_2D_track: true
+
+########### choose on which camera the object names are set ###########
+set_object_names: false
+########### choose on which camera the object names are set ###########
+
+############################
+## Background removal ##
+############################
+
+use_background_removal: false
+
+# how many frames to use to calculate the background
+background_calculate_frames: 100
+
+# threshold on the depth difference used to distinguish background from foreground
+threshold_4_detecting_foreground: 100
+
+##############################################
+## object_detector parameters ##
+##############################################
+
+######## for the HSV mask ######
+# set a mask (filter) to gain better performance (set SMin or VMin smaller to get a larger range, but also more noise)
+HMin: 0
+HMax: 360
+
+SMin: 80
+SMax: 256
+
+VMin: 100
+VMax: 256
+######## for the HSV mask #######
+
+########### for camshift ########
+# for the hue back-projection histogram, divide (HMin,HMax) into "h_bins" parts
+h_bins: 36
+s_bins: 18
+
+# for the depth, map the depth from (1000,9000) into (0,255), then divide it into "d_bins" parts
+d_bins: 100
+
+# Three options: H, HS, HSD.
+# If the objects have different colors, choose HS, which is faster; otherwise choose HSD.
+Backprojection_Mode: HS
+########### for camshift #########
+
+########### for camshift recovery from occlusion ###########
+## for camshift, each pixel of the back-projection image represents the similarity with the ROI: its value equals 255 * the probability taken from the histogram
+## QUALITY_TOLERANCE is the tolerance on the sum of the back-projection values inside the window; it represents the quality of the window.
+## DENSITY_TOLORENCE is the tolerance on the density, DENSITY = QUALITY / window.area();
+## AREA_TOLERANCE is the tolerance on the window position (x,y): the object may reappear in the next frame inside a search window that grows by AREA_TOLERANCE every frame, so the bigger AREA_TOLERANCE, the faster an object may move.
+## if QUALITY < QUALITY_TOLERANCE or DENSITY < DENSITY_TOLORENCE, the object is marked "occluded"; once both rise above their tolerances again, it becomes "not occluded";
+AREA_TOLERANCE: 20 ## AREA_TOLERANCE is also used to create the position_mask
+QUALITY_TOLERANCE: 20000
+DENSITY_TOLORENCE: 2.0
+########### for camshift recovery from occlusion ###########
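Read as code, the occlusion rule in the comments above amounts to the following sketch (illustrative names only; the actual logic lives in Object_Detector::object_shift in object_detector.cpp):

```cpp
#include "opencv2/core/core.hpp"

// Illustrative form of the QUALITY/DENSITY occlusion test described in the YAML comments;
// backproj is the back-projection image and window the current search window.
bool is_occluded(const cv::Mat& backproj, const cv::Rect& window,
                 int QUALITY_TOLERANCE, double DENSITY_TOLORENCE)
{
  double quality = cv::sum(backproj(window))[0];     // QUALITY: sum of values inside the window
  double density = quality / (double)window.area();  // DENSITY: QUALITY per pixel
  return quality < QUALITY_TOLERANCE || density < DENSITY_TOLORENCE;
}
```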
diff --git a/detection/include/open_ptrack/detection/detection.h b/detection/include/open_ptrack/detection/detection.h
index 3434bfa..f23bcf6 100644
--- a/detection/include/open_ptrack/detection/detection.h
+++ b/detection/include/open_ptrack/detection/detection.h
@@ -153,6 +153,13 @@ namespace open_ptrack
       double
       getDistance();
 
+
+      // get the object name from the msg
+      std::string
+      getObjectName();
+
+
+
       /**
        * \brief Returns if the detection corresponds to an occluded person or not.
        *
diff --git a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h
new file mode 100644
index 0000000..e23a409
--- /dev/null
+++ b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h
@@ -0,0 +1,723 @@
+/**
+ * Copyright 2014 University of Bremen, Institute for Artificial Intelligence
+ * Author: Thiemo Wiedemeyer
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include + +#include +#include + +#include +#include +#include +#include + +#include + +#include "open_ptrack/multiple_objects_detection/object_detector.h" +#include "open_ptrack/multiple_objects_detection/roi_zz.h" + +//publish the detections +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include + +#include + +using namespace opt_msgs; +using namespace sensor_msgs; +using namespace std; + +using namespace cv; + +class Multiple_Objects_Detection +{ +public: + +private: + + ////////////////////////For receiving color and depth//////////////////////// + std::mutex lock; + std::string topicColor, topicDepth; + const bool useExact, useCompressed; + bool updateImage; + bool running; + const size_t queueSize; + cv::Mat color,depth; + cv::Mat cameraMatrixColor, cameraMatrixDepth; + typedef message_filters::sync_policies::ExactTime ExactSyncPolicy; + typedef message_filters::sync_policies::ApproximateTime ApproximateSyncPolicy; + ros::NodeHandle nh; + ros::AsyncSpinner spinner; + image_transport::ImageTransport it; + // image_transport::Subscriber roi_image_sub; + ros::Subscriber roi_image_sub; + image_transport::SubscriberFilter *subImageColor, *subImageDepth; + message_filters::Subscriber *subCameraInfoColor, *subCameraInfoDepth; + message_filters::Synchronizer *syncExact; + message_filters::Synchronizer *syncApproximate; + ////////////////////////For receiving color and depth////////////////////////// + + + ////////////For roi selection//////////// + bool objects_selected; + bool finished_select_rois_from_file; + // std::vector rois_from_gui; + // + + std::vector> rois_from_gui; + + std::vector rois_from_file; + std::vector rois_from_file_namelist; + + + //for roi projection + tf::TransformListener tf_listener; + tf::StampedTransform transform_camera2world; + + ros::Subscriber sub_image2D_rois; + + ros::Subscriber sub_object_names_from_tracker; + ///////////For roi selection///////////// + + + ///////////For background removal/////////// + cv::Mat depth_max; + bool use_background_removal; + int background_calculate_frames; + int threshold_4_detecting_foreground; + ///////////For background removal/////////// + + + ///////////For camshift recover from occlusion/////////// + std::vector occludes; + ///////////For camshift recover from occlusion/////////// + + + ///////////For generating 3d position/////////// + float fx ,fy,cx,cy; + ///////////For generating 3d position/////////// + + + ///////////For publish detection topic/////////// + std::string output_detection_topic; + ros::Publisher detection_pub; + std::string color_header_frameId; + ///////////For publish detection topic/////////// + + + bool set_object_names;//if to use this detector to set the object names + + + ///////////For main detection/////////// + std::vector Object_Detectors; + std::vector current_detected_boxes; + cv::Mat main_color,main_depth_16,main_depth_8; + ///////////For main detection/////////// + + + ///////////For display/////////// + bool show_2D_tracks; + std::vector> tracks_2D; + ///////////For display/////////// + + //test time cost + std::chrono::time_point start, now; + + + +public: + Multiple_Objects_Detection(const std::string &output_detection_topic,const bool set_object_names,const bool 
useExact, const bool useCompressed, + const bool use_background_removal, const int background_calculate_frames,const int threshold_4_detecting_foreground,const bool show_2D_tracks) + : output_detection_topic(output_detection_topic),set_object_names(set_object_names),useExact(useExact), useCompressed(useCompressed),updateImage(false), running(false), + use_background_removal(use_background_removal),objects_selected(false), finished_select_rois_from_file(false),background_calculate_frames(background_calculate_frames),threshold_4_detecting_foreground(threshold_4_detecting_foreground), queueSize(5), + nh(), spinner(0), it(nh) ,show_2D_tracks(show_2D_tracks) + { + std::string cameraName = "kinect2_head"; + topicColor = "/" + cameraName + "/" + K2_TOPIC_LORES_COLOR K2_TOPIC_RAW; + topicDepth = "/" + cameraName + "/" + K2_TOPIC_LORES_DEPTH K2_TOPIC_RAW; + + cameraMatrixColor = cv::Mat::zeros(3, 3, CV_64F); + cameraMatrixDepth = cv::Mat::zeros(3, 3, CV_64F); + detection_pub=nh.advertise(output_detection_topic,3); + } + + ~Multiple_Objects_Detection() + { + } + + void run_detection(){ + + start_reciver();// define some subscribers + + for(; running && ros::ok();) + { + if(updateImage)//if recieved color and depth msg + { + lock.lock(); + main_color = this->color; + main_depth_16 = this->depth; + updateImage = false; + lock.unlock(); + + // if accept background_removal ,generate the new color image without background + if(use_background_removal) + { + if ((background_calculate_frames>0))// if the background has not been got + { + background_calculation(); + } + else//after getting the bakground (depth_max), use it to do background removal,generate new color + { + background_removal(); + } + } + + // if don't accept background_removal or background is already removed + if(!use_background_removal||(use_background_removal&&background_calculate_frames==0)) + { + //!!!!!!!!!!!!!!!!!!!!!!!main loop!!!!!!!!!!!!!!!!!!!!!!! + + if(finished_select_rois_from_file)//keep checkin gif there are new rois + select_rois_from_file(); + + if(!rois_from_gui.empty())//keep checkin gif there are new rois + select_rois_from_gui(); + + multiple_objects_detection_main();// main detection + + if(show_2D_tracks) + { + show_2D_tracks_on_image(); + } + } + } + } + } +private: + void start_reciver() + { + + sub_image2D_rois = nh.subscribe("/kinect2_head/image2D_rois_from_gui", 5, &Multiple_Objects_Detection::image2D_rois_from_gui_Callback, this); + // roi_image_sub = it.subscribe("/kinect2_head/image2D_rois_from_file/image",1,&Multiple_Objects_Detection::image2D_rois_from_file_Callback,this,image_transport::TransportHints("raw")); + roi_image_sub = nh.subscribe("/kinect2_head/image2D_rois_from_file/image",1,&Multiple_Objects_Detection::image2D_rois_from_file_Callback,this); + + if(!set_object_names) + sub_object_names_from_tracker=nh.subscribe("/tracker/object_names",5,&Multiple_Objects_Detection::object_names_from_tracker_callback,this); + + + running = true; + + //////////////////////////subscribe the color and depth data////////////////////////// + std::string topicCameraInfoColor = topicColor.substr(0, topicColor.rfind('/')) + "/camera_info"; + std::string topicCameraInfoDepth = topicDepth.substr(0, topicDepth.rfind('/')) + "/camera_info"; + + image_transport::TransportHints hints(useCompressed ? "compressed" : "raw"); + image_transport::TransportHints hintsDepth(useCompressed ? 
"compressedDepth" : "raw"); + subImageColor = new image_transport::SubscriberFilter(it, topicColor, queueSize, hints); + subImageDepth = new image_transport::SubscriberFilter(it, topicDepth, queueSize, hintsDepth); + subCameraInfoColor = new message_filters::Subscriber(nh, topicCameraInfoColor, queueSize); + subCameraInfoDepth = new message_filters::Subscriber(nh, topicCameraInfoDepth, queueSize); + + if(useExact) + { + syncExact = new message_filters::Synchronizer(ExactSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth); + syncExact->registerCallback(boost::bind(&Multiple_Objects_Detection::image_callback, this, _1, _2, _3, _4)); + } + else + { + syncApproximate = new message_filters::Synchronizer(ApproximateSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth); + syncApproximate->registerCallback(boost::bind(&Multiple_Objects_Detection::image_callback, this, _1, _2, _3, _4)); + } + //////////////////////////subscribe the color and depth data////////////////////////// + + + spinner.start(); + std::chrono::milliseconds duration(1); + while(!updateImage ) + { + if(!ros::ok()) + { + return; + } + std::this_thread::sleep_for(duration); + } + } + + void background_calculation() + { + Mat test_backgroundfile=imread("/tmp/depth_background.png",CV_LOAD_IMAGE_UNCHANGED); + if(!test_backgroundfile.empty())// if the background successfully read from file + { + depth_max = test_backgroundfile; + background_calculate_frames=0; + printf("use background removal and background is successfully read from file......\n"); + printf("please press P to select ROIs......\n"); + } + else//background can't be read from file, being calculated + { + printf("use background removal but background can't be read from file, being calculated......\n"); + if(depth_max.empty()) + depth_max=Mat::zeros(main_depth_16.size(),CV_16UC1); + else + depth_max=cv::max(depth_max,main_depth_16); + background_calculate_frames--; + if(background_calculate_frames==0)//save background file + { + std::vector compression_params; + compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); + compression_params.push_back(0); + // pcl::io::savePCDFileASCII ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud); + cv::imwrite("/tmp/depth_background.png", depth_max, compression_params); + printf("background is sucessfully calculated and saved......\n"); + printf("please press P to select ROIs......\n"); + } + } + } + + void background_removal() + { + Mat depth_diff=depth_max-main_depth_16; + + int nr=depth_diff.rows; + int nc=depth_diff.cols; + if(depth_diff.isContinuous()&&main_color.isContinuous()) + { + nr=1; + nc=nc*depth_diff.rows*depth_diff.channels(); + } + for(int i=0;i(i); + uchar* colorData=main_color.ptr(i*3); + for(int j=0;j::iterator it =rois_from_file_namelist.begin();it!=rois_from_file_namelist.end();it++) + { + size_t lastindex = (*it).find_last_of("."); + *it = (*it).substr(0, lastindex); + } + + + for(int roiIndex=0; roiIndex tracks_2D_(10); + tracks_2D.push_back(tracks_2D_); + bool occlude_=true; + occludes.push_back(occlude_); + Object_Detector::current_detected_boxes=current_detected_boxes; + } + objects_selected=true; + finished_select_rois_from_file=false; + std::cout<>::const_iterator roi_it= rois_from_gui.begin();roi_it!=rois_from_gui.end();roi_it++) + { + Object_Detector newDetector; + if(set_object_names) + newDetector.setObjectName((*roi_it).first); + else + newDetector.setObjectName("default"); + 
newDetector.setCurrentRect((*roi_it).second); + + Object_Detectors.push_back(newDetector); + current_detected_boxes.push_back((*roi_it).second); + std::list tracks_2D_(10); + tracks_2D.push_back(tracks_2D_); + bool occlude_=false; + occludes.push_back(occlude_); + Object_Detector::current_detected_boxes=current_detected_boxes; + } + objects_selected=true; + std::cout<header.frame_id = color_header_frameId; + detection_array_msg->header.stamp = ros::Time::now(); + + for(int i = 0; i < 3; i++) + for(int j = 0; j < 3; j++) + detection_array_msg->intrinsic_matrix.push_back(cameraMatrixColor.at(i, j)); + + //set the parms so that we can use the openptrack's tracking node + detection_array_msg->confidence_type = std::string("hog+svm"); + detection_array_msg->image_type = std::string("rgb"); + //!!!!!!!!!!!!!!!!!!!!!!!detection_array_msg msg!!!!!!!!!!!!!!!!!!!!!!! + + //detect objects one by one + for(size_t i=0; i(current_center2D_d.y,current_center2D_d.x)/1000.0;//meter ,not mm + + tf::Vector3 camera_3D;//center point in camera3D coordinate + camera_3D.setX((current_center2D_d.x-cx)*fx*current_center_depth); + camera_3D.setY((current_center2D_d.y-cy)*fy*current_center_depth); + camera_3D.setZ(current_center_depth); + + // set items of the detection msg + /////set every 3D point of the detection box with the center point in camera3D coordinate///// + detection_msg.box_3D.p1.z=camera_3D.z(); + detection_msg.box_3D.p1.x=camera_3D.x(); + detection_msg.box_3D.p1.y=camera_3D.y(); + + detection_msg.box_3D.p2.z=detection_msg.box_3D.p1.z; + detection_msg.box_3D.p2.x=detection_msg.box_3D.p1.x; + detection_msg.box_3D.p2.y=detection_msg.box_3D.p1.y; + + detection_msg.centroid.x=detection_msg.box_3D.p1.x; + detection_msg.centroid.y=detection_msg.box_3D.p1.y; + detection_msg.centroid.z=detection_msg.box_3D.p1.z; + + detection_msg.top.x=detection_msg.box_3D.p1.x; + detection_msg.top.y=detection_msg.box_3D.p1.y; + detection_msg.top.z=detection_msg.box_3D.p1.z; + + detection_msg.bottom.x=detection_msg.box_3D.p1.x; + detection_msg.bottom.y=detection_msg.box_3D.p1.y; + detection_msg.bottom.z=detection_msg.box_3D.p1.z; + /////set every 3D point of the detection box with the center point in camera3D coordinate///// + + + detection_msg.box_2D.x = current_detected_boxes[i].x; + detection_msg.box_2D.y = current_detected_boxes[i].y; + detection_msg.box_2D.width = current_detected_boxes[i].width; + detection_msg.box_2D.height = current_detected_boxes[i].height; + + + detection_msg.confidence = 10;//set a fixed value + + //the distance between the camera and the object + detection_msg.distance = std::sqrt(detection_msg.centroid.x * detection_msg.centroid.x + detection_msg.centroid.z * detection_msg.centroid.z); + + + // set the the height of the detection in world_3D, this will pass to the tracker ,the only reason to do this is it is needed in json message. 
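+          // note: fx and fy hold the inverse focal lengths (fx = 1/K(0,0), set in image_callback),
+          // so (u - cx) * fx * depth above already yields metric camera-frame coordinates;
+          // transform_camera2world below then maps the centroid into the /world frame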
+ tf::Vector3 world_3D; + world_3D = transform_camera2world(camera_3D); + detection_msg.height =world_3D.z()*4/3 ; + + detection_msg.object_name=object_name; + + if(current_center_depth>0.01) + detection_array_msg->detections.push_back(detection_msg);//push to the msg array + } + else{//if occluded ,use a empty_rect because of the tracker will use this to generate the other_objects_mask + Rect empty_rect(0,0,1,1); + current_detected_boxes[i]=empty_rect; + Object_Detector::current_detected_boxes=current_detected_boxes; + } + } + + detection_pub.publish(detection_array_msg); // publish message + } + + void show_2D_tracks_on_image()//show track points on 2D image + { + for(size_t i=0; i current_track=tracks_2D[i]; + for( std::list ::iterator it = current_track.begin(); it!=current_track.end(); it++) + { + Rect test_Rect((*it).x+((*it).width)/2,(*it).y+((*it).height)/2,1,1); + test_Rect=test_Rect&Rect(0,0,main_color.size().width,main_color.size().height); + cv::rectangle(main_color, test_Rect, cv::Scalar(250*(i), 250*(i-1), 250*(i-2)), 2, CV_AA); + } + } + cv::namedWindow("show_2D_tracks"); + cv::imshow( "show_2D_tracks", main_color ); + cv::waitKey(10); + } + + + + //recieve the color and depth image + void image_callback(const sensor_msgs::Image::ConstPtr imageColor, const sensor_msgs::Image::ConstPtr imageDepth, + const sensor_msgs::CameraInfo::ConstPtr cameraInfoColor, const sensor_msgs::CameraInfo::ConstPtr cameraInfoDepth) + { + cv::Mat _color, _depth; + readImage(imageColor, _color); + readImage(imageDepth, _depth); + + color_header_frameId=imageColor->header.frame_id; + + if(!objects_selected)// the camrea info just need to be read once + { + readCameraInfo(cameraInfoColor, cameraMatrixColor); + readCameraInfo(cameraInfoDepth, cameraMatrixDepth); + + fx = 1.0f / cameraMatrixColor.at(0, 0); + fy = 1.0f / cameraMatrixColor.at(1, 1); + cx = cameraMatrixColor.at(0, 2); + cy = cameraMatrixColor.at(1, 2); + + std::string world_frame_id="/world"; + try{ + tf_listener.lookupTransform(world_frame_id, color_header_frameId, ros::Time(0), transform_camera2world); + } + catch(tf::TransformException ex){ + ROS_ERROR("%s",ex.what()); + ros::Duration(1.0).sleep(); + } + } + + + if(_color.type() == CV_16U) + { + cv::Mat tmp; + _color.convertTo(tmp, CV_8U, 0.02); + cv::cvtColor(tmp, _color, CV_GRAY2BGR); + } + + lock.lock(); + this->color = _color; + this->depth = _depth; + updateImage = true; + lock.unlock(); + } + + + //recieve the roi msg(x,y,width,height) from marking in the gui + void image2D_rois_from_gui_Callback(const opt_msgs::Image2D_roi_array::ConstPtr& image2D_roi_msg) + { + for( std::vector::const_iterator it = image2D_roi_msg->Rois.begin(); + it != image2D_roi_msg->Rois.end(); it++) + { + string object_name=(*it).name; + cv::Rect selection((*it).x,(*it).y,(*it).width,(*it).height); + selection=selection&Rect(0,0,main_color.size().width,main_color.size().height); + + std::pair new_rois_from_gui(object_name,selection); + + lock.lock(); + rois_from_gui.push_back(new_rois_from_gui); + lock.unlock(); + std::cout<<"got image msg comes from gui"<name!="end_flag") + { + cv_bridge::CvImagePtr cv_ptr; + try + { + cv_ptr = cv_bridge::toCvCopy(msg->image_roi, sensor_msgs::image_encodings::BGR8); + cv::Mat conversion_mat_; + cv_ptr->image.copyTo(conversion_mat_); + lock.lock(); + rois_from_file.push_back(conversion_mat_); + lock.unlock(); + + string object_name=msg->name; + rois_from_file_namelist.push_back(object_name); + + std::cout<<"got image msg comes from 
file"<header.frame_id)==color_header_frameId) + { + for( std::vector::const_iterator it = object_names_msg->object_names.begin(); + it!=object_names_msg->object_names.end();it++) + { + int number=(*it).no; + if(Object_Detectors[number].object_name=="default") + { + Object_Detectors[number].object_name=(*it).object_name; + std::cout<<"set number "<encoding); + pCvImage->image.copyTo(image); + } + + void readCameraInfo(const sensor_msgs::CameraInfo::ConstPtr cameraInfo, cv::Mat &cameraMatrix) const + { + double *itC = cameraMatrix.ptr(0, 0); + for(size_t i = 0; i < 9; ++i, ++itC) + { + *itC = cameraInfo->K[i]; + } + } +}; + + diff --git a/detection/include/open_ptrack/multiple_objects_detection/object_detector.h b/detection/include/open_ptrack/multiple_objects_detection/object_detector.h new file mode 100644 index 0000000..8701e0a --- /dev/null +++ b/detection/include/open_ptrack/multiple_objects_detection/object_detector.h @@ -0,0 +1,140 @@ +#ifndef Object_Detector_H +#define Object_Detector_H +//#include + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/video/tracking.hpp" +using namespace cv; +class Object_Detector +{ +public: + + + + // ///////////For hsv mask/////////// + static int HMin, SMin, VMin; + static int HMax,SMax, VMax; + // ///////////For hsv mask/////////// + + + //////////////////////////For camshift////////////////////////// + static std::string Backprojection_Mode; + // Backprojection_Mode{ + // H ,//just use hue of select roi to do calhist for one time, then use the permenet hue hist to do the backproject on every hue frame + // HS,//use hue and saturation of select roi to do calhist(named:HS_hist) for one time, then use the permenet hs hist to do the backproject on every hue&saturation frame + // HSD//use hue and saturation of select roi to do calhist for one time(got :hs_hist_for_HSD,it is just used under occlusion), calculate the pdf of this hist(got hs_hist_pdf), + // //then use the roi of depth data to do calhist and pdf(got :tmp_depth_hist_pdf) for every frame, + // //then combine the permenet hs hist pdf (named:hs_hist_pdf) and tmp depth hist(named: tmp_depth_hist_pdf) to do the backproject on every hsd frame,when occluded + // }; + + static int h_bins,s_bins;//for the hue backrojection hist ,divide (HMin,HMax) to h_bins parts + static int d_bins;// depth bins, + //////////////////////////For camshift////////////////////////// + + + + + + ///////////For camshift recover from occlusion/////////// + //for camshift, for the window in backproject image,the value represent the similirity with the ROI. the value of the pixel is equal to 255*percentage ,the percentage come from the histogram(problility) + // QUALITY_TOLERANCE here means the TOLERANCE of sum of the window in backproject image,it represent the quality of the window. + // DENSITY_TOLORENCE here means the TOLERANCE of the DENSITY,DENSITY=QUALITY/window.area(); + // AREA_TOLERANCE here means the TOLERANCE of the x,y ,window.it means the objects maybe apprear in the next frame ,in the window which increase it's size by AREA_TOLERANCE every frame.so the bigger AREA_TOLERANCE,the objects can move faster. 
+ // if the QUALITYDENSITY_TOLORENCE,it will be "not occluded"; + static int AREA_TOLERANCE;//AREA_TOLERANCE is also used to create the position_maks + static int QUALITY_TOLERANCE; + static double DENSITY_TOLORENCE; + + bool occluded; + bool half_occluded; + double half_occluded_frames; + + ///////////For camshift recover from occlusion/////////// + + static std::vector current_detected_boxes;//this one come from class:multiple_objects_detection, every time of every object's detection ,multiple_objects_detection will update this varable ,we use it to generate the other_object_mask + cv::Mat roi_from_file;// the roi come from file which is set when "select_rois_from_file" + std::string object_name; + +private: + static cv::Mat mainColor; + static cv::Mat mainDepth; + cv::Mat Color,Depth;//copied from mainColor,mainDepth + + + bool firstRun; + cv::Rect currentRect;// just used in the initilization(no important) + cv::Rect selection;// just used in the initilization(no important) + + cv::RotatedRect current_detectedBox;//detction result (important) + cv::Rect detectWindow;// used as search window at the beginning of detection(important) + + + //////////main variable for calculate the histogram /////////// + cv::Mat hsv, hue, hsv_mask,backproj; + cv::Mat h_hist;//H mode + cv::Mat hs_hist_for_HS;//HS mode + cv::Mat hs_hist_for_HSD,//just used under occlusion,because ,when oclluded ,can't use depth infomation to detect the objects + hs_hist_pdf,//use hs_hist_for_HSD to get pdf ,use it as the permnet part + tmp_depth_hist_pdf,//use every depth frame to get this ,use it as the tmp part + hsd_hist;//combine hs_hist_pdf(perment) and tmp_depth_hist_pdf(tmp,every frame) to get this + ///////////main variable for calculate the histogram /////////// + + + Mat other_object_mask,position_mask,depth_mask;//some mask that used for get the good back projection image + + + +public: + Object_Detector() + :firstRun(true),occluded(false),half_occluded(false),half_occluded_frames(10) + {} + + static void setMainColor(const cv::Mat _mainColor); + static cv::Mat getMainColor(); + + static void setMainDepth(const cv::Mat _mainDepth); + static cv::Mat getMainDepth(); + + void setCurrentRect(const cv::Rect _currentRect); + cv::Rect getCurrentRect(); + + void setObjectName(const std::string object_name); + std::string getObjectName(); + + void setcurrent_detected_boxes(std::vector _current_detected_boxes); + std::vector getcurrent_detected_boxes(); + + + void H_backprojection(); + void HS_backprojection(); + void HSD_backprojection(); + + cv::RotatedRect object_shift(InputArray _probColor,Rect& window, TermCriteria criteria);//camshift + occlusion handle + cv::RotatedRect detectCurrentRect(int id);//main detection function +}; + +//stastic varable defination +int Object_Detector::HMax; +int Object_Detector::SMax; +int Object_Detector::VMax; +int Object_Detector::HMin; +int Object_Detector::SMin; +int Object_Detector::VMin; + +int Object_Detector::h_bins; +int Object_Detector::s_bins; +int Object_Detector::d_bins; + +int Object_Detector::AREA_TOLERANCE; +int Object_Detector::QUALITY_TOLERANCE; +double Object_Detector::DENSITY_TOLORENCE; + +std::vector Object_Detector::current_detected_boxes; + +std::string Object_Detector::Backprojection_Mode; +#endif // Object_Detector_H diff --git a/detection/include/open_ptrack/multiple_objects_detection/roi_zz.h b/detection/include/open_ptrack/multiple_objects_detection/roi_zz.h new file mode 100644 index 0000000..ff3b424 --- /dev/null +++ 
b/detection/include/open_ptrack/multiple_objects_detection/roi_zz.h @@ -0,0 +1,70 @@ +#ifndef ROI_ZZ_H +#define ROI_ZZ_H + +//#include "opencv2/core/utility.hpp" + +#include + + + +#include "opencv2/ocl/ocl.hpp" +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/video/tracking.hpp" + +#include "opencv2/imgproc/types_c.h" +//#include "feature.hpp" +//#include "onlineMIL.hpp" +//#include "onlineBoosting.hpp" + +using namespace cv; + +//class roi_zz +//{ +//public: +// roi_zz(); +//}; + + + +class roi_zz { +public: + Rect select(Mat img, bool fromCenter = true); + Rect select(const cv::String& windowName, Mat img, bool showCrossair = true, bool fromCenter = true); + void select(const cv::String& windowName, Mat img, std::vector & boundingBox, bool fromCenter = true); + + struct handlerT{ + // basic parameters + bool isDrawing; + Rect box; + Mat image; + + // parameters for drawing from the center + bool drawFromCenter; + Point2f center; + + // initializer list + handlerT() : isDrawing(false), drawFromCenter(true) {}; + }selectorParams; + + // to store the tracked objects + std::vector objects; + +private: + static void mouseHandler(int event, int x, int y, int flags, void *param); + void opencv_mouse_callback(int event, int x, int y, int, void *param); + + // save the keypressed characted + int key; +}; + + + + + + + + + +#endif // ROI_ZZ_H diff --git a/detection/launch/detector_kinect2.launch b/detection/launch/detector_kinect2.launch index b2125d9..aded8ee 100644 --- a/detection/launch/detector_kinect2.launch +++ b/detection/launch/detector_kinect2.launch @@ -1,5 +1,6 @@ + @@ -14,7 +15,7 @@ - + @@ -27,7 +28,21 @@ - - + + + + + + + + + + + + + + + + diff --git a/detection/launch/object_detector_kinect2.launch b/detection/launch/object_detector_kinect2.launch new file mode 100644 index 0000000..4699992 --- /dev/null +++ b/detection/launch/object_detector_kinect2.launch @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/detection/launch/people_detector_kinect2.launch b/detection/launch/people_detector_kinect2.launch new file mode 100644 index 0000000..b2125d9 --- /dev/null +++ b/detection/launch/people_detector_kinect2.launch @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/detection/src/detection.cpp b/detection/src/detection.cpp index cef1012..71ce350 100644 --- a/detection/src/detection.cpp +++ b/detection/src/detection.cpp @@ -112,6 +112,13 @@ namespace open_ptrack return detection_msg_.distance; } + std::string + Detection::getObjectName() + { + return detection_msg_.object_name; + } + + bool Detection::isOccluded() { diff --git a/detection/src/multiple_objects_detection/object_detector.cpp b/detection/src/multiple_objects_detection/object_detector.cpp new file mode 100644 index 0000000..64035a9 --- /dev/null +++ b/detection/src/multiple_objects_detection/object_detector.cpp @@ -0,0 +1,488 @@ +#include "open_ptrack/multiple_objects_detection/object_detector.h" +#include +// Static member definition ... 
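+// (only mainColor and mainDepth are defined in this .cpp; the remaining statics, i.e. the HSV
+// bounds, bin counts and tolerances, are defined at the bottom of object_detector.h and are
+// filled from ROS parameters when the node starts)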
+cv::Mat Object_Detector::mainColor; +cv::Mat Object_Detector::mainDepth; + +void Object_Detector::setMainColor(const cv::Mat _mainColor) +{ + _mainColor.copyTo(mainColor); +} + +cv::Mat Object_Detector::getMainColor() +{ + return mainColor; +} + +void Object_Detector::setMainDepth(const cv::Mat _mainDepth) +{ + _mainDepth.copyTo(mainDepth); +} + +cv::Mat Object_Detector::getMainDepth() +{ + return mainDepth; +} + + +void Object_Detector::setCurrentRect(const cv::Rect _currentRect) +{ + currentRect = _currentRect; + selection = currentRect; +} + +cv::Rect Object_Detector::getCurrentRect() +{ + return currentRect; +} + + +void Object_Detector::setObjectName(const std::string _object_name) +{ + object_name=_object_name; +} +std::string Object_Detector::getObjectName() +{ + return object_name; +} + + +void Object_Detector::setcurrent_detected_boxes(std::vector _current_detected_boxes) +{ + current_detected_boxes=_current_detected_boxes; +} + +std::vector Object_Detector::getcurrent_detected_boxes() +{ + return current_detected_boxes; +} + +void Object_Detector::H_backprojection() +{ + float h_ranges[] = {0,(float)HMax}; + const float* ph_ranges = h_ranges; + int h_channels[] = {0, 0}; + hue.create(hsv.size(), hsv.depth()); + cv::mixChannels(&hsv, 1, &hue, 1, h_channels, 1); + + if( firstRun ) + { + if(!occluded) + { + cv::Mat roi(hue, selection), maskroi(hsv_mask, selection); + cv::calcHist(&roi, 1, 0, maskroi, h_hist, 1, &h_bins, &ph_ranges); + cv::normalize(h_hist, h_hist, 0, 255, CV_MINMAX); + detectWindow = selection; + firstRun = false; + // std::cout<<"H mode"<hsd + + if( firstRun ) + { + if(!occluded)//if the roi is from the file, the first frame will be set to occluded, in this situation,we just calculate the hs pdf and use it to search the object + { + cv::Mat roi(hsv, selection), maskroi(hsv_mask, selection); + cv::calcHist(&roi, 1, hs_channels, maskroi, hs_hist_for_HSD, 2, hs_size, phs_ranges, true, false); + cv::normalize(hs_hist_for_HSD , hs_hist_for_HSD , 0, 255, CV_MINMAX); + + double sum_hs_hist=sum(hs_hist_for_HSD)[0]; + hs_hist_pdf=hs_hist_for_HSD/sum_hs_hist; + + //used to generate the initial hsd_hist(use this just for the right data format ) + cv::calcHist(&roi, 1, hsd_channels, maskroi, hsd_hist, 3, hsd_size, phsd_ranges, true, false); + + cv::Mat roi_depth(Depth, selection); + + //calculate the the current_trackBox(rotatedrect) mask,named depth_mask(in this mask ,just the the value in the area :current_detectBox(rotatedrect) is 255) + Point2f vertices[4]; + current_detectedBox.points(vertices); + std::vector< std::vector > co_ordinates; + co_ordinates.push_back(std::vector()); + co_ordinates[0].push_back(vertices[0]); + co_ordinates[0].push_back(vertices[1]); + co_ordinates[0].push_back(vertices[2]); + co_ordinates[0].push_back(vertices[3]); + depth_mask=Mat::zeros(Color.size(),CV_8UC1); + drawContours( depth_mask,co_ordinates,0, Scalar(255),CV_FILLED, 8 ); + depth_mask&=hsv_mask; + cv::Mat depth_mask_roi(depth_mask, detectWindow); + cv::calcHist(&roi_depth, 1, 0, depth_mask_roi, tmp_depth_hist_pdf, 1, &d_bins, &pd_ranges); + double sum_tmp_depth_hist_pdf=sum(tmp_depth_hist_pdf)[0]; + tmp_depth_hist_pdf=tmp_depth_hist_pdf/sum_tmp_depth_hist_pdf; + + + //combine the hs and depth + for (int i=0; i(i,j,k)=255*hs_hist_pdf.at(i,j)*tmp_depth_hist_pdf.at(k); + } + } + } + + //normalize hsd_hist + double hMin,hMax; + minMaxIdx(hsd_hist, &hMin, &hMax); + hsd_hist = 255 * hsd_hist / hMax; + + detectWindow = selection; + firstRun = false; + } + else{ // if we get the roi from file 
,we just calculate the hs pdf + + Mat roi_color= roi_from_file; + Mat roi_hsv,roi_hsv_mask; + cv::cvtColor(roi_color, roi_hsv, CV_BGR2HSV); + cv::inRange(roi_hsv, cv::Scalar(HMin, SMin, MIN(VMin,VMax)), + cv::Scalar(HMax, SMax, MAX(VMin, VMax)), roi_hsv_mask); + // imshow("roi_hsv_mask",roi_hsv_mask); + // cv::waitKey(10); + + cv::calcHist(&roi_hsv, 1, hs_channels, roi_hsv_mask, hs_hist_for_HSD, 2, hs_size, phs_ranges, true, false); + cv::normalize(hs_hist_for_HSD , hs_hist_for_HSD , 0, 255, CV_MINMAX); + + double sum_hs_hist=sum(hs_hist_for_HSD)[0]; + hs_hist_pdf=hs_hist_for_HSD/sum_hs_hist; + + //used to generate the initial hsd_hist(use this just for the right data format ) + cv::calcHist(&roi_hsv, 1, hsd_channels, roi_hsv_mask, hsd_hist, 3, hsd_size, phsd_ranges, true, false); + + detectWindow = selection; + firstRun = false; + } + } + + else//main loop to get the hsd pdf, just update the depth pdf , and combine it with the initial hs pdf + { + if(!occluded&&!half_occluded) + { + cv::Mat roi_depth(Depth, detectWindow); + + //calculate the the current_trackBox(rotatedrect) mask,named depth_mask(in this mask ,just the the value in the area :current_detectBox(rotatedrect) is 255) + Point2f vertices[4]; + current_detectedBox.points(vertices); + std::vector< std::vector > co_ordinates; + co_ordinates.push_back(std::vector()); + co_ordinates[0].push_back(vertices[0]); + co_ordinates[0].push_back(vertices[1]); + co_ordinates[0].push_back(vertices[2]); + co_ordinates[0].push_back(vertices[3]); + + depth_mask=Mat::zeros(Color.size(),CV_8UC1); + drawContours( depth_mask,co_ordinates,0, Scalar(255),CV_FILLED, 8 ); + depth_mask&=hsv_mask; + + cv::Mat depth_mask_roi(depth_mask, detectWindow); + + cv::calcHist(&roi_depth, 1, 0, depth_mask_roi, tmp_depth_hist_pdf, 1, &d_bins, &pd_ranges); + double sum_tmp_depth_hist_pdf=sum(tmp_depth_hist_pdf)[0]; + tmp_depth_hist_pdf=tmp_depth_hist_pdf/sum_tmp_depth_hist_pdf; + + //////////////////////cost a lot of time ////////////////////// + for (int i=0; i(i,j,k)=255*hs_hist_pdf.at(i,j)*tmp_depth_hist_pdf.at(k); + } + } + } + //////////////////////rcost a lot of time ////////////////////// + + double hMin,hMax; + minMaxIdx(hsd_hist, &hMin, &hMax); + hsd_hist = 255 * hsd_hist / hMax; + } + } + + + if(!occluded&&!half_occluded)//if not occluded, use hsd pdf + { + cv::calcBackProject( &hsv, 1, hsd_channels, hsd_hist, backproj, phsd_ranges, 1, true ); + } + else//if occluded, use hs pdf + { + cv::calcBackProject( &hsv, 1, hs_channels, hs_hist_for_HSD, backproj, phs_ranges, 1, true ); + } + +} + +//camshift + occlusion handle +cv::RotatedRect Object_Detector::object_shift(InputArray _probColor,Rect& window, TermCriteria criteria) +{ + Size size; + Mat mat; + mat = _probColor.getMat(), size = mat.size(); + cv::meanShift( _probColor, window, criteria ); + // std::cout<<"real QUALITY_TOLERANCE"<= 360) + box.angle -= 360; + if(box.angle >= 180) + box.angle -= 180; + box.center = Point2f( window.x + window.width*0.5f, window.y + window.height*0.5f); + + return box; +} + +cv::RotatedRect Object_Detector::detectCurrentRect(int id) +{ + + mainColor.copyTo(Color); + mainDepth.copyTo(Depth); + + cv::cvtColor(Color, hsv, CV_BGR2HSV); + + //calculate the hsv_mask by the range + cv::inRange(hsv, cv::Scalar(HMin, SMin, MIN(VMin,VMax)), cv::Scalar(HMax, SMax, MAX(VMin, VMax)), hsv_mask); + + if(Backprojection_Mode=="H") + { + H_backprojection(); + } + else if(Backprojection_Mode=="HS") + { + HS_backprojection(); + } + else + { + HSD_backprojection(); + } + // imshow("backproj 
first",backproj); + + + // calculate the other_object_mask with the current_detected_boxes + other_object_mask=Mat::ones(Color.size(), CV_8U)*255; + for (int i=0; i1) + { + //use x y to generate position_mask,the object can move in the window which is AREA_TOLERANCE size bigger than the last detected window + position_mask=Mat::zeros(Color.size(),CV_8UC1); + int detectWindow_XL=detectWindow.x-AREA_TOLERANCE,detectWindow_XR=detectWindow.x+detectWindow.width+AREA_TOLERANCE; + int detectWindow_YT=detectWindow.y-AREA_TOLERANCE,detectWindow_YB=detectWindow.y+detectWindow.height+AREA_TOLERANCE; + Rect search_window=Rect(detectWindow_XL,detectWindow_YT,detectWindow_XR-detectWindow_XL,detectWindow_YB-detectWindow_YT)&Rect(0,0,Color.size().width,Color.size().height); + position_mask(search_window)=255; + position_mask &= hsv_mask; + backproj &= position_mask; + backproj &= other_object_mask; + } + else{ + backproj &= hsv_mask; + backproj &= other_object_mask; + } + + if(detectWindow.area()>1) + current_detectedBox = object_shift(backproj, detectWindow, + cv::TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 )); + + // cv::rectangle(backproj, detectWindow, cv::Scalar(255, 0, 0), 2, CV_AA); + // imshow("backproj final",backproj); + + return current_detectedBox; + +} + diff --git a/detection/src/multiple_objects_detection/roi_zz.cpp b/detection/src/multiple_objects_detection/roi_zz.cpp new file mode 100644 index 0000000..f0ad3f2 --- /dev/null +++ b/detection/src/multiple_objects_detection/roi_zz.cpp @@ -0,0 +1,127 @@ +#include "open_ptrack/multiple_objects_detection/roi_zz.h" + +void roi_zz::mouseHandler(int event, int x, int y, int flags, void *param){ + roi_zz *self =static_cast(param); + self->opencv_mouse_callback(event,x,y,flags,param); +} + +void roi_zz::opencv_mouse_callback( int event, int x, int y, int , void *param ){ + handlerT * data = (handlerT*)param; + switch( event ){ + // update the selected bounding box + case EVENT_MOUSEMOVE: + if( data->isDrawing ){ + if(data->drawFromCenter){ + data->box.width = 2*(x-data->center.x)/*data->box.x*/; + data->box.height = 2*(y-data->center.y)/*data->box.y*/; + data->box.x = data->center.x-data->box.width/2.0; + data->box.y = data->center.y-data->box.height/2.0; + }else{ + data->box.width = x-data->box.x; + data->box.height = y-data->box.y; + } + } + break; + + // start to select the bounding box + case EVENT_LBUTTONDOWN: + data->isDrawing = true; + data->box = cvRect( x, y, 0, 0 ); + data->center = Point2f((float)x,(float)y); + break; + + // cleaning up the selected bounding box + case EVENT_LBUTTONUP: + data->isDrawing = false; + if( data->box.width < 0 ){ + data->box.x += data->box.width; + data->box.width *= -1; + } + if( data->box.height < 0 ){ + data->box.y += data->box.height; + data->box.height *= -1; + } + break; + } +} + +Rect roi_zz::select(const cv::String& windowName, Mat img, bool showCrossair, bool fromCenter){ + + key=0; + + // set the drawing mode + selectorParams.drawFromCenter = fromCenter; + + // show the image and give feedback to user + imshow(windowName,img); + + // copy the data, rectangle should be drawn in the fresh image + selectorParams.image=img.clone(); + + // select the object + setMouseCallback( windowName, mouseHandler, (void *)&selectorParams ); + + // end selection process on SPACE (32) ESC (27) or ENTER (13) + while(!(key==32 || key==27 || key==13)){ + // draw the selected object + rectangle( + selectorParams.image, + selectorParams.box, + Scalar(255,0,0),2,1 + ); + + // draw cross air in the middle of 
bounding box + if(showCrossair){ + // horizontal line + line( + selectorParams.image, + Point((int)selectorParams.box.x,(int)(selectorParams.box.y+selectorParams.box.height/2)), + Point((int)(selectorParams.box.x+selectorParams.box.width),(int)(selectorParams.box.y+selectorParams.box.height/2)), + Scalar(255,0,0),2,1 + ); + + // vertical line + line( + selectorParams.image, + Point((int)(selectorParams.box.x+selectorParams.box.width/2),(int)selectorParams.box.y), + Point((int)(selectorParams.box.x+selectorParams.box.width/2),(int)(selectorParams.box.y+selectorParams.box.height)), + Scalar(255,0,0),2,1 + ); + } + + // show the image bouding box + imshow(windowName,selectorParams.image); + + // reset the image + selectorParams.image=img.clone(); + + //get keyboard event, extract lower 8 bits for scancode comparison + key=waitKey(1) & 0xFF; + } + + + return selectorParams.box; +} + +void roi_zz::select(const cv::String& windowName, Mat img, std::vector & boundingBox, bool fromCenter){ + std::vector box; + Rect temp; + key=0; + + // show notice to user + std::cout<<"Select an object to track and then press SPACE or ENTER button!"<0 && temp.height>0) + box.push_back(temp); + } + boundingBox=box; +} + +Rect roi_zz::select(Mat img, bool fromCenter){ + return select("ROI selector", img, fromCenter); +} diff --git a/opt_calibration/apps/listener.py b/opt_calibration/apps/listener.py index 3d8f5f0..853bdc7 100755 --- a/opt_calibration/apps/listener.py +++ b/opt_calibration/apps/listener.py @@ -48,30 +48,30 @@ def __init__(self) : self.sensor_launchers_dir = rospy.get_param('~sensor_launchers_dir') if self.sensor_launchers_dir[len(self.sensor_launchers_dir) - 1] != '/': self.sensor_launchers_dir = self.sensor_launchers_dir + '/' - + self.detector_launchers_dir = rospy.get_param('~detector_launchers_dir') if self.detector_launchers_dir[len(self.detector_launchers_dir) - 1] != '/': self.detector_launchers_dir = self.detector_launchers_dir + '/' - + self.camera_poses_dir = rospy.get_param('~camera_poses_dir') if self.camera_poses_dir[len(self.camera_poses_dir) - 1] != '/': self.camera_poses_dir = self.camera_poses_dir + '/' - + self.create_sensor_launch_srv = rospy.Service('create_sensor_launch', OPTSensor, self.handle_create_sensor_launch) self.create_detector_launch_srv = rospy.Service('create_detector_launch', OPTSensor, self.handle_create_detector_launch) self.create_camera_poses_srv = rospy.Service('create_camera_poses', OPTTransform, self.handle_create_camera_poses) - - + + def handle_create_sensor_launch(self, request) : - + file_name = self.sensor_launchers_dir + 'sensor_' + request.id + '.launch' file = open(file_name, 'w') file.write('\n') file.write('\n') file.write('\n\n') - + file.write(' \n') - + if request.type == OPTSensorRequest.TYPE_SR4500: file.write(' \n') if request.ip == '': @@ -79,24 +79,24 @@ def handle_create_sensor_launch(self, request) : rospy.logerr('Missing "ip" field for the SR4500 sensor!') return (OPTSensorResponse.STATUS_ERROR, 'Missing "ip" field!') file.write(' \n\n') - + file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n\n') - + elif request.type == OPTSensorRequest.TYPE_KINECT1: file.write(' \n') if request.serial != '': file.write(' \n') file.write('\n') - + file.write(' \n') file.write(' \n') if request.serial != '': @@ -106,17 +106,17 @@ def handle_create_sensor_launch(self, request) : file.write(' \n') file.write(' \n') file.write(' \n\n') - + 
file.write(' \n') file.write(' \n\n') - + elif request.type == OPTSensorRequest.TYPE_STEREO_PG: file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') file.write(' \n') @@ -128,13 +128,13 @@ def handle_create_sensor_launch(self, request) : file.write(' \n') file.write(' \n') file.write(' \n') - file.write(' \n\n') + file.write(' \n\n') elif request.type == OPTSensorRequest.TYPE_KINECT2: file.write(' \n') if request.serial != '': file.write(' \n') file.write('\n') - + file.write(' \n') file.write(' \n') if request.serial != '': @@ -144,27 +144,30 @@ def handle_create_sensor_launch(self, request) : # file.write(' \n') file.write(' \n') file.write(' \n') - file.write(' \n\n') + file.write(' \n\n') file.write(' \n') - file.write(' \n\n') - + file.write(' \n\n') + file.write('\n') file.close(); rospy.loginfo(file_name + ' created!'); - + return (OPTSensorResponse.STATUS_OK, file_name + ' created!') - + + ########################################## + ## PEOPLE and OBJECT DETECTION LAUNCHER ## + ########################################## def handle_create_detector_launch(self, request) : - + file_name = self.detector_launchers_dir + 'detection_node_' + request.id + '.launch' file = open(file_name, 'w') file.write('\n') file.write('\n') file.write('\n\n') - + file.write(' \n') - + if request.type == OPTSensorRequest.TYPE_SR4500: file.write(' \n') if request.ip == '': @@ -173,7 +176,7 @@ def handle_create_detector_launch(self, request) : return (OPTSensorResponse.STATUS_ERROR, 'Missing "ip" field!') file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') file.write(' \n') @@ -181,12 +184,12 @@ def handle_create_detector_launch(self, request) : file.write(' \n') file.write(' \n') file.write(' \n\n') - + elif request.type == OPTSensorRequest.TYPE_KINECT1: if request.serial != '': file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') if request.serial != '': @@ -197,14 +200,14 @@ def handle_create_detector_launch(self, request) : file.write(' \n') file.write(' \n') file.write(' \n\n') - + elif request.type == OPTSensorRequest.TYPE_STEREO_PG: file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') file.write(' \n') @@ -218,7 +221,7 @@ def handle_create_detector_launch(self, request) : if request.serial != '': file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') if request.serial != '': @@ -229,40 +232,214 @@ def handle_create_detector_launch(self, request) : file.write(' \n') file.write(' \n') file.write(' \n\n') - + + file.write('\n') + file.close(); + rospy.loginfo(file_name + ' created!'); + ########################################## + ########################################## + + + + ############################### + ## PEOPLE DETECTION LAUNCHER ## + ############################### + file_name = self.detector_launchers_dir + 'people_detection_node_' + request.id + '.launch' + file = open(file_name, 'w') + file.write('\n') + file.write('\n') + file.write('\n\n') + + file.write(' \n') + + if request.type == OPTSensorRequest.TYPE_SR4500: + file.write(' \n') + if request.ip == '': + file.close(); + rospy.logerr('Missing "ip" field for the SR4500 sensor!') + return (OPTSensorResponse.STATUS_ERROR, 'Missing "ip" field!') + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + 
file.write(' \n\n') + + elif request.type == OPTSensorRequest.TYPE_KINECT1: + if request.serial != '': + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + if request.serial != '': + file.write(' \n') + file.write(' \n') + else: + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + + elif request.type == OPTSensorRequest.TYPE_STEREO_PG: + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + elif request.type == OPTSensorRequest.TYPE_KINECT2: + if request.serial != '': + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + #if request.serial != '': + #file.write(' \n') + #file.write(' \n') + #else: + #file.write(' \n') + file.write(' \n') + #file.write(' \n') + file.write(' \n\n') + + file.write('\n') + file.close(); + rospy.loginfo(file_name + ' created!'); + + ############################### + ############################### + + + + ############################### + ## OBJECT DETECTION LAUNCHER ## + ############################### + file_name = self.detector_launchers_dir + 'object_detection_node_' + request.id + '.launch' + file = open(file_name, 'w') + file.write('\n') + file.write('\n') + file.write('\n\n') + + file.write(' \n') + + if request.type == OPTSensorRequest.TYPE_SR4500: + file.write(' \n') + if request.ip == '': + file.close(); + rospy.logerr('Missing "ip" field for the SR4500 sensor!') + return (OPTSensorResponse.STATUS_ERROR, 'Missing "ip" field!') + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + + elif request.type == OPTSensorRequest.TYPE_KINECT1: + if request.serial != '': + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + if request.serial != '': + file.write(' \n') + file.write(' \n') + else: + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + + elif request.type == OPTSensorRequest.TYPE_STEREO_PG: + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + elif request.type == OPTSensorRequest.TYPE_KINECT2: + if request.serial != '': + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + #if request.serial != '': + #file.write(' \n') + #file.write(' \n') + #else: + #file.write(' \n') + file.write(' \n') + #file.write(' \n') + file.write(' \n\n') + file.write('\n') file.close(); rospy.loginfo(file_name + ' created!'); - + return (OPTSensorResponse.STATUS_OK, file_name + ' created!') - + ############################### + ############################### + + + + def handle_create_camera_poses(self, request) : - + file_name = self.camera_poses_dir + 'camera_poses.txt' file = open(file_name, 'w') file.write('# Auto-generated file.\n') file.write('# CALIBRATION ID: ' + str(request.calibration_id) + '\n') - + data = zip(request.child_id, request.transform) for item in data: t = item[1].translation r = item[1].rotation file.write(item[0] + ': ' + str(t.x) + ' ' + str(t.y) + ' ' + str(t.z) + ' ') 
file.write(str(r.x) + ' ' + str(r.y) + ' ' + str(r.z) + ' ' + str(r.w) + '\n') - + file.close() rospy.loginfo(file_name + ' created!'); - + return (OPTTransformResponse.STATUS_OK, file_name + ' created!') - + if __name__ == '__main__' : - + rospy.init_node('listener') - + try: - + listener = Listener() rospy.spin() - + except rospy.ROSInterruptException: pass diff --git a/opt_calibration/launch/opt_calibration_master.launch b/opt_calibration/launch/opt_calibration_master.launch new file mode 100644 index 0000000..c8e22a9 --- /dev/null +++ b/opt_calibration/launch/opt_calibration_master.launch @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/opt_calibration/launch/opt_calibration_results.launch b/opt_calibration/launch/opt_calibration_results.launch new file mode 100644 index 0000000..2570aeb --- /dev/null +++ b/opt_calibration/launch/opt_calibration_results.launch @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/opt_calibration/launch/opt_define_reference_frame.launch b/opt_calibration/launch/opt_define_reference_frame.launch new file mode 100644 index 0000000..2fac742 --- /dev/null +++ b/opt_calibration/launch/opt_define_reference_frame.launch @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/opt_calibration/launch/sensor_kinect2_head.launch b/opt_calibration/launch/sensor_kinect2_head.launch new file mode 100644 index 0000000..2dfe94d --- /dev/null +++ b/opt_calibration/launch/sensor_kinect2_head.launch @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff --git a/opt_gui/CMakeLists.txt b/opt_gui/CMakeLists.txt new file mode 100644 index 0000000..f3942ae --- /dev/null +++ b/opt_gui/CMakeLists.txt @@ -0,0 +1,73 @@ +############################################################################## +# CMake +############################################################################## +INCLUDE(CheckCXXCompilerFlag) +CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11) +CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X) +IF(COMPILER_SUPPORTS_CXX11) +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +ELSEIF(COMPILER_SUPPORTS_CXX0X) +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") +ELSE() + MESSAGE(ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.") +ENDIF() +cmake_minimum_required(VERSION 2.8.0) +project(opt_gui) + +############################################################################## +# Catkin +############################################################################## + +# qt_build provides the qt cmake glue, roscpp the comms for a default talker +find_package(catkin REQUIRED COMPONENTS qt_build roscpp sensor_msgs image_transport cv_bridge opt_msgs) + +set(QML_IMPORT_PATH "${QML_IMPORT_PATH};${CATKIN_GLOBAL_LIB_DESTINATION}" ) +set(QML_IMPORT_PATH2 "${QML_IMPORT_PATH};${CATKIN_GLOBAL_LIB_DESTINATION}" ) +include_directories(${catkin_INCLUDE_DIRS} include) +# Use this to define what the package will export (e.g. libs, headers). +# Since the default here is to produce only a binary, we don't worry about +# exporting anything. 
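
For reference, `handle_create_camera_poses` above writes one line per camera to `camera_poses.txt` in the form `child_id: tx ty tz qx qy qz qw` (translation first, then quaternion x, y, z, w), preceded by `#`-comment header lines. A minimal C++ sketch of a reader for that format — the path and the `CameraPose` struct are illustrative, not part of this changeset:

```cpp
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Matches the per-camera line written by handle_create_camera_poses:
//   <child_id>: tx ty tz qx qy qz qw
struct CameraPose {
  std::string child_id;
  double t[3];  // translation
  double q[4];  // quaternion x, y, z, w
};

int main() {
  std::ifstream in("camera_poses.txt");  // hypothetical path
  std::vector<CameraPose> poses;
  std::string line;
  while (std::getline(in, line)) {
    if (line.empty() || line[0] == '#') continue;  // skip the generated header
    std::istringstream ss(line);
    CameraPose p;
    ss >> p.child_id;                                  // token is "<child_id>:"
    if (!p.child_id.empty()) p.child_id.pop_back();    // drop the trailing ':'
    ss >> p.t[0] >> p.t[1] >> p.t[2]
       >> p.q[0] >> p.q[1] >> p.q[2] >> p.q[3];
    if (ss) poses.push_back(p);
  }
  std::cout << "read " << poses.size() << " poses\n";
  return 0;
}
```
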
+#add_message_files(FILES Roi_object.msg Roi_object_array.msg) +#generate_messages(DEPENDENCIES std_msgs geometry_msgs sensor_msgs) + +catkin_package( + + CATKIN_DEPENDS qt_build roscpp sensor_msgs image_transport cv_bridge +) + +#generate_messages() +############################################################################## +# Qt Environment +############################################################################## + +# this comes from qt_build's qt-ros.cmake which is automatically +# included via the dependency ca ll in package.xml +#rosbuild_prepare_qt4(QtCore Qtgui QtQml QtQuick) # Add the appropriate components to the component list here +find_package(Qt5 COMPONENTS Core Gui Qml Quick REQUIRED) + +############################################################################## +# Sections +############################################################################## + +file(GLOB QT_RESOURCES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} resources/*.qrc) +file(GLOB_RECURSE QT_MOC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} FOLLOW_SYMLINKS include/opt_gui/*.hpp include/component/*.hpp) + +QT5_ADD_RESOURCES(QT_RESOURCES_CPP ${QT_RESOURCES}) +QT5_WRAP_CPP(QT_MOC_HPP ${QT_MOC}) + +############################################################################## +# Sources +############################################################################## + +file(GLOB_RECURSE QT_SOURCES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} FOLLOW_SYMLINKS src/*.cpp src/component/*.cpp) + +############################################################################## +# Binaries +############################################################################## + +add_executable(opt_gui ${QT_SOURCES} ${QT_RESOURCES_CPP} ${QT_FORMS_HPP} ${QT_MOC_HPP}) +qt5_use_modules(opt_gui Quick Core) +target_link_libraries(opt_gui ${QT_LIBRARIES} ${catkin_LIBRARIES}) +target_include_directories(opt_gui PUBLIC include ${catkin_INCLUDE_DIRS}) +add_dependencies(opt_gui ${catkin_EXPORTED_TARGETS}) +install(TARGETS opt_gui RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}) diff --git a/opt_gui/acknowledge b/opt_gui/acknowledge new file mode 100644 index 0000000..10eb154 --- /dev/null +++ b/opt_gui/acknowledge @@ -0,0 +1,5 @@ +rqt_image_view +bluesat/owr-qtgui +kinect2_bridge +iai_kinect + diff --git a/opt_gui/data/readme b/opt_gui/data/readme new file mode 100644 index 0000000..c2b906f --- /dev/null +++ b/opt_gui/data/readme @@ -0,0 +1,6 @@ +1. Mkdir in this folder with the names of the sensors + +2. Drawing boudingbox in the gui , the images of the boudingbox will be saved here + +3. 
Select some good images and put them in the right folder(named with the sensor name) for the save/restore + diff --git a/opt_gui/include/component/opt_streaming_component.hpp b/opt_gui/include/component/opt_streaming_component.hpp new file mode 100644 index 0000000..0faffad --- /dev/null +++ b/opt_gui/include/component/opt_streaming_component.hpp @@ -0,0 +1,92 @@ +#ifndef OPT_Streaming_Component_H +#define OPT_Streaming_Component_H + +//QT +#include +#include +#include + +//ROS +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +#include + +//OPENCV +#include +#include +#include "opencv2/highgui/highgui.hpp" + + +class OPT_Streaming_Component : public QQuickPaintedItem { + //make this a Qt Widget + Q_OBJECT + // defines a qml value for the topic + Q_PROPERTY(QString topic READ get_topic WRITE set_topic NOTIFY topic_changed) + Q_PROPERTY(QString rois_dir_path READ get_rois_dir_path WRITE set_rois_dir_path NOTIFY rois_dir_path_changed) + Q_PROPERTY(QString all_rois_dir_path READ get_all_rois_dir_path) + + +public: + // Constructor, takes parent widget, which defaults to null + OPT_Streaming_Component(QQuickItem * parent = 0); + + void paint(QPainter *painter); + Q_INVOKABLE void setup();//set up the subscriber(image msg from sensor) and advertiser(roi msg) + + //getters and setters for Q_PROPERTYs + void set_topic(const QString &new_value); + QString get_topic() const; + + void set_rois_dir_path(const QString rois_dir_path_);//set the roi dir for the current camera view + QString get_rois_dir_path() const; + + QString get_all_rois_dir_path() const;// get the main folder of all the rois from all the cameras + + + Q_INVOKABLE void save_ROI(QString sensor_name,int no_ROI, int rect_X,int rect_Y, int rect_width, int rect_height );//save the selected rois as iamges + + Q_INVOKABLE void add_roi(int no_ROI, QString roi_name, int rect_X,int rect_Y, int rect_width, int rect_height);// add every selected roi to msg:image2D_rois_msg + + Q_INVOKABLE void publish_rois_from_gui(); + Q_INVOKABLE void publish_rois_from_file(); + +signals: + void topic_changed(); + void rois_dir_path_changed(); + +private: + void receive_image(const sensor_msgs::Image::ConstPtr & msg);//call back function to recieve the images for displaying + + // ROS + ros::NodeHandle nh; + image_transport::ImageTransport * img_trans; + image_transport::Subscriber image_sub; + QString topic_value;//topic name + QString rois_dir_path_value;//roi file path for the current sensor + QString all_rois_dir_path_value;//roi file main path for all the sensors + bool ros_ready; + // Used for buffering the image + QImage current_image; + cv::Mat conversion_mat_; + uchar * current_buffer; + + //for publish msgs + int number_of_rois; + opt_msgs::Image2D_roi current_image2D_roi_msg; + opt_msgs::Image2D_roi_array image2D_rois_msg; + ros::Publisher image2D_rois_from_gui_pub; + // image_transport::Publisher image2D_rois_from_file_pub; + ros::Publisher image2D_rois_from_file_pub; +}; + +#endif // OPT_Streaming_Component_H diff --git a/opt_gui/include/opt_gui/main_application.hpp b/opt_gui/include/opt_gui/main_application.hpp new file mode 100644 index 0000000..2ae76ff --- /dev/null +++ b/opt_gui/include/opt_gui/main_application.hpp @@ -0,0 +1,31 @@ +#ifndef MAIN_APPLICATION_H +#define MAIN_APPLICATION_H + +#include +#include +#include +#include +class Main_Application : public QQmlApplicationEngine { + Q_OBJECT + public: + Main_Application(); + // this method is used to setup all the ros functionality we need + // 
before the application starts runnings + void run(); + + // this defines a slot that will be called when the application is idle + public slots: + void main_loop(); + + private: + ros::NodeHandle nh; + std::string sensor_name1; + std::string sensor_name2; + std::string sensor_name3; + std::string sensor_name4; + std::string sensor_name5; + std::string sensor_name6; + +}; + +#endif // MAIN_APPLICATION_H diff --git a/opt_gui/launch/opt_gui.launch b/opt_gui/launch/opt_gui.launch new file mode 100644 index 0000000..21356fc --- /dev/null +++ b/opt_gui/launch/opt_gui.launch @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/opt_gui/package.xml b/opt_gui/package.xml new file mode 100644 index 0000000..d2a47d1 --- /dev/null +++ b/opt_gui/package.xml @@ -0,0 +1,62 @@ + + + opt_gui + 0.1.0 + The opt_gui package + + + + + OpenPTrack + + + + + + Apache 2.0 + + + + + + + + + + + + + Yongheng Zhao + + + + + + + + + + + + + + catkin + qt_build + roscpp + image_transport + sensor_msgs + libqt4-dev + cv_bridge + qt_build + image_transport + sensor_msgs + roscpp + libqt4-dev + cv_bridge + + + + + + + diff --git a/opt_gui/resources/DeleteRed.png b/opt_gui/resources/DeleteRed.png new file mode 100644 index 0000000..495106c Binary files /dev/null and b/opt_gui/resources/DeleteRed.png differ diff --git a/opt_gui/resources/Rois_listview_delegate.qml b/opt_gui/resources/Rois_listview_delegate.qml new file mode 100644 index 0000000..c74ea0a --- /dev/null +++ b/opt_gui/resources/Rois_listview_delegate.qml @@ -0,0 +1,120 @@ +import QtQuick 2.0 +import QtQuick.Controls 1.1 + +Component { + id: imageDelegate + Item { + id: imagewraper + width: show_rois.width/3 + height: show_rois.width/3+20 + Rectangle {//main rect + id: image + color: "black" + anchors.top: parent.top + width: show_rois.width/3 + height:show_rois.width/3+20 + Image {//image display + id:image_ + anchors.top: parent.top + width: show_rois.width/3 + height:show_rois.width/3 + cache : false + fillMode: Image.Stretch + source: roiSource + Image {//icon for remove the current roi + id: delete_red + z:2 + visible: showConfirmbutton + anchors.top:parent.top + anchors.right: parent.right + source: "/images/DeleteRed.png" + width: 30 + height: 30 + fillMode: Image.PreserveAspectFit + MouseArea{ + anchors.fill: parent + onClicked: + { + var roi_no_=roi_visual_Model.get(index).roiNo + roi_visual_Model.remove(index) + roiModel.remove(roi_no_) + main_view.no_roi-- + } + } + } + } + TextInput { + id:roi_name + anchors.top: image_.bottom + anchors.left: image_.left + height: 20 + width: show_rois.width/3-50 + color: "lime" + text:roiName + font.pixelSize: 16 + + } + Button{ + id:confirm_button + anchors.top: image_.bottom + anchors.right: image_.right + height: 20 + width: 50 + visible: showConfirmbutton + z:1 + text: "confirm" + property int roi_no_ + onClicked://add the current roi to the models + { + roi_name.focus=false + main_view.focus=true + roi_visual_Model.setProperty(index, "roiName", roi_name.text) + roi_visual_Model.setProperty(index, "showConfirmbutton", false) + roi_no_=roi_visual_Model.get(index).roiNo + roiModel.setProperty(roi_no_, "showConfirmbutton", false) + roiModel.setProperty(roi_no_, "roiName", roi_name.text) + confirm_button.visible=false + delete_red.visible=false + } + } + } +/* + MouseArea {//strech the image + id:imagearea + z:0 + anchors.top: imagewraper.top + anchors.bottom: imagewraper.bottom + anchors.bottomMargin: 20 + anchors.left: imagewraper.left + anchors.right: imagewraper.right + onClicked: { + switch(imagewraper.state) + 
{ + case "":imagewraper.state="expanded";break; + case "expanded":imagewraper.state="";break; + default:imagewraper.state="";break; + } + } + } + states: [ + State { + name: "expanded" + PropertyChanges { target: imagewraper; height: roiblock.height/2 ;width:roiblock.width;z:100} + PropertyChanges { target: image_; width: roiblock.width; height: roiblock.height/2} + PropertyChanges { target: imagewraper.ListView.view; contentX: imagewraper.x;contentY: imagewraper.y; interactive: true } + } + ] + + + transitions: [ + Transition { + NumberAnimation { + duration: 100; + properties: "height,width,anchors.rightMargin,anchors.topMargin,opacity,contentX,contentY" + } + } + ] + */ + + } +} diff --git a/opt_gui/resources/Streaming_listview_delegate.qml b/opt_gui/resources/Streaming_listview_delegate.qml new file mode 100644 index 0000000..4f2b373 --- /dev/null +++ b/opt_gui/resources/Streaming_listview_delegate.qml @@ -0,0 +1,58 @@ +import QtQuick 2.0 +import QtQuick.Controls 1.1 +import OPT_Streaming_Component 1.0 +import QtQuick.Window 2.1 + +Component{ + id:steaming_delegate + Rectangle{ + width: (Screen.height-40)*16/54 + height: (Screen.height-40)/6 + color: "#c92c2c" + OptStreamingComponent {//main component from c++ + id:opt_streaming + anchors.fill: parent + topic:modelData + } + Text { + id: topic_text + anchors.left: parent.left + anchors.top: parent.top + color: "lime" + text: opt_streaming.topic + font.pixelSize: 16 + font.bold: true + } + MouseArea{//click on the area to select this camera view as the main camera view in the "main_view" + anchors.top: parent.top + anchors.topMargin: 20 + anchors.bottom: parent.bottom + anchors.left: parent.left + anchors.right: parent.right + onClicked: + { + // set the camera topic in the "main_view",this change will activete the "set_topic" function in c++, + // and modify the subscriber to subscribe another camera topic ,at the same time ,the advertiser of roi will + // also be changed with the new sensor name from the new topic + main_videoStream.topic= opt_streaming.topic; + + var current_topic_name_list = main_videoStream.topic.split('/'); + var current_sensor_name=current_topic_name_list[1]; + + + //clear the roi_visual_Model and add the rois from "roiModel" to roi_visual_Model with the current sensor name + roi_visual_Model.clear(); + for( var i = 0; i< roiModel.count; i++ ) + { + if (roiModel.get(i).sensorName===current_sensor_name) + roi_visual_Model.append(roiModel.get(i)) + } + + // set the rois_dir_path in the main_videoStream with the sensor name + main_videoStream.rois_dir_path=main_videoStream.all_rois_dir_path+current_sensor_name+"/" + } + } + Component.onCompleted: opt_streaming.setup()// set up the subscriber to subscribe camera topic ,at the same time ,the advertiser of roi will + // also be setted with the sensor name from the topic + } +} diff --git a/opt_gui/resources/images.qrc b/opt_gui/resources/images.qrc new file mode 100644 index 0000000..1574a91 --- /dev/null +++ b/opt_gui/resources/images.qrc @@ -0,0 +1,6 @@ + + + openptrack-logo.png + DeleteRed.png + + diff --git a/opt_gui/resources/main_window.qml b/opt_gui/resources/main_window.qml new file mode 100644 index 0000000..21af7b2 --- /dev/null +++ b/opt_gui/resources/main_window.qml @@ -0,0 +1,285 @@ +import QtQuick 2.0 +import QtQuick.Window 2.1 +import OPT_Streaming_Component 1.0 +import QtQuick.Controls 1.1 +import QtQuick.Layouts 1.1 +import Qt.labs.folderlistmodel 2.1 +Window { + id: main_window + //visibility: Window.FullScreen + width: Screen.width + height: 
Screen.height + title: "MOT" + visible: true + + RowLayout{//Main row, 3 items: Multi-Streaming_listview; main_view; rois_view + spacing: 10 + + // Multi-Streaming_listview + ListView { + spacing: 20 + width: (Screen.height-40)*16/54 + height: (Screen.height-40) + model: topic_model// this model comes from c++ in :"main_application.cpp" + delegate: Streaming_listview_delegate{} + } + + //Main_view + ColumnLayout {//main_view + spacing: 50 + Image {//logo + id: openptrack_logo + source: "/images/openptrack-logo.png" + width: 244 + height: 116 + anchors.horizontalCenter: parent.horizontalCenter + fillMode: Image.PreserveAspectFit + } + + //main_view where to select rois + Rectangle { + width: 960 + height:540 + color: "#c92c2c" + focus: true + id: main_view + property int no_roi : 0 + property int rect_X + property int rect_Y + property int rect_width + property int rect_height + OptStreamingComponent {//main stream + id:main_videoStream + anchors.fill: parent + Component.onCompleted: main_videoStream.setup() + } + Text {//display the text of the topic name + id: main_topic + anchors.left: parent.left + anchors.top: parent.top + z:1 + color: "lime" + text: main_videoStream.topic + font.pixelSize: 20 + font.bold: true + font.italic: true + } + MouseArea {// area to draw the bounding boxes + id: selectArea; + property bool ready2add : false + z:0 + anchors.top: parent.top + anchors.bottom: parent.bottom + anchors.left: parent.left + anchors.right: parent.right + onPressed: { + selectArea.focus=true + if (highlightItem !== null) + { + // if there is already a selection, delete it + highlightItem.destroy (); + } + // create a new rectangle at the wanted position + highlightItem = roiComponent.createObject (selectArea, {"x" : mouse.x, "y" : mouse.y ,"width" : 0, "height" :0}); + } + onPositionChanged: { + highlightItem.width = (Math.abs (mouse.x - highlightItem.x)); + highlightItem.height = (Math.abs (mouse.y - highlightItem.y)); + } + onReleased: { + main_view.rect_X=highlightItem.x; + main_view.rect_Y=highlightItem.y; + main_view.rect_width=highlightItem.width; + main_view.rect_height=highlightItem.height; + ready2add=true + } + property Rectangle highlightItem : null; + Component { + id: roiComponent; + Rectangle { + id:roi_rec + border.color: "red" + border.width: 2 + color: "transparent" + x: selectArea.x + y: selectArea.y + width: selectArea.width + height:selectArea.height + } + } + } + Keys.onSpacePressed:{//press space to confirm your BB, save the rois as images , add them to the model + if(selectArea.ready2add) + { + var topic_name_list = main_topic.text.split('/'); + var sensor_name=topic_name_list[1]; + var roi_name=sensor_name+"_roi_"+no_roi.toString(); + var roiSourceUrl=main_videoStream.all_rois_dir_path+sensor_name+"_roi_"+no_roi.toString()+".png" + main_videoStream.save_ROI(sensor_name,no_roi,rect_X,rect_Y,rect_width,rect_height);//save the roi as image + roiModel.append({"roiNo": no_roi ,"sensorName":sensor_name, "roiName": roi_name,"rectX":rect_X,"rectY":rect_Y,"rectWidth":rect_width,"rectHeight":rect_height,"roiSource": roiSourceUrl ,"showConfirmbutton":true, "published":false}); + roi_visual_Model.append({"roiNo": no_roi ,"sensorName":sensor_name, "roiName": roi_name,"rectX":rect_X,"rectY":rect_Y,"rectWidth":rect_width,"rectHeight":rect_height,"roiSource": roiSourceUrl ,"showConfirmbutton":true, "published":false}); + selectArea.highlightItem.destroy (); + no_roi++; + } + selectArea.ready2add=false + } + } + } + + //Rois_view + Rectangle{ + id: show_rois + 
width:Screen.width-(Screen.height-40)*16/54-980 + height: Screen.height + RowLayout{//One Row who has two items:: show roi_from_gui; shwo roi_form_file + anchors.fill: parent + + //show roi_from_gui + ListView { + id: roiblock + width: parent.width/2 + height: parent.height + spacing: 20 + clip: true + model: roi_visual_Model + delegate: Rois_listview_delegate{} + header: header_roiblock + footer: footer_roiblock + ListModel{ + id: roiModel//list model for all the rois in all the cameras + } + ListModel{ + id: roi_visual_Model// list model for the rois of the current camera + } + + Component {//header of the list view: Text and Button + id: header_roiblock + Rectangle + { + width: parent.width/2 + height: 50 + ColumnLayout{ + anchors.fill: parent + spacing: 10 + Text { + id: header_roiblock_text + width: parent.width + height: 20 + color: "blue" + font.pixelSize: 20 + text: "ROIs_from_ marking" + } + Button{ + id: publish_rois_from_gui + anchors.top:header_roiblock_text.bottom + height: 20 + width: parent.width + text: qsTr("Publish_rois_from_gui") + onClicked: + {// add all the rois in the roi_visual_Model to the msg ,and publish the msg(all in c++) + for( var i = 0; i< roi_visual_Model.count; i++ ) + { + if(roi_visual_Model.get(i).published===false) + { + main_videoStream.add_roi(roi_visual_Model.get(i).roiNo,roi_visual_Model.get(i).roiName, roi_visual_Model.get(i).rectX,roi_visual_Model.get(i).rectY, roi_visual_Model.get(i).rectWidth, roi_visual_Model.get(i).rectHeight);//add roi to the msg + roi_visual_Model.setProperty(i, "published", true)//set flag in the model + var no_ROI_=roi_visual_Model.get(i).roiNo; + roiModel.setProperty(no_ROI_, "published", true)//set flag in the model + } + } + main_videoStream.publish_rois_from_gui();//publish the msg + } + } + } + } + } + Component {//footer of the list view:empty rectangle + id: footer_roiblock + Rectangle + { + width: parent.width/2 + height: 50 + } + } + } + + + + //show roi_form_file + ListView { + id:roi_folder + width: parent.width/2 + height: parent.height + spacing: 20 + model: roi_model_file + delegate: roifileDelegate + header: roi_folder_header + + FolderListModel {//model from folder file list + id:roi_model_file + nameFilters: ["*.png"] + folder: main_videoStream.rois_dir_path + } + Component {////header of the list view: Text and Button + id: roi_folder_header + Rectangle + { + width: parent.width/2 + height: 50 + ColumnLayout{ + anchors.fill: parent + spacing: 10 + Text { + id: header_roiblock_text2 + height: 20 + text: "ROIs_from_ folder" + color: "green" + font.pixelSize: 20 + } + Button{ + id: publish_rois_from_file + anchors.top:header_roiblock_text2.bottom + height: 20 + width: parent.width + text: qsTr("publish_rois_from_file") + onClicked: + { + main_videoStream.publish_rois_from_file();// call the c++ fuction + } + } + } + } + } + + Component {//delegate of the list view:show the image and text(roi file name) + id: roifileDelegate + Rectangle { + width: show_rois.width/3 + height:show_rois.width/3+20 + Image { + id:roi_folder_image + anchors.top: parent.top + width: show_rois.width/3 + height:show_rois.width/3 + z:1 + fillMode: Image.Stretch + source: roi_model_file.folder+fileName + } + Text { + id:roi_name + anchors.top: roi_folder_image.bottom + anchors.left: roi_folder_image.left + height: 20 + width: show_rois.width/3 + color: "lime" + text:fileName.substring(0,fileName.indexOf('.')) + font.pixelSize: 16 + } + } + } + } + } + } + } +} + diff --git a/opt_gui/resources/openptrack-logo.png 
b/opt_gui/resources/openptrack-logo.png new file mode 100644 index 0000000..c536f01 Binary files /dev/null and b/opt_gui/resources/openptrack-logo.png differ diff --git a/opt_gui/resources/qml.qrc b/opt_gui/resources/qml.qrc new file mode 100644 index 0000000..ff4983f --- /dev/null +++ b/opt_gui/resources/qml.qrc @@ -0,0 +1,7 @@ + + + main_window.qml + Streaming_listview_delegate.qml + Rois_listview_delegate.qml + + diff --git a/opt_gui/src/component/opt_streaming_component.cpp b/opt_gui/src/component/opt_streaming_component.cpp new file mode 100644 index 0000000..5993b8d --- /dev/null +++ b/opt_gui/src/component/opt_streaming_component.cpp @@ -0,0 +1,240 @@ +#include "../../include/component/opt_streaming_component.hpp" + + +OPT_Streaming_Component::OPT_Streaming_Component(QQuickItem * parent) : + nh(), + QQuickPaintedItem(parent), + current_image(NULL), + current_buffer(NULL), + topic_value("Click_on_one_of_the_camera_views_on_the_left_to_start"), + ros_ready(false), + img_trans(NULL),number_of_rois(0) { + all_rois_dir_path_value=QString::fromStdString("file:"+ros::package::getPath("opt_gui")+"/data/"); +} + +//set up the subscriber(image msg from sensor) and advertiser(roi msg) +void OPT_Streaming_Component::setup() { + img_trans = new image_transport::ImageTransport(nh); + + image_sub = img_trans->subscribe( + topic_value.toStdString(), + 1, + &OPT_Streaming_Component::receive_image, + this, + image_transport::TransportHints("raw") + ); + + std::string sensor_name=topic_value.toStdString().substr(1, (topic_value.toStdString().size()-17)); + std::string image2D_rois_from_gui_pub_name = "/"+sensor_name + "/image2D_rois_from_gui"; + image2D_rois_from_gui_pub=nh.advertise(image2D_rois_from_gui_pub_name,3); + + std::string image2D_rois_from_file_pub_name = "/"+sensor_name + "/image2D_rois_from_file/image"; +// image2D_rois_from_file_pub=nh.advertise(image2D_rois_from_file_pub_name,3); + image2D_rois_from_file_pub=nh.advertise(image2D_rois_from_file_pub_name,3); + + ros_ready = true; + ROS_INFO("Setup of video component complete"); +} + + +//call back function to recieve images from sensor +void OPT_Streaming_Component::receive_image(const sensor_msgs::Image::ConstPtr &msg) { + image2D_rois_msg.header=msg->header; + + //-----------------------------This part comes from rtq_image_view---------------------------------- + try + { + // First let cv_bridge do its magic + cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::RGB8); + conversion_mat_ = cv_ptr->image; + } + catch (cv_bridge::Exception& e) + { + try + { + // If we're here, there is no conversion that makes sense, but let's try to imagine a few first + cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg); + if (msg->encoding == "CV_8UC3") + { + // assuming it is rgb + conversion_mat_ = cv_ptr->image; + } else if (msg->encoding == "8UC1") { + // convert gray to rgb + cv::cvtColor(cv_ptr->image, conversion_mat_, CV_GRAY2RGB); + } else if (msg->encoding == "16UC1" || msg->encoding == "32FC1") { + // scale / quantify + double min = 0; + double max = 10; + if (msg->encoding == "16UC1") max *= 1000; + // dynamically adjust range based on min/max in image + cv::minMaxLoc(cv_ptr->image, &min, &max); + if (min == max) { + // completely homogeneous images are displayed in gray + min = 0; + max = 2; + } + cv::Mat img_scaled_8u; + cv::Mat(cv_ptr->image-min).convertTo(img_scaled_8u, CV_8UC1, 255. 
/ (max - min)); + cv::cvtColor(img_scaled_8u, conversion_mat_, CV_GRAY2RGB); + } else { + qWarning("ImageView.callback_image() could not convert image from '%s' to 'rgb8' (%s)", msg->encoding.c_str(), e.what()); + return; + } + } + catch (cv_bridge::Exception& e) + { + qWarning("ImageView.callback_image() while trying to convert image from '%s' to 'rgb8' an exception was thrown (%s)", msg->encoding.c_str(), e.what()); + return; + } + } + //-----------------------------This part comes from rtq_image_view---------------------------------- + + // Only Qimage can be painted + current_image=QImage(conversion_mat_.data,conversion_mat_.cols,conversion_mat_.rows,QImage::Format_RGB888); + + // finally we need to re-render the component to display the new image + update(); +} + + +//This function will run after "update();" is called +void OPT_Streaming_Component::paint(QPainter * painter) { + painter->drawImage(boundingRect().adjusted(1, 1, -1, -1), current_image);//paint the image to fit the size of the Rectangle +} + + +//When the sensor name is modified in the camera view, change the topics with the new sensor name in the subscriber and advertiser +void OPT_Streaming_Component::set_topic(const QString & new_value) { + if(topic_value != new_value) { + topic_value = new_value; + if(ros_ready) { + image_sub.shutdown(); + image_sub = img_trans->subscribe( + topic_value.toStdString(), + 1, + &OPT_Streaming_Component::receive_image, + this, + image_transport::TransportHints("raw") + ); + + image2D_rois_msg.Rois.clear(); + + std::string sensor_name=topic_value.toStdString().substr(1, (topic_value.toStdString().size()-17)); + std::string image2D_rois_from_gui_pub_name = "/"+sensor_name + "/image2D_rois_from_gui"; + image2D_rois_from_gui_pub=nh.advertise(image2D_rois_from_gui_pub_name,3); + + std::string image2D_rois_from_file_pub_name = "/"+sensor_name + "/image2D_rois_from_file/image"; + image2D_rois_from_file_pub=nh.advertise(image2D_rois_from_file_pub_name,3); + + } + emit topic_changed(); + } +} + +QString OPT_Streaming_Component::get_topic() const { + return topic_value; +} + + +void OPT_Streaming_Component::set_rois_dir_path(const QString rois_dir_path_) +{ + rois_dir_path_value=rois_dir_path_; + emit rois_dir_path_changed(); +} + +QString OPT_Streaming_Component::get_rois_dir_path() const { + return rois_dir_path_value; +} +QString OPT_Streaming_Component::get_all_rois_dir_path() const { + return all_rois_dir_path_value; +} + + +//save the selected rois in the gui as images to their folder named with the corresponding sensor names +void OPT_Streaming_Component::save_ROI(QString sensor_name,int no_ROI, int rect_X,int rect_Y, int rect_width, int rect_height ) +{ + //get the roi with the bouding box you created in qml main_view, save it as a file + cv::Rect roi_rec=cv::Rect(rect_X,rect_Y,rect_width,rect_height) & cv::Rect(0,0,conversion_mat_.size().width,conversion_mat_.size().height); + cv::Mat roi(conversion_mat_,roi_rec); + cv::cvtColor(roi, roi, CV_BGR2RGB); + std::string roi_file_name=ros::package::getPath("opt_gui")+"/data/"+sensor_name.toStdString()+"_roi_"+ QString::number(no_ROI).toStdString()+".png"; + cv::imwrite(roi_file_name,roi); +} + +// Add rois one by one to the msg in the qml after push the button named "publish_rois_from_gui" +void OPT_Streaming_Component::add_roi(int no_ROI,QString roi_name, int rect_X,int rect_Y, int rect_width, int rect_height) +{ + current_image2D_roi_msg.no=no_ROI; + current_image2D_roi_msg.name=roi_name.toStdString(); + current_image2D_roi_msg.x=rect_X; + 
current_image2D_roi_msg.y=rect_Y; + current_image2D_roi_msg.width=rect_width; + current_image2D_roi_msg.height=rect_height; + image2D_rois_msg.Rois.push_back(current_image2D_roi_msg); +} + +//After Add all the rois , publish the msg, this is also excuted in qml as the "add_roi" function +void OPT_Streaming_Component::publish_rois_from_gui() +{ + image2D_rois_from_gui_pub.publish(image2D_rois_msg); + std::cout< roi_filenames = dir.GetListFiles(rois_dir_path, ".png"); + + //publish roi iamge one by one + for(std::vector::iterator it =roi_filenames.begin(); it!=roi_filenames.end();it++) + { + cv::Mat image = cv::imread(rois_dir_path+*it); + sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", image).toImageMsg(); + image2D_rois_from_file_pub.publish(msg); + std::cout<<"image msg for file has published"< roi_filenames = dir.GetListFiles(rois_dir_path, ".png"); + + //publish roi iamge one by one + for(std::vector::iterator it =roi_filenames.begin(); it!=roi_filenames.end();it++) + { + cv::Mat image = cv::imread(rois_dir_path+*it); + + + sensor_msgs::ImagePtr image_msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", image).toImageMsg(); + + opt_msgs::Image2D_roi_file current_image2D_roi_file_msg; + current_image2D_roi_file_msg.image_roi=*image_msg; + current_image2D_roi_file_msg.name=*it; + + image2D_rois_from_file_pub.publish(current_image2D_roi_file_msg); + std::cout<<"image msg for file has published"< +#include "../include/component/opt_streaming_component.hpp" +#include +#include "../../opt_gui/include/opt_gui/main_application.hpp" + +Main_Application::Main_Application(): + nh("~") +{} +void Main_Application::run() { + + +//////////////////////////////////////////////////////////////////////////////////// +////Read sensor_name from camera_network.yaml adn use them to create topicrList , +////then create topic_model which can be used in qml +//////////////////////////////////////////////////////////////////////////////////// + + QStringList topicrList; +// std::vector keys; + // nh.getParamNames(keys); +// ROS_INFO_STREAM("These are all the parameters: "); +// std::copy(keys.begin(), keys.end(), std::ostream_iterator(std::cout, "\n")); +// ROS_INFO_STREAM("Reading network!"); + XmlRpc::XmlRpcValue list; + if (nh.getParam("network", list)) + { + //ROS_INFO_STREAM("initial: " << list); + for(int i =0; i < list.size(); ++i) + { + //ROS_INFO_STREAM("item: " << list[i]); + XmlRpc::XmlRpcValue v(list[i]["sensors"]); + //ROS_INFO_STREAM("Sensors: " << v); + for(int k = 0; k < v.size(); ++k) + { + // ROS_INFO_STREAM("id"<rootContext(); + context->setContextProperty("topic_model", QVariant::fromValue(topicrList)); + + qmlRegisterType("OPT_Streaming_Component", 1, 0, "OptStreamingComponent"); + // this loads the qml file we are about to create + this->load(QUrl(QStringLiteral("qrc:/main_window.qml"))); + // Setup a timer to get the application's idle loop + QTimer * timer = new QTimer(this); + connect(timer, SIGNAL(timeout()), this, SLOT(main_loop())); + timer->start(0); +} + +void Main_Application::main_loop() { + ros::spinOnce(); +} diff --git a/opt_gui/src/opt_gui_main.cpp b/opt_gui/src/opt_gui_main.cpp new file mode 100644 index 0000000..8248f20 --- /dev/null +++ b/opt_gui/src/opt_gui_main.cpp @@ -0,0 +1,13 @@ +#include +#include +#include"../include/opt_gui/main_application.hpp" +int main(int argc, char **argv) { + // Set up ROS. 
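
`Main_Application::run()` above wires ROS into the Qt event loop: a zero-interval `QTimer` fires `main_loop()` whenever Qt is idle, and `main_loop()` only calls `ros::spinOnce()`. The same pattern in isolation (node name is a placeholder):

```cpp
#include <QGuiApplication>
#include <QTimer>
#include <ros/ros.h>

int main(int argc, char** argv) {
  ros::init(argc, argv, "qt_ros_spin_demo");  // placeholder node name
  ros::NodeHandle nh;                         // keeps the node alive

  QGuiApplication app(argc, argv);

  // A 0 ms timer fires on every pass of the Qt event loop, so ROS
  // callbacks are serviced without blocking the GUI.
  QTimer timer;
  QObject::connect(&timer, &QTimer::timeout, [] { ros::spinOnce(); });
  timer.start(0);

  return app.exec();  // Qt owns the main loop; ROS piggybacks on it
}
```

This keeps both event systems on a single thread; the tradeoff is that ROS callback latency is tied to how busy the GUI is.
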
+ ros::init(argc, argv, "opt_gui"); + + QGuiApplication app(argc, argv); + Main_Application engine; + engine.run(); + + return app.exec(); +} diff --git a/opt_msgs/CMakeLists.txt b/opt_msgs/CMakeLists.txt index f259354..374439e 100644 --- a/opt_msgs/CMakeLists.txt +++ b/opt_msgs/CMakeLists.txt @@ -2,7 +2,8 @@ cmake_minimum_required(VERSION 2.8.3) project(opt_msgs) find_package(catkin REQUIRED COMPONENTS roscpp message_generation geometry_msgs sensor_msgs) -add_message_files(FILES Rois.msg RoiRect.msg HumanEntry.msg HumanEntries.msg BoundingBox2D.msg BoundingBox3D.msg Detection.msg DetectionArray.msg Track.msg TrackArray.msg CalibrationStatus.msg IDArray.msg) +add_message_files(FILES Rois.msg RoiRect.msg HumanEntry.msg HumanEntries.msg BoundingBox2D.msg BoundingBox3D.msg Detection.msg DetectionArray.msg Track.msg TrackArray.msg CalibrationStatus.msg IDArray.msg + Image2D_roi.msg Image2D_roi_array.msg World3D_roi.msg World3D_roi_array.msg Track3D.msg Track3DArray.msg ObjectName.msg ObjectNameArray.msg Image2D_roi_file.msg) add_service_files(FILES OPTSensor.srv OPTTransform.srv) diff --git a/opt_msgs/CMakeLists.txt~ b/opt_msgs/CMakeLists.txt~ new file mode 100644 index 0000000..10704f1 --- /dev/null +++ b/opt_msgs/CMakeLists.txt~ @@ -0,0 +1,23 @@ +cmake_minimum_required(VERSION 2.8.3) +project(opt_msgs) +find_package(catkin REQUIRED COMPONENTS roscpp message_generation geometry_msgs sensor_msgs) + +add_message_files(FILES Rois.msg RoiRect.msg HumanEntry.msg HumanEntries.msg BoundingBox2D.msg BoundingBox3D.msg Detection.msg DetectionArray.msg Track.msg TrackArray.msg CalibrationStatus.msg IDArray.msg + Image2D_roi.msg Image2D_roi_array.msg World3D_roi.msg World3D_roi_arra.msg) + +add_service_files(FILES OPTSensor.srv OPTTransform.srv) + +generate_messages(DEPENDENCIES std_msgs geometry_msgs sensor_msgs) + + +catkin_package( + INCLUDE_DIRS + LIBRARIES + CATKIN_DEPENDS roscpp std_msgs sensor_msgs message_runtime +) + +include_directories(include include/open_ptrack/${PROJECT_NAME}/) + +add_library(roi_msgs src/overlap.cpp ) + +target_link_libraries(roi_msgs ${catkin_LIBRARIES}) diff --git a/opt_msgs/msg/Detection.msg b/opt_msgs/msg/Detection.msg index 396d6c4..cde3e89 100644 --- a/opt_msgs/msg/Detection.msg +++ b/opt_msgs/msg/Detection.msg @@ -9,3 +9,5 @@ float64 height float64 confidence float64 distance bool occluded + +string object_name diff --git a/opt_msgs/msg/Image2D_roi.msg b/opt_msgs/msg/Image2D_roi.msg new file mode 100644 index 0000000..a5640d4 --- /dev/null +++ b/opt_msgs/msg/Image2D_roi.msg @@ -0,0 +1,6 @@ +int32 no +string name +int32 x +int32 y +int32 width +int32 height diff --git a/opt_msgs/msg/Image2D_roi.msg~ b/opt_msgs/msg/Image2D_roi.msg~ new file mode 100644 index 0000000..a59c871 --- /dev/null +++ b/opt_msgs/msg/Image2D_roi.msg~ @@ -0,0 +1,6 @@ +int32 no +string name +int32 x +int32 y +int32 width +int32 height diff --git a/opt_msgs/msg/Image2D_roi_array.msg b/opt_msgs/msg/Image2D_roi_array.msg new file mode 100644 index 0000000..d6b106f --- /dev/null +++ b/opt_msgs/msg/Image2D_roi_array.msg @@ -0,0 +1,2 @@ +Header header +Image2D_roi[] Rois diff --git a/opt_msgs/msg/Image2D_roi_file.msg b/opt_msgs/msg/Image2D_roi_file.msg new file mode 100644 index 0000000..291f03d --- /dev/null +++ b/opt_msgs/msg/Image2D_roi_file.msg @@ -0,0 +1,2 @@ +sensor_msgs/Image image_roi +string name diff --git a/opt_msgs/msg/ObjectName.msg b/opt_msgs/msg/ObjectName.msg new file mode 100644 index 0000000..ae57deb --- /dev/null +++ b/opt_msgs/msg/ObjectName.msg @@ -0,0 +1,2 @@ +int32 no +string 
object_name diff --git a/opt_msgs/msg/ObjectNameArray.msg b/opt_msgs/msg/ObjectNameArray.msg new file mode 100644 index 0000000..4621be3 --- /dev/null +++ b/opt_msgs/msg/ObjectNameArray.msg @@ -0,0 +1,2 @@ +Header header +opt_msgs/ObjectName[] object_names diff --git a/opt_msgs/msg/Track.msg b/opt_msgs/msg/Track.msg index b068460..7874af3 100644 --- a/opt_msgs/msg/Track.msg +++ b/opt_msgs/msg/Track.msg @@ -14,4 +14,4 @@ float64 confidence uint8 visibility opt_msgs/BoundingBox2D box_2D - +string object_name diff --git a/opt_msgs/msg/Track3D.msg b/opt_msgs/msg/Track3D.msg new file mode 100644 index 0000000..94bdbd5 --- /dev/null +++ b/opt_msgs/msg/Track3D.msg @@ -0,0 +1,18 @@ +uint8 VISIBLE = 0 +uint8 OCCLUDED = 1 +uint8 NOT_VISIBLE = 2 + +int32 id + +float64 x +float64 y +float64 z +float64 height +float64 distance +float64 age +float64 confidence + +uint8 visibility + +opt_msgs/BoundingBox2D box_2D + diff --git a/opt_msgs/msg/Track3DArray.msg b/opt_msgs/msg/Track3DArray.msg new file mode 100644 index 0000000..3e5ad96 --- /dev/null +++ b/opt_msgs/msg/Track3DArray.msg @@ -0,0 +1,3 @@ +Header header + +opt_msgs/Track3D[] tracks diff --git a/opt_msgs/msg/World3D_roi.msg b/opt_msgs/msg/World3D_roi.msg new file mode 100644 index 0000000..f30091a --- /dev/null +++ b/opt_msgs/msg/World3D_roi.msg @@ -0,0 +1,7 @@ +int32 no +float64 x1 +float64 y1 +float64 z1 +float64 x2 +float64 y2 +float64 z2 diff --git a/opt_msgs/msg/World3D_roi.msg~ b/opt_msgs/msg/World3D_roi.msg~ new file mode 100644 index 0000000..8481e4a --- /dev/null +++ b/opt_msgs/msg/World3D_roi.msg~ @@ -0,0 +1,7 @@ +int32 no +int32 x1 +int32 y1 +int32 z1 +int32 x2 +int32 y2 +int32 z2 diff --git a/opt_msgs/msg/World3D_roi_arra.msg~ b/opt_msgs/msg/World3D_roi_arra.msg~ new file mode 100644 index 0000000..d6b106f --- /dev/null +++ b/opt_msgs/msg/World3D_roi_arra.msg~ @@ -0,0 +1,2 @@ +Header header +Image2D_roi[] Rois diff --git a/opt_msgs/msg/World3D_roi_array.msg b/opt_msgs/msg/World3D_roi_array.msg new file mode 100644 index 0000000..6f789e5 --- /dev/null +++ b/opt_msgs/msg/World3D_roi_array.msg @@ -0,0 +1,2 @@ +Header header +World3D_roi[] Rois diff --git a/opt_msgs/msg/image2D_roi_array.msg~ b/opt_msgs/msg/image2D_roi_array.msg~ new file mode 100644 index 0000000..9c51bca --- /dev/null +++ b/opt_msgs/msg/image2D_roi_array.msg~ @@ -0,0 +1,2 @@ +Header header +Roi_object[] Rois diff --git a/opt_utils/CMakeLists.txt b/opt_utils/CMakeLists.txt index d65ce2e..e466ced 100644 --- a/opt_utils/CMakeLists.txt +++ b/opt_utils/CMakeLists.txt @@ -30,5 +30,8 @@ target_link_libraries(roi_viewer boost_system boost_filesystem boost_signals ${P add_executable(ros2udp_converter apps/ros2udp_converter.cpp src/udp_messaging.cpp)# src/json.cpp) target_link_libraries(ros2udp_converter ${catkin_LIBRARIES} json) +add_executable(ros2udp_converter_object apps/ros2udp_converter_object.cpp src/udp_messaging.cpp) +target_link_libraries(ros2udp_converter_object ${catkin_LIBRARIES} json) + add_executable(udp_listener apps/udp_listener.cpp src/udp_messaging.cpp) target_link_libraries(udp_listener ${catkin_LIBRARIES}) diff --git a/opt_utils/apps/ros2udp_converter.cpp b/opt_utils/apps/ros2udp_converter.cpp index 8aa0005..03fe985 100644 --- a/opt_utils/apps/ros2udp_converter.cpp +++ b/opt_utils/apps/ros2udp_converter.cpp @@ -84,7 +84,7 @@ trackingCallback(const opt_msgs::TrackArray::ConstPtr& tracking_msg) tracks.Add(current_track); } - root.Add("tracks", tracks); + root.Add("people_tracks", tracks); /// Convert JSON object to string: Jzon::Format message_format = 
Jzon::StandardFormat; diff --git a/opt_utils/apps/ros2udp_converter_object.cpp b/opt_utils/apps/ros2udp_converter_object.cpp new file mode 100644 index 0000000..f90a823 --- /dev/null +++ b/opt_utils/apps/ros2udp_converter_object.cpp @@ -0,0 +1,248 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
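
The new `ros2udp_converter_object` below mirrors the people converter, but keys the track array as `object_tracks` (the people converter now uses `people_tracks`) and carries each track's `object_name`. A stripped-down sketch of the Jzon calls it relies on — track values are invented, and the header name is an assumption:

```cpp
#include <iostream>
#include <Jzon.h>  // assumed header name for the bundled Jzon library

int main() {
  Jzon::Object root;
  Jzon::Array tracks;

  Jzon::Object track;
  track.Add("id", 3);                 // invented sample values
  track.Add("object_name", "mug");
  track.Add("x", 0.42);
  track.Add("y", 1.05);
  tracks.Add(track);

  root.Add("object_tracks", tracks);  // object converter's array key

  Jzon::Format fmt = Jzon::StandardFormat;
  fmt.newline = false;                // compact, single-line UDP payload
  Jzon::Writer writer(root, fmt);
  writer.Write();
  std::cout << writer.GetResult() << std::endl;
  return 0;
}
```
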
+ * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it] + * + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +// Global variables: +int udp_buffer_length; // UDP message buffer length +int udp_port; // UDP port +std::string hostip; // UDP host +int json_indent_size; // indent size for JSON message +bool json_newline; // use newlines (true) or not (false) in JSON messages +bool json_spacing; // use spacing (true) or not (false) in JSON messages +bool json_use_tabs; // use tabs (true) or not (false) in JSON messages +struct ComData udp_data; // parameters for UDP messaging +open_ptrack::opt_utils::UDPMessaging udp_messaging(udp_data); // instance of class UDPMessaging +ros::Time last_heartbeat_time; +double heartbeat_interval; + + +void +trackingCallback(const opt_msgs::TrackArray::ConstPtr& tracking_msg) +{ + /// Create JSON-formatted message: + Jzon::Object root, header, stamp; + + /// Add header (84 characters): + header.Add("seq", int(tracking_msg->header.seq)); + stamp.Add("sec", int(tracking_msg->header.stamp.sec)); + stamp.Add("nsec", int(tracking_msg->header.stamp.nsec)); + header.Add("stamp", stamp); + header.Add("frame_id", tracking_msg->header.frame_id); + root.Add("header", header); + + /// Add tracks array: + // >50 characters for every track + Jzon::Array tracks; + + + for (unsigned int i = 0; i < tracking_msg->tracks.size(); i++) + { + Jzon::Object current_track; + int index_roiname=tracking_msg->tracks[i].id; + current_track.Add("id", index_roiname); + current_track.Add("object_name",tracking_msg->tracks[i].object_name); + current_track.Add("x", tracking_msg->tracks[i].x); + current_track.Add("y", tracking_msg->tracks[i].y); + current_track.Add("height", tracking_msg->tracks[i].height); + current_track.Add("age", tracking_msg->tracks[i].age); + current_track.Add("confidence", tracking_msg->tracks[i].confidence); + tracks.Add(current_track); + } + root.Add("object_tracks", tracks); + + /// Convert JSON object to string: + Jzon::Format message_format = Jzon::StandardFormat; + message_format.indentSize = json_indent_size; + message_format.newline = json_newline; + message_format.spacing = json_spacing; + message_format.useTabs = json_use_tabs; + Jzon::Writer writer(root, message_format); + writer.Write(); + std::string json_string = writer.GetResult(); + // std::cout << "String sent: " << json_string << std::endl; + + /// Copy string to message buffer: + udp_data.si_num_byte_ = json_string.length()+1; + char buf[udp_data.si_num_byte_]; + for (unsigned int i = 0; i < udp_data.si_num_byte_; i++) + { + buf[i] = 0; + } + sprintf(buf, "%s", json_string.c_str()); + udp_data.pc_pck_ = buf; // buffer where the message is written + + /// Send message: + udp_messaging.sendFromSocketUDP(&udp_data); +} + +void +aliveIDsCallback(const opt_msgs::IDArray::ConstPtr& alive_ids_msg) +{ + ros::Time msg_time = ros::Time(alive_ids_msg->header.stamp.sec, alive_ids_msg->header.stamp.nsec); + if ((msg_time - last_heartbeat_time).toSec() > heartbeat_interval) + { + /// Create JSON-formatted message: + Jzon::Object root, header, stamp; + + /// Add header: + header.Add("seq", int(alive_ids_msg->header.seq)); + stamp.Add("sec", int(alive_ids_msg->header.stamp.sec)); + stamp.Add("nsec", int(alive_ids_msg->header.stamp.nsec)); + header.Add("stamp", stamp); + header.Add("frame_id", "heartbeat"); + root.Add("header", header); + + Jzon::Array alive_IDs; + for (unsigned int i = 0; i < alive_ids_msg->ids.size(); i++) + { + 
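+ // append the id of every currently-alive track to the "alive_IDs" JSON array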
alive_IDs.Add(alive_ids_msg->ids[i]); + } + root.Add("alive_IDs", alive_IDs); + root.Add("max_ID", alive_ids_msg->max_ID); + + /// Convert JSON object to string: + Jzon::Format message_format = Jzon::StandardFormat; + message_format.indentSize = json_indent_size; + message_format.newline = json_newline; + message_format.spacing = json_spacing; + message_format.useTabs = json_use_tabs; + Jzon::Writer writer(root, message_format); + writer.Write(); + std::string json_string = writer.GetResult(); + // std::cout << "String sent: " << json_string << std::endl; + + /// Copy string to message buffer: + udp_data.si_num_byte_ = json_string.length()+1; + char buf[udp_data.si_num_byte_]; + for (unsigned int i = 0; i < udp_data.si_num_byte_; i++) + { + buf[i] = 0; + } + sprintf(buf, "%s", json_string.c_str()); + udp_data.pc_pck_ = buf; // buffer where the message is written + + /// Send message: + udp_messaging.sendFromSocketUDP(&udp_data); + + last_heartbeat_time = msg_time; + } +} + + + +typedef unsigned long uint32; +// convert a string represenation of an IP address into its numeric equivalent +static uint32 Inet_AtoN(const char * buf) +{ + + uint32 ret = 0; + int shift = 24; // fill out the MSB first + bool startQuad = true; + while((shift >= 0)&&(*buf)) + { + if (startQuad) + { + unsigned char quad = (unsigned char) atoi(buf); + ret |= (((uint32)quad) << shift); + shift -= 8; + } + startQuad = (*buf == '.'); + buf++; + } + return ret; +} + +int +main(int argc, char **argv) +{ + // Initialization: + ros::init(argc, argv, "ros2udp_converter"); + ros::NodeHandle nh("~"); + + // Read input parameters: + nh.param("udp/port", udp_port, 21234); + nh.param("udp/hostip", hostip, std::string("127.0.0.1")); + nh.param("udp/buffer_length", udp_buffer_length, 2048); + nh.param("json/indent_size", json_indent_size, 0); + nh.param("json/newline", json_newline, false); + nh.param("json/spacing", json_spacing, false); + nh.param("json/use_tabs", json_use_tabs, false); + nh.param("json/heartbeat_interval", heartbeat_interval, 0.25); + + + // ROS subscriber: + ros::Subscriber tracking_sub = nh.subscribe("input_topic", 1, trackingCallback); + + ros::Subscriber alive_ids_sub = nh.subscribe("alive_ids_topic", 1, aliveIDsCallback); + + + // Initialize UDP parameters: + char buf[0]; + udp_data.si_port_ = udp_port; // port + udp_data.si_retry_ = 1; + udp_data.si_num_byte_ = udp_buffer_length; // number of bytes to write (2048 -> about 30 tracks) + udp_data.pc_pck_ = buf; // buffer where the message is written + udp_data.si_timeout_ = 4; + udp_data.sj_addr_ = Inet_AtoN(hostip.c_str()); + + /// Create object for UDP messaging: + udp_messaging = open_ptrack::opt_utils::UDPMessaging(udp_data); + + /// Create client socket: + udp_messaging.createSocketClientUDP(&udp_data); + + // Execute callbacks: + ros::spin(); + + // Close socket: + udp_messaging.closeSocketUDP(&udp_data); + + return 0; +} diff --git a/opt_utils/launch/ros2udp_converter.launch b/opt_utils/launch/ros2udp_converter.launch index 34e2131..0683135 100644 --- a/opt_utils/launch/ros2udp_converter.launch +++ b/opt_utils/launch/ros2udp_converter.launch @@ -1,8 +1,17 @@ - + + + + + + + + + + diff --git a/opt_utils/launch/single_camera_visualization.launch b/opt_utils/launch/single_camera_visualization.launch new file mode 100644 index 0000000..a443057 --- /dev/null +++ b/opt_utils/launch/single_camera_visualization.launch @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/swissranger_camera/cfg/cpp/swissranger_camera/SwissRangerConfig.h 
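
A quick check of `Inet_AtoN` above: it walks the dotted quad and places each byte starting from the most-significant end, so `"192.168.1.10"` yields `0xC0A8010A`. A tiny self-contained copy of the same logic for verification:

```cpp
#include <cstdio>
#include <cstdlib>

typedef unsigned long uint32;

// Same logic as Inet_AtoN above: fill from the most-significant byte down.
static uint32 inet_aton_demo(const char* buf) {
  uint32 ret = 0;
  int shift = 24;
  bool startQuad = true;
  while ((shift >= 0) && (*buf)) {
    if (startQuad) {
      unsigned char quad = (unsigned char)atoi(buf);
      ret |= ((uint32)quad) << shift;
      shift -= 8;
    }
    startQuad = (*buf == '.');
    buf++;
  }
  return ret;
}

int main() {
  std::printf("%#lx\n", inet_aton_demo("192.168.1.10"));  // prints 0xc0a8010a
  return 0;
}
```
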
b/swissranger_camera/cfg/cpp/swissranger_camera/SwissRangerConfig.h new file mode 100644 index 0000000..d5cdb4f --- /dev/null +++ b/swissranger_camera/cfg/cpp/swissranger_camera/SwissRangerConfig.h @@ -0,0 +1,531 @@ +//#line 2 "/opt/ros/indigo/share/dynamic_reconfigure/templates/ConfigType.h.template" +// ********************************************************* +// +// File autogenerated for the swissranger_camera package +// by the dynamic_reconfigure package. +// Please do not edit. +// +// ********************************************************/ + +#ifndef __swissranger_camera__SWISSRANGERCONFIG_H__ +#define __swissranger_camera__SWISSRANGERCONFIG_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace swissranger_camera +{ + class SwissRangerConfigStatics; + + class SwissRangerConfig + { + public: + class AbstractParamDescription : public dynamic_reconfigure::ParamDescription + { + public: + AbstractParamDescription(std::string n, std::string t, uint32_t l, + std::string d, std::string e) + { + name = n; + type = t; + level = l; + description = d; + edit_method = e; + } + + virtual void clamp(SwissRangerConfig &config, const SwissRangerConfig &max, const SwissRangerConfig &min) const = 0; + virtual void calcLevel(uint32_t &level, const SwissRangerConfig &config1, const SwissRangerConfig &config2) const = 0; + virtual void fromServer(const ros::NodeHandle &nh, SwissRangerConfig &config) const = 0; + virtual void toServer(const ros::NodeHandle &nh, const SwissRangerConfig &config) const = 0; + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, SwissRangerConfig &config) const = 0; + virtual void toMessage(dynamic_reconfigure::Config &msg, const SwissRangerConfig &config) const = 0; + virtual void getValue(const SwissRangerConfig &config, boost::any &val) const = 0; + }; + + typedef boost::shared_ptr AbstractParamDescriptionPtr; + typedef boost::shared_ptr AbstractParamDescriptionConstPtr; + + template + class ParamDescription : public AbstractParamDescription + { + public: + ParamDescription(std::string name, std::string type, uint32_t level, + std::string description, std::string edit_method, T SwissRangerConfig::* f) : + AbstractParamDescription(name, type, level, description, edit_method), + field(f) + {} + + T (SwissRangerConfig::* field); + + virtual void clamp(SwissRangerConfig &config, const SwissRangerConfig &max, const SwissRangerConfig &min) const + { + if (config.*field > max.*field) + config.*field = max.*field; + + if (config.*field < min.*field) + config.*field = min.*field; + } + + virtual void calcLevel(uint32_t &comb_level, const SwissRangerConfig &config1, const SwissRangerConfig &config2) const + { + if (config1.*field != config2.*field) + comb_level |= level; + } + + virtual void fromServer(const ros::NodeHandle &nh, SwissRangerConfig &config) const + { + nh.getParam(name, config.*field); + } + + virtual void toServer(const ros::NodeHandle &nh, const SwissRangerConfig &config) const + { + nh.setParam(name, config.*field); + } + + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, SwissRangerConfig &config) const + { + return dynamic_reconfigure::ConfigTools::getParameter(msg, name, config.*field); + } + + virtual void toMessage(dynamic_reconfigure::Config &msg, const SwissRangerConfig &config) const + { + dynamic_reconfigure::ConfigTools::appendParameter(msg, name, config.*field); + } + + virtual void getValue(const SwissRangerConfig &config, boost::any &val) const + { + val = config.*field; + } 
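+ // getValue() boxes the selected member into a boost::any, so generic
+ // code can read any config field without knowing its static type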
+ }; + + class AbstractGroupDescription : public dynamic_reconfigure::Group + { + public: + AbstractGroupDescription(std::string n, std::string t, int p, int i, bool s) + { + name = n; + type = t; + parent = p; + state = s; + id = i; + } + + std::vector abstract_parameters; + bool state; + + virtual void toMessage(dynamic_reconfigure::Config &msg, const boost::any &config) const = 0; + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, boost::any &config) const =0; + virtual void updateParams(boost::any &cfg, SwissRangerConfig &top) const= 0; + virtual void setInitialState(boost::any &cfg) const = 0; + + + void convertParams() + { + for(std::vector::const_iterator i = abstract_parameters.begin(); i != abstract_parameters.end(); ++i) + { + parameters.push_back(dynamic_reconfigure::ParamDescription(**i)); + } + } + }; + + typedef boost::shared_ptr AbstractGroupDescriptionPtr; + typedef boost::shared_ptr AbstractGroupDescriptionConstPtr; + + template + class GroupDescription : public AbstractGroupDescription + { + public: + GroupDescription(std::string name, std::string type, int parent, int id, bool s, T PT::* f) : AbstractGroupDescription(name, type, parent, id, s), field(f) + { + } + + GroupDescription(const GroupDescription& g): AbstractGroupDescription(g.name, g.type, g.parent, g.id, g.state), field(g.field), groups(g.groups) + { + parameters = g.parameters; + abstract_parameters = g.abstract_parameters; + } + + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, boost::any &cfg) const + { + PT* config = boost::any_cast(cfg); + if(!dynamic_reconfigure::ConfigTools::getGroupState(msg, name, (*config).*field)) + return false; + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = &((*config).*field); + if(!(*i)->fromMessage(msg, n)) + return false; + } + + return true; + } + + virtual void setInitialState(boost::any &cfg) const + { + PT* config = boost::any_cast(cfg); + T* group = &((*config).*field); + group->state = state; + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = boost::any(&((*config).*field)); + (*i)->setInitialState(n); + } + + } + + virtual void updateParams(boost::any &cfg, SwissRangerConfig &top) const + { + PT* config = boost::any_cast(cfg); + + T* f = &((*config).*field); + f->setParams(top, abstract_parameters); + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = &((*config).*field); + (*i)->updateParams(n, top); + } + } + + virtual void toMessage(dynamic_reconfigure::Config &msg, const boost::any &cfg) const + { + const PT config = boost::any_cast(cfg); + dynamic_reconfigure::ConfigTools::appendGroup(msg, name, id, parent, config.*field); + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + (*i)->toMessage(msg, config.*field); + } + } + + T (PT::* field); + std::vector groups; + }; + +class DEFAULT +{ + public: + DEFAULT() + { + state = true; + name = "Default"; + } + + void setParams(SwissRangerConfig &config, const std::vector params) + { + for (std::vector::const_iterator _i = params.begin(); _i != params.end(); ++_i) + { + boost::any val; + (*_i)->getValue(config, val); + + if("frame_id"==(*_i)->name){frame_id = boost::any_cast(val);} + if("camera_info_url"==(*_i)->name){camera_info_url = boost::any_cast(val);} + if("auto_exposure"==(*_i)->name){auto_exposure = boost::any_cast(val);} + if("integration_time"==(*_i)->name){integration_time = boost::any_cast(val);} 
+ if("amp_threshold"==(*_i)->name){amp_threshold = boost::any_cast(val);} + } + } + + std::string frame_id; +std::string camera_info_url; +int auto_exposure; +int integration_time; +int amp_threshold; + + bool state; + std::string name; + + +}groups; + + + +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + std::string frame_id; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + std::string camera_info_url; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + int auto_exposure; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + int integration_time; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + int amp_threshold; +//#line 218 "/opt/ros/indigo/share/dynamic_reconfigure/templates/ConfigType.h.template" + + bool __fromMessage__(dynamic_reconfigure::Config &msg) + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + + int count = 0; + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + if ((*i)->fromMessage(msg, *this)) + count++; + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); i ++) + { + if ((*i)->id == 0) + { + boost::any n = boost::any(this); + (*i)->updateParams(n, *this); + (*i)->fromMessage(msg, n); + } + } + + if (count != dynamic_reconfigure::ConfigTools::size(msg)) + { + ROS_ERROR("SwissRangerConfig::__fromMessage__ called with an unexpected parameter."); + ROS_ERROR("Booleans:"); + for (unsigned int i = 0; i < msg.bools.size(); i++) + ROS_ERROR(" %s", msg.bools[i].name.c_str()); + ROS_ERROR("Integers:"); + for (unsigned int i = 0; i < msg.ints.size(); i++) + ROS_ERROR(" %s", msg.ints[i].name.c_str()); + ROS_ERROR("Doubles:"); + for (unsigned int i = 0; i < msg.doubles.size(); i++) + ROS_ERROR(" %s", msg.doubles[i].name.c_str()); + ROS_ERROR("Strings:"); + for (unsigned int i = 0; i < msg.strs.size(); i++) + ROS_ERROR(" %s", msg.strs[i].name.c_str()); + // @todo Check that there are no duplicates. Make this error more + // explicit. + return false; + } + return true; + } + + // This version of __toMessage__ is used during initialization of + // statics when __getParamDescriptions__ can't be called yet. 
+ void __toMessage__(dynamic_reconfigure::Config &msg, const std::vector &__param_descriptions__, const std::vector &__group_descriptions__) const + { + dynamic_reconfigure::ConfigTools::clear(msg); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->toMessage(msg, *this); + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); ++i) + { + if((*i)->id == 0) + { + (*i)->toMessage(msg, *this); + } + } + } + + void __toMessage__(dynamic_reconfigure::Config &msg) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + __toMessage__(msg, __param_descriptions__, __group_descriptions__); + } + + void __toServer__(const ros::NodeHandle &nh) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->toServer(nh, *this); + } + + void __fromServer__(const ros::NodeHandle &nh) + { + static bool setup=false; + + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->fromServer(nh, *this); + + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); i++){ + if (!setup && (*i)->id == 0) { + setup = true; + boost::any n = boost::any(this); + (*i)->setInitialState(n); + } + } + } + + void __clamp__() + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const SwissRangerConfig &__max__ = __getMax__(); + const SwissRangerConfig &__min__ = __getMin__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->clamp(*this, __max__, __min__); + } + + uint32_t __level__(const SwissRangerConfig &config) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + uint32_t level = 0; + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->calcLevel(level, config, *this); + return level; + } + + static const dynamic_reconfigure::ConfigDescription &__getDescriptionMessage__(); + static const SwissRangerConfig &__getDefault__(); + static const SwissRangerConfig &__getMax__(); + static const SwissRangerConfig &__getMin__(); + static const std::vector &__getParamDescriptions__(); + static const std::vector &__getGroupDescriptions__(); + + private: + static const SwissRangerConfigStatics *__get_statics__(); + }; + + template <> // Max and min are ignored for strings. 
+ inline void SwissRangerConfig::ParamDescription::clamp(SwissRangerConfig &config, const SwissRangerConfig &max, const SwissRangerConfig &min) const + { + return; + } + + class SwissRangerConfigStatics + { + friend class SwissRangerConfig; + + SwissRangerConfigStatics() + { +SwissRangerConfig::GroupDescription Default("Default", "", 0, 0, true, &SwissRangerConfig::groups); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.frame_id = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.frame_id = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.frame_id = "camera"; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("frame_id", "str", 3, "ROS tf frame of reference, resolved with tf_prefix unless absolute.", "", &SwissRangerConfig::frame_id))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("frame_id", "str", 3, "ROS tf frame of reference, resolved with tf_prefix unless absolute.", "", &SwissRangerConfig::frame_id))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.camera_info_url = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.camera_info_url = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.camera_info_url = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("camera_info_url", "str", 0, "Camera calibration URL for this video_mode (uncalibrated if null).", "", &SwissRangerConfig::camera_info_url))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("camera_info_url", "str", 0, "Camera calibration URL for this video_mode (uncalibrated if null).", "", &SwissRangerConfig::camera_info_url))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.auto_exposure = 0; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.auto_exposure = 1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.auto_exposure = 1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("auto_exposure", "int", 0, "Auto exposure control state.", "{'enum_description': 'Feature control states', 'enum': [{'srcline': 54, 'description': 'Use fixed value', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 
'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Off'}, {'srcline': 55, 'description': 'Camera sets continuously', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Auto'}]}", &SwissRangerConfig::auto_exposure))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("auto_exposure", "int", 0, "Auto exposure control state.", "{'enum_description': 'Feature control states', 'enum': [{'srcline': 54, 'description': 'Use fixed value', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Off'}, {'srcline': 55, 'description': 'Camera sets continuously', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Auto'}]}", &SwissRangerConfig::auto_exposure))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.integration_time = 0; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.integration_time = 255; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.integration_time = 1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("integration_time", "int", 0, "integration_time control.", "", &SwissRangerConfig::integration_time))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("integration_time", "int", 0, "integration_time control.", "", &SwissRangerConfig::integration_time))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.amp_threshold = -1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.amp_threshold = 65535; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.amp_threshold = -1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("amp_threshold", "int", 0, "amplitude_threshold control.", "", &SwissRangerConfig::amp_threshold))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("amp_threshold", "int", 0, "amplitude_threshold control.", "", &SwissRangerConfig::amp_threshold))); +//#line 233 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.convertParams(); +//#line 233 
"/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __group_descriptions__.push_back(SwissRangerConfig::AbstractGroupDescriptionConstPtr(new SwissRangerConfig::GroupDescription(Default))); +//#line 353 "/opt/ros/indigo/share/dynamic_reconfigure/templates/ConfigType.h.template" + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); ++i) + { + __description_message__.groups.push_back(**i); + } + __max__.__toMessage__(__description_message__.max, __param_descriptions__, __group_descriptions__); + __min__.__toMessage__(__description_message__.min, __param_descriptions__, __group_descriptions__); + __default__.__toMessage__(__description_message__.dflt, __param_descriptions__, __group_descriptions__); + } + std::vector __param_descriptions__; + std::vector __group_descriptions__; + SwissRangerConfig __max__; + SwissRangerConfig __min__; + SwissRangerConfig __default__; + dynamic_reconfigure::ConfigDescription __description_message__; + + static const SwissRangerConfigStatics *get_instance() + { + // Split this off in a separate function because I know that + // instance will get initialized the first time get_instance is + // called, and I am guaranteeing that get_instance gets called at + // most once. + static SwissRangerConfigStatics instance; + return &instance; + } + }; + + inline const dynamic_reconfigure::ConfigDescription &SwissRangerConfig::__getDescriptionMessage__() + { + return __get_statics__()->__description_message__; + } + + inline const SwissRangerConfig &SwissRangerConfig::__getDefault__() + { + return __get_statics__()->__default__; + } + + inline const SwissRangerConfig &SwissRangerConfig::__getMax__() + { + return __get_statics__()->__max__; + } + + inline const SwissRangerConfig &SwissRangerConfig::__getMin__() + { + return __get_statics__()->__min__; + } + + inline const std::vector &SwissRangerConfig::__getParamDescriptions__() + { + return __get_statics__()->__param_descriptions__; + } + + inline const std::vector &SwissRangerConfig::__getGroupDescriptions__() + { + return __get_statics__()->__group_descriptions__; + } + + inline const SwissRangerConfigStatics *SwissRangerConfig::__get_statics__() + { + const static SwissRangerConfigStatics *statics; + + if (statics) // Common case + return statics; + + boost::mutex::scoped_lock lock(dynamic_reconfigure::__init_mutex__); + + if (statics) // In case we lost a race. + return statics; + + statics = SwissRangerConfigStatics::get_instance(); + + return statics; + } + +//#line 54 "/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg" + const int SwissRanger_Off = 0; +//#line 55 "/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg" + const int SwissRanger_Auto = 1; +} + +#endif // __SWISSRANGERRECONFIGURATOR_H__ diff --git a/swissranger_camera/src/swissranger_camera/__init__.py b/swissranger_camera/src/swissranger_camera/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/swissranger_camera/src/swissranger_camera/cfg/SwissRangerConfig.py b/swissranger_camera/src/swissranger_camera/cfg/SwissRangerConfig.py new file mode 100644 index 0000000..20e2b76 --- /dev/null +++ b/swissranger_camera/src/swissranger_camera/cfg/SwissRangerConfig.py @@ -0,0 +1,38 @@ +## ********************************************************* +## +## File autogenerated for the swissranger_camera package +## by the dynamic_reconfigure package. +## Please do not edit. 
+## +## ********************************************************/ + +from dynamic_reconfigure.encoding import extract_params + +inf = float('inf') + +config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 233, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 259, 'description': 'ROS tf frame of reference, resolved with tf_prefix unless absolute.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'frame_id', 'edit_method': '', 'default': 'camera', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 259, 'description': 'Camera calibration URL for this video_mode (uncalibrated if null).', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'camera_info_url', 'edit_method': '', 'default': '', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 259, 'description': 'Auto exposure control state.', 'max': 1, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'auto_exposure', 'edit_method': "{'enum_description': 'Feature control states', 'enum': [{'srcline': 54, 'description': 'Use fixed value', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Off'}, {'srcline': 55, 'description': 'Camera sets continuously', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Auto'}]}", 'default': 1, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 259, 'description': 'integration_time control.', 'max': 255, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'integration_time', 'edit_method': '', 'default': 1, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 259, 'description': 'amplitude_threshold control.', 'max': 65535, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'amp_threshold', 'edit_method': '', 'default': -1, 'level': 0, 'min': -1, 'type': 'int'}], 'type': '', 'id': 0} + +min = {} +max = {} +defaults = {} +level = {} +type = {} +all_level = 0 + +#def extract_params(config): +# params = [] +# params.extend(config['parameters']) +# for group in config['groups']: +# params.extend(extract_params(group)) +# return params + +for param in extract_params(config_description): + min[param['name']] = param['min'] + max[param['name']] = param['max'] + defaults[param['name']] = param['default'] + level[param['name']] = param['level'] + type[param['name']] = param['type'] + all_level = all_level | param['level'] + +SwissRanger_Off = 0 +SwissRanger_Auto = 1 diff --git a/swissranger_camera/src/swissranger_camera/cfg/__init__.py b/swissranger_camera/src/swissranger_camera/cfg/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/tracking/CMakeLists.txt b/tracking/CMakeLists.txt index ba24988..f53dde9 100644 --- a/tracking/CMakeLists.txt +++ b/tracking/CMakeLists.txt @@ -24,12 +24,17 @@ catkin_package( CATKIN_DEPENDS roscpp bayes opencv2 pcl_ros detection tf tf_conversions opt_msgs opt_utils ) -add_library(${PROJECT_NAME} src/munkres.cpp src/kalman_filter.cpp src/track.cpp src/tracker.cpp) +add_library(${PROJECT_NAME} src/munkres.cpp src/kalman_filter.cpp src/track.cpp src/tracker.cpp src/track_object.cpp src/tracker_object.cpp) target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES}) add_executable(tracker apps/tracker_node.cpp) target_link_libraries(tracker ${PROJECT_NAME} ${catkin_LIBRARIES}) +add_executable(tracker_object apps/tracker_object_node.cpp) +target_link_libraries(tracker_object ${PROJECT_NAME} ${catkin_LIBRARIES}) +#add_executable(tracker3d apps/tracker_node_3d.cpp) +#target_link_libraries(tracker3d ${PROJECT_NAME} ${catkin_LIBRARIES}) + #add_executable(tracker_filter apps/tracker_filter_node.cpp) #add_dependencies(tracker_filter ${PROJECT_NAME}_gencfg) #target_link_libraries(tracker_filter ${PROJECT_NAME} ${catkin_LIBRARIES}) diff --git a/tracking/apps/tracker_node.cpp b/tracking/apps/tracker_node.cpp index 0111bd8..a354067 100644 --- a/tracking/apps/tracker_node.cpp +++ b/tracking/apps/tracker_node.cpp @@ -122,6 +122,9 @@ ros::Duration max_time_between_detections_; std::map > number_messages_delay_map_; +//for object tracking +std::vector association_for_initialize_objectnames; + /** * \brief Create marker to be visualized in RViz * @@ -359,6 +362,12 @@ detection_cb(const opt_msgs::DetectionArray::ConstPtr& msg) tracker->newFrame(detections_vector); tracker->updateTracks(); + + //for object tracking +// association_for_initialize_objectnames=tracker->association_for_initialize_objectnames_; + //write the corresponding names?????????? + + // Create a TrackingResult message with the output of the tracking process if(output_tracking_results) { diff --git a/tracking/apps/tracker_object_node.cpp b/tracking/apps/tracker_object_node.cpp new file mode 100644 index 0000000..14d2f71 --- /dev/null +++ b/tracking/apps/tracker_object_node.cpp @@ -0,0 +1,834 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +//#include + +// Dynamic reconfigure: +#include +#include + +typedef tracking::TrackerConfig Config; +typedef dynamic_reconfigure::Server ReconfigureServer; + +// Global variables: +std::map detection_sources_map; +tf::TransformListener* tf_listener; +std::string world_frame_id; +bool output_history_pointcloud; +int output_history_size; +int detection_history_size; +bool output_markers; +bool output_image_rgb; +bool output_tracking_results; +bool output_detection_results; // Enables/disables the publishing of detection positions to be visualized in RViz +bool vertical; +ros::Publisher results_pub; +ros::Publisher object_names_pub; + +ros::Publisher marker_pub_tmp; +ros::Publisher marker_pub; +ros::Publisher pointcloud_pub; +ros::Publisher detection_marker_pub; +ros::Publisher detection_trajectory_pub; +ros::Publisher alive_ids_pub; +size_t starting_index; +size_t detection_insert_index; +tf::Transform camera_frame_to_world_transform; +tf::Transform world_to_camera_frame_transform; +bool extrinsic_calibration; +double period; +open_ptrack::tracking::Tracker_object* tracker_object; +pcl::PointCloud::Ptr history_pointcloud(new pcl::PointCloud); +pcl::PointCloud::Ptr detection_history_pointcloud(new pcl::PointCloud); +bool swissranger; +double min_confidence; +double min_confidence_sr; +double min_confidence_detections; +double min_confidence_detections_sr; +std::vector camera_colors; // vector containing colors to use to identify cameras in the network +std::map color_map; // map between camera frame_id and color +// Chi square distribution +std::map chi_map; +bool velocity_in_motion_term; +double acceleration_variance; +double position_variance_weight; +double voxel_size; +double gate_distance; +bool calibration_refinement; +std::map registration_matrices; +double max_detection_delay; +ros::Time latest_time; + +std::map last_received_detection_; +ros::Duration max_time_between_detections_; + +std::map > number_messages_delay_map_; + +//for object tracking +std::vector association_for_initialize_objectnames; + +/** + * \brief Create marker to be visualized in RViz + * + * \param[in] id The marker ID. + * \param[in] frame_id The marker reference frame. + * \param[in] position The marker position. + * \param[in] color The marker color. 
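+ * \param[in] stamp The timestamp to put in the marker header.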
+ */ +visualization_msgs::Marker +createMarker (int id, std::string frame_id, ros::Time stamp, Eigen::Vector3d position, cv::Vec3f color) +{ + visualization_msgs::Marker marker; + + marker.header.frame_id = world_frame_id; + marker.header.stamp = stamp; + marker.ns = frame_id; + marker.id = id; + marker.type = visualization_msgs::Marker::SPHERE; + marker.action = visualization_msgs::Marker::ADD; + marker.pose.position.x = position(0); + marker.pose.position.y = position(1); + marker.pose.position.z = position(2); + marker.scale.x = 0.1; + marker.scale.y = 0.1; + marker.scale.z = 0.1; + marker.color.r = color(0); + marker.color.g = color(1); + marker.color.b = color(2); + marker.color.a = 1.0; + marker.lifetime = ros::Duration(0.2); + return marker; +} + +void +plotCameraLegend (std::map curr_color_map) +{ + // Compose camera legend: + cv::Mat legend_image = cv::Mat::zeros(500, 500, CV_8UC3); + for(std::map::iterator colormap_iterator = curr_color_map.begin(); colormap_iterator != curr_color_map.end(); colormap_iterator++) + { + int color_index = colormap_iterator->second; + cv::Vec3f color = camera_colors[color_index]; + int y_coord = color_index * legend_image.rows / (curr_color_map.size()+1) + 0.5 * legend_image.rows / (curr_color_map.size()+1); + cv::line(legend_image, cv::Point(0,y_coord), cv::Point(100,y_coord), cv::Scalar(255*color(2), 255*color(1), 255*color(0)), 8); + cv::putText(legend_image, colormap_iterator->first, cv::Point(110,y_coord), 1, 1, cv::Scalar(255, 255, 255), 1); + } + + // Display the cv image + cv::imshow("Camera legend", legend_image); + cv::waitKey(1); +} + +Eigen::Matrix4d +readMatrixFromFile (std::string filename) +{ + Eigen::Matrix4d matrix; + std::string line; + std::ifstream myfile (filename.c_str()); + if (myfile.is_open()) + { + int k = 0; + std::string number; + while (myfile >> number) + { + matrix(int(k/4), int(k%4)) = std::atof(number.c_str()); + k++; + } + myfile.close(); + } + + std::cout << matrix << std::endl; + + return matrix; +} + +/** + * \brief Read the DetectionArray message and use the detections for creating/updating/deleting tracks + * + * \param[in] msg the DetectionArray message. 
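+ *
+ * Besides performing detection-track association, this callback publishes the
+ * tracking results, object names, alive track IDs, RViz markers and the
+ * detection/track history point clouds.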
+ */ +void +detection_cb(const opt_msgs::DetectionArray::ConstPtr& msg) +{ + // Read message header information: + std::string frame_id = msg->header.frame_id; + ros::Time frame_time = msg->header.stamp; + + std::string frame_id_tmp = frame_id; + int pos = frame_id_tmp.find("_rgb_optical_frame"); + if (pos != std::string::npos) + frame_id_tmp.replace(pos, std::string("_rgb_optical_frame").size(), ""); + pos = frame_id_tmp.find("_depth_optical_frame"); + if (pos != std::string::npos) + frame_id_tmp.replace(pos, std::string("_depth_optical_frame").size(), ""); + last_received_detection_[frame_id_tmp] = frame_time; + + // Compute delay of detection message, if any: + double time_delay = 0.0; + if (frame_time > latest_time) + { + latest_time = frame_time; + time_delay = 0.0; + } + else + { + time_delay = (latest_time - frame_time).toSec(); + } + + tf::StampedTransform transform; + tf::StampedTransform inverse_transform; + // cv_bridge::CvImage::Ptr cvPtr; + + try + { + // Read transforms between camera frame and world frame: + if (!extrinsic_calibration) + { + static tf::TransformBroadcaster world_to_camera_tf_publisher; +// world_to_camera_tf_publisher.sendTransform(tf::StampedTransform(camera_frame_to_world_transform, ros::Time::now(), world_frame_id, frame_id)); + world_to_camera_tf_publisher.sendTransform(tf::StampedTransform(world_to_camera_frame_transform, ros::Time::now(), frame_id, world_frame_id)); + } + + //Calculate direct and inverse transforms between camera and world frame: + tf_listener->lookupTransform(world_frame_id, frame_id, ros::Time(0), transform); + tf_listener->lookupTransform(frame_id, world_frame_id, ros::Time(0), inverse_transform); + + // cvPtr = cv_bridge::toCvCopy(msg->image, sensor_msgs::image_encodings::BGR8); + + // Read camera intrinsic parameters: + Eigen::Matrix3d intrinsic_matrix; + for(int i = 0; i < 3; i++) + for(int j = 0; j < 3; j++) + intrinsic_matrix(i, j) = msg->intrinsic_matrix[i * 3 + j]; + + // Add a new DetectionSource or update an existing one: + if(detection_sources_map.find(frame_id) == detection_sources_map.end()) + { + detection_sources_map[frame_id] = new open_ptrack::detection::DetectionSource(cv::Mat(0, 0, CV_8UC3), + transform, inverse_transform, intrinsic_matrix, frame_time, frame_id); + } + else + { + detection_sources_map[frame_id]->update(cv::Mat(0, 0, CV_8UC3), transform, inverse_transform, + intrinsic_matrix, frame_time, frame_id); + double d = detection_sources_map[frame_id]->getDuration().toSec() / period; + int lostFrames = int(round(d)) - 1; + } + open_ptrack::detection::DetectionSource* source = detection_sources_map[frame_id]; + + // Create a Detection object for every detection in the detection message: + std::vector detections_vector; + for(std::vector::const_iterator it = msg->detections.begin(); + it != msg->detections.end(); it++) + { + detections_vector.push_back(open_ptrack::detection::Detection(*it, source)); + } + + // Convert HOG+SVM confidences to HAAR+ADABOOST-like people detection confidences: + if (not std::strcmp(msg->confidence_type.c_str(), "hog+svm")) + { + for(unsigned int i = 0; i < detections_vector.size(); i++) + { +// double new_confidence = detections_vector[i].getConfidence(); +// new_confidence = (new_confidence - min_confidence_detections_sr) / (min_confidence_sr - min_confidence_detections_sr) * +// (min_confidence - min_confidence_detections) + min_confidence_detections; +// detections_vector[i].setConfidence(new_confidence+2); + + double new_confidence = detections_vector[i].getConfidence(); + 
new_confidence = (new_confidence - (-3)) / 3 * 4 + 2; // linearly map the [-3, 0] HOG+SVM range to [2, 6]
+        detections_vector[i].setConfidence(new_confidence);
+
+//        std::cout << detections_vector[i].getConfidence() << std::endl;
+      }
+    }
+
+    // Detection correction by means of calibration refinement:
+    if (calibration_refinement)
+    {
+      if (strcmp(frame_id.substr(0,1).c_str(), "/") == 0)
+      {
+        frame_id = frame_id.substr(1, frame_id.size() - 1);
+      }
+
+      Eigen::Matrix4d registration_matrix;
+      std::map<std::string, Eigen::Matrix4d>::iterator registration_matrices_iterator = registration_matrices.find(frame_id);
+      if (registration_matrices_iterator != registration_matrices.end())
+      { // camera already present
+        registration_matrix = registration_matrices_iterator->second;
+      }
+      else
+      { // camera not present
+        std::cout << "Reading refinement matrix of " << frame_id << " from file." << std::endl;
+        std::string refinement_filename = ros::package::getPath("opt_calibration") + "/conf/registration_" + frame_id + ".txt";
+        std::ifstream f(refinement_filename.c_str());
+        if (f.good()) // if the file exists
+        {
+          f.close();
+          registration_matrix = readMatrixFromFile (refinement_filename);
+          registration_matrices.insert(std::pair<std::string, Eigen::Matrix4d> (frame_id, registration_matrix));
+        }
+        else // if the file does not exist
+        {
+          // insert the identity matrix
+          std::cout << "Refinement file not found! Not doing refinement for this sensor." << std::endl;
+          registration_matrices.insert(std::pair<std::string, Eigen::Matrix4d> (frame_id, Eigen::Matrix4d::Identity()));
+        }
+      }
+
+      if(detections_vector.size() > 0)
+      {
+        // Apply detection refinement:
+        for(unsigned int i = 0; i < detections_vector.size(); i++)
+        {
+          Eigen::Vector3d old_centroid = detections_vector[i].getWorldCentroid();
+
+//          std::cout << frame_id << std::endl;
+//          std::cout << registration_matrix << std::endl;
+//          std::cout << "old_centroid: " << old_centroid.transpose() << std::endl;
+          Eigen::Vector4d old_centroid_homogeneous(old_centroid(0), old_centroid(1), old_centroid(2), 1.0);
+          Eigen::Vector4d refined_centroid = registration_matrix * old_centroid_homogeneous;
+          detections_vector[i].setWorldCentroid(Eigen::Vector3d(refined_centroid(0), refined_centroid(1), refined_centroid(2)));
+
+          Eigen::Vector3d refined_centroid2 = detections_vector[i].getWorldCentroid();
+//          std::cout << "refined_centroid2: " << refined_centroid2.transpose() << std::endl;
+//          std::cout << "difference: " << (refined_centroid2 - old_centroid).transpose() << std::endl << std::endl;
+        }
+      }
+    }
+
+    // If at least one detection has been received:
+    if((detections_vector.size() > 0) && (time_delay < max_detection_delay))
+    {
+
+      // Perform detection-track association:
+      tracker_object->newFrame(detections_vector);
+      tracker_object->updateTracks();
+
+      //for object tracking
+//      association_for_initialize_objectnames=tracker_object->association_for_initialize_objectnames_;
+
+      // TODO: write the corresponding object names for the associated detections.
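+      // A possible wiring for the TODO above (sketch only: it assumes Tracker_object
+      // really exposes the association_for_initialize_objectnames_ member referenced
+      // by the commented line, mirroring the identical hook left in tracker_node.cpp):
+      //
+      //   association_for_initialize_objectnames =
+      //       tracker_object->association_for_initialize_objectnames_;
+      //   // ...then attach each detection's name to its associated track before
+      //   // to_object_name_Msg() is called below.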
+ + + // Create a TrackingResult message with the output of the tracking process + if(output_tracking_results) + { + opt_msgs::TrackArray::Ptr tracking_results_msg(new opt_msgs::TrackArray); + tracking_results_msg->header.stamp = ros::Time::now();//frame_time; + tracking_results_msg->header.frame_id = world_frame_id; + tracker_object->toMsg(tracking_results_msg); + // Publish tracking message: + results_pub.publish(tracking_results_msg); + } + + + opt_msgs::ObjectNameArray::Ptr object_names_msg(new opt_msgs::ObjectNameArray); + object_names_msg->header.stamp = ros::Time::now(); + object_names_msg->header.frame_id=frame_id; + tracker_object->to_object_name_Msg(object_names_msg); + object_names_pub.publish(object_names_msg); + + +// //Show the tracking process' results as an image +// if(output_image_rgb) +// { +// tracker->drawRgb(); +// for(std::map::iterator +// it = detection_sources_map.begin(); it != detection_sources_map.end(); it++) +// { +// cv::Mat image_to_show = it->second->getImage(); +// if (not vertical) +// { +// //cv::imshow("TRACKER " + it->first, image_to_show); +// cv::imshow("TRACKER ", image_to_show); // TODO: use the above row if using multiple cameras +// } +// else +// { +// cv::flip(image_to_show.t(), image_to_show, -1); +// cv::flip(image_to_show, image_to_show, 1); +// //cv::imshow("TRACKER " + it->first, image_to_show); +// cv::imshow("TRACKER ", image_to_show); // TODO: use the above row if using multiple cameras +// } +// cv::waitKey(2); +// } +// } + + // Publish IDs of active tracks: + opt_msgs::IDArray::Ptr alive_ids_msg(new opt_msgs::IDArray); + alive_ids_msg->header.stamp = ros::Time::now(); + alive_ids_msg->header.frame_id = world_frame_id; + tracker_object->getAliveIDs (alive_ids_msg); + alive_ids_pub.publish (alive_ids_msg); + + // Show the pose of each tracked object with a 3D marker (to be visualized with ROS RViz) + if(output_markers) + { + visualization_msgs::MarkerArray::Ptr marker_msg(new visualization_msgs::MarkerArray); + tracker_object->toMarkerArray(marker_msg); + marker_pub.publish(marker_msg); + } + + // Show the history of the movements in 3D (3D trajectory) of each tracked object as a PointCloud (which can be visualized in RViz) + if(output_history_pointcloud) + { + history_pointcloud->header.stamp = frame_time.toNSec() / 1e3; // Convert from ns to us + history_pointcloud->header.frame_id = world_frame_id; + starting_index = tracker_object->appendToPointCloud(history_pointcloud, starting_index, + output_history_size); + pointcloud_pub.publish(history_pointcloud); + } + + // Create message for showing detection positions in RViz: + if (output_detection_results) + { + visualization_msgs::MarkerArray::Ptr marker_msg(new visualization_msgs::MarkerArray); + detection_history_pointcloud->header.stamp = frame_time.toNSec() / 1e3; // Convert from ns to us + detection_history_pointcloud->header.frame_id = world_frame_id; + std::string frame_id = detections_vector[0].getSource()->getFrameId(); + + // Define color: + int color_index; + std::map::iterator colormap_iterator = color_map.find(frame_id); + if (colormap_iterator != color_map.end()) + { // camera already present + color_index = colormap_iterator->second; + } + else + { // camera not present + color_index = color_map.size(); + color_map.insert(std::pair (frame_id, color_index)); + + // Plot legend with camera names and colors: + plotCameraLegend (color_map); + } + for (unsigned int i = 0; i < detections_vector.size(); i++) + { + // Create marker and add it to message: + Eigen::Vector3d 
centroid = detections_vector[i].getWorldCentroid(); + visualization_msgs::Marker marker = createMarker (i, frame_id, frame_time, centroid, camera_colors[color_index]); + marker_msg->markers.push_back(marker); + + // Point cloud: + pcl::PointXYZRGB point; + point.x = marker.pose.position.x; + point.y = marker.pose.position.y; + point.z = marker.pose.position.z; + point.r = marker.color.r * 255.0f; + point.g = marker.color.g * 255.0f; + point.b = marker.color.b * 255.0f; + detection_insert_index = (detection_insert_index + 1) % detection_history_size; + detection_history_pointcloud->points[detection_insert_index] = point; + } + detection_marker_pub.publish(marker_msg); // publish marker message + detection_trajectory_pub.publish(detection_history_pointcloud); // publish trajectory message + } + } + else // if no detections have been received or detection_delay is above max_detection_delay + { + if(output_tracking_results) + { // Publish an empty tracking message + opt_msgs::TrackArray::Ptr tracking_results_msg(new opt_msgs::TrackArray); + tracking_results_msg->header.stamp = frame_time; + tracking_results_msg->header.frame_id = world_frame_id; + results_pub.publish(tracking_results_msg); + } + if((detections_vector.size() > 0) && (time_delay >= max_detection_delay)) + { + if (number_messages_delay_map_.find(msg->header.frame_id) == number_messages_delay_map_.end()) + number_messages_delay_map_[msg->header.frame_id] = std::pair(0.0, 0); + + number_messages_delay_map_[msg->header.frame_id].first += time_delay; + number_messages_delay_map_[msg->header.frame_id].second++; + + if (number_messages_delay_map_[msg->header.frame_id].second == 100) + { + double avg = number_messages_delay_map_[msg->header.frame_id].first / number_messages_delay_map_[msg->header.frame_id].second; + ROS_WARN_STREAM("[" << msg->header.frame_id << "] received 100 detections with average delay " << avg << " > " << max_detection_delay); + number_messages_delay_map_[msg->header.frame_id] = std::pair(0.0, 0); + } + } + } + } +// catch(cv_bridge::Exception& ex) +// { +// ROS_ERROR("cv_bridge exception: %s", ex.what()); +// } + catch(tf::TransformException& ex) + { + ROS_ERROR("transform exception: %s", ex.what()); + } +} + +void +generateColors(int colors_number, std::vector& colors) +{ + for (unsigned int i = 0; i < colors_number; i++) + { + colors.push_back(cv::Vec3f( + float(rand() % 256) / 255, + float(rand() % 256) / 255, + float(rand() % 256) / 255)); + } +} + +void +fillChiMap(std::map& chi_map, bool velocity_in_motion_term) +{ + if (velocity_in_motion_term) // chi square values with state dimension = 4 + { + chi_map[0.5] = 3.357; + chi_map[0.75] = 5.385; + chi_map[0.8] = 5.989; + chi_map[0.9] = 7.779; + chi_map[0.95] = 9.488; + chi_map[0.98] = 11.668; + chi_map[0.99] = 13.277; + chi_map[0.995] = 14.860; + chi_map[0.998] = 16.924; + chi_map[0.999] = 18.467; + } + else // chi square values with state dimension = 2 + { + chi_map[0.5] = 1.386; + chi_map[0.75] = 2.773; + chi_map[0.8] = 3.219; + chi_map[0.9] = 4.605; + chi_map[0.95] = 5.991; + chi_map[0.98] = 7.824; + chi_map[0.99] = 9.210; + chi_map[0.995] = 10.597; + chi_map[0.998] = 12.429; + chi_map[0.999] = 13.816; + } +} + +void +configCb(Config &config, uint32_t level) +{ + tracker_object->setMinConfidenceForTrackInitialization (config.min_confidence_initialization); + max_detection_delay = config.max_detection_delay; + calibration_refinement = config.calibration_refinement; + tracker_object->setSecBeforeOld (config.sec_before_old); + tracker_object->setSecBeforeFake 
(config.sec_before_fake);
+  tracker_object->setSecRemainNew (config.sec_remain_new);
+  tracker_object->setDetectionsToValidate (config.detections_to_validate);
+  tracker_object->setDetectorLikelihood (config.detector_likelihood);
+  tracker_object->setLikelihoodWeights (config.detector_weight*chi_map[0.999]/18.467, config.motion_weight);
+
+//  if (config.velocity_in_motion_term != velocity_in_motion_term)
+//  {
+//    // Take chi square values with regard to the state dimension:
+//    fillChiMap(chi_map, config.velocity_in_motion_term);
+//
+//    double position_variance = config.position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0;
+//    tracker->setVelocityInMotionTerm (config.velocity_in_motion_term, config.acceleration_variance, position_variance);
+//  }
+//  else
+//  {
+  if (config.acceleration_variance != acceleration_variance)
+  {
+    tracker_object->setAccelerationVariance (config.acceleration_variance);
+  }
+
+  if (config.position_variance_weight != position_variance_weight)
+  {
+    double position_variance = config.position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0;
+    tracker_object->setPositionVariance (position_variance);
+  }
+//  }
+
+  gate_distance = chi_map.find(config.gate_distance_probability) != chi_map.end() ? chi_map[config.gate_distance_probability] : chi_map[0.999];
+  tracker_object->setGateDistance (gate_distance); // use the chi-square gate distance computed above
+}
+
+int
+main(int argc, char** argv)
+{
+
+  ros::init(argc, argv, "tracker");
+  ros::NodeHandle nh("~");
+
+  // Subscribers/Publishers:
+  ros::Subscriber input_sub = nh.subscribe("input", 5, detection_cb);
+  marker_pub_tmp = nh.advertise<visualization_msgs::Marker>("/tracker/markers", 1);
+  marker_pub = nh.advertise<visualization_msgs::MarkerArray>("/tracker/markers_array", 1);
+  pointcloud_pub = nh.advertise<pcl::PointCloud<pcl::PointXYZRGB> >("/tracker/history", 1);
+  results_pub = nh.advertise<opt_msgs::TrackArray>("/tracker/tracks", 100);
+  object_names_pub = nh.advertise<opt_msgs::ObjectNameArray>("/tracker/object_names", 100);
+  detection_marker_pub = nh.advertise<visualization_msgs::MarkerArray>("/detector/markers_array", 1);
+  detection_trajectory_pub = nh.advertise<pcl::PointCloud<pcl::PointXYZRGB> >("/detector/history", 1);
+  alive_ids_pub = nh.advertise<opt_msgs::IDArray>("/tracker/alive_ids", 1);
+
+  // Dynamic reconfigure
+  boost::recursive_mutex config_mutex_;
+  boost::shared_ptr<ReconfigureServer> reconfigure_server_;
+
+  tf_listener = new tf::TransformListener();
+
+  // Read tracking parameters:
+  nh.param("world_frame_id", world_frame_id, std::string("/odom"));
+
+  nh.param("orientation/vertical", vertical, false);
+  nh.param("extrinsic_calibration", extrinsic_calibration, false);
+
+  nh.param("voxel_size", voxel_size, 0.06);
+
+  double rate;
+  nh.param("rate", rate, 30.0);
+
+//  double min_confidence;
+  nh.param("min_confidence_initialization", min_confidence, -2.5); //0.0);
+
+  double chi_value;
+  nh.param("gate_distance_probability", chi_value, 0.9);
+
+  nh.param("acceleration_variance", acceleration_variance, 1.0);
+
+  nh.param("position_variance_weight", position_variance_weight, 1.0);
+
+  bool detector_likelihood;
+  nh.param("detector_likelihood", detector_likelihood, false);
+
+  nh.param("velocity_in_motion_term", velocity_in_motion_term, false);
+
+  double detector_weight;
+  nh.param("detector_weight", detector_weight, -1.0);
+
+  double motion_weight;
+  nh.param("motion_weight", motion_weight, 0.5);
+
+  double sec_before_old;
+  nh.param("sec_before_old", sec_before_old, 3.6);
+
+  double sec_before_fake;
+  nh.param("sec_before_fake", sec_before_fake, 2.4);
+
+  double sec_remain_new;
+  nh.param("sec_remain_new", sec_remain_new, 1.2);
+
+  int detections_to_validate;
+  nh.param("detections_to_validate", detections_to_validate, 5);
+
+  double
haar_disp_ada_min_confidence, ground_based_people_detection_min_confidence; + nh.param("haar_disp_ada_min_confidence", haar_disp_ada_min_confidence, -2.5); //0.0); + nh.param("ground_based_people_detection_min_confidence", ground_based_people_detection_min_confidence, -2.5); //0.0); + + nh.param("swissranger", swissranger, false); + + nh.param("ground_based_people_detection_min_confidence_sr", min_confidence_detections_sr, -1.5); + nh.param("min_confidence_initialization_sr", min_confidence_sr, -1.1); + + nh.param("history_pointcloud", output_history_pointcloud, false); + nh.param("history_size", output_history_size, 1000); + nh.param("markers", output_markers, true); + nh.param("image_rgb", output_image_rgb, true); + nh.param("tracking_results", output_tracking_results, true); + + nh.param("detection_debug", output_detection_results, true); + nh.param("detection_history_size", detection_history_size, 1000); + + bool debug_mode; + nh.param("debug_active", debug_mode, false); + + nh.param("calibration_refinement", calibration_refinement, false); + nh.param("max_detection_delay", max_detection_delay, 3.0); + + double max_time_between_detections_d; + nh.param("max_time_between_detections", max_time_between_detections_d, 10.0); + max_time_between_detections_ = ros::Duration(max_time_between_detections_d); + + // Read number of sensors in the network: + int num_cameras = 1; + if (extrinsic_calibration) + { + num_cameras = 0; + XmlRpc::XmlRpcValue network; + nh.getParam("network", network); + for (unsigned i = 0; i < network.size(); i++) + { + num_cameras += network[i]["sensors"].size(); + for (unsigned j = 0; j < network[i]["sensors"].size(); j++) + { + std::string frame_id = network[i]["sensors"][j]["id"]; + last_received_detection_["/" + frame_id] = ros::Time(0); + } + } + } + + // Set min_confidence_detections variable based on sensor type: + if (swissranger) + min_confidence_detections = ground_based_people_detection_min_confidence; + else + min_confidence_detections = haar_disp_ada_min_confidence; + + // Take chi square values with regards to the state dimension: + fillChiMap(chi_map, velocity_in_motion_term); + + // Compute additional parameters: + period = 1.0 / rate; + gate_distance = chi_map.find(chi_value) != chi_map.end() ? 
chi_map[chi_value] : chi_map[0.999]; + + double position_variance; +// position_variance = 3*std::pow(2 * voxel_size, 2) / 12.0; // DEFAULT + position_variance = position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0; + std::vector likelihood_weights; + likelihood_weights.push_back(detector_weight*chi_map[0.999]/18.467); + likelihood_weights.push_back(motion_weight); + + // Generate colors used to identify different cameras: + generateColors(num_cameras, camera_colors); + + // Initialize point cloud containing detections trajectory: + pcl::PointXYZRGB nan_point; + nan_point.x = std::numeric_limits::quiet_NaN(); + nan_point.y = std::numeric_limits::quiet_NaN(); + nan_point.z = std::numeric_limits::quiet_NaN(); + detection_history_pointcloud->points.resize(detection_history_size, nan_point); + + ros::Rate hz(num_cameras*rate); + +// cv::namedWindow("TRACKER ", CV_WINDOW_NORMAL); + + // Initialize an instance of the Tracker object: + tracker_object = new open_ptrack::tracking::Tracker_object( + gate_distance, + detector_likelihood, + likelihood_weights, + velocity_in_motion_term, + min_confidence, + min_confidence_detections, + sec_before_old, + sec_before_fake, + sec_remain_new, + detections_to_validate, + period, + position_variance, + acceleration_variance, + world_frame_id, + debug_mode, + vertical); + + starting_index = 0; + + // Set up dynamic reconfiguration + ReconfigureServer::CallbackType f = boost::bind(&configCb, _1, _2); + reconfigure_server_.reset(new ReconfigureServer(config_mutex_, nh)); + reconfigure_server_->setCallback(f); + + // If extrinsic calibration is not available: + if (!extrinsic_calibration) + { // Set fixed transformation from rgb frame and base_link + tf::Vector3 fixed_translation(0, 0, 0); // camera_rgb_optical_frame -> world + tf::Quaternion fixed_rotation(-0.5, 0.5, -0.5, -0.5); // camera_rgb_optical_frame -> world + tf::Vector3 inv_fixed_translation(0.0, 0.0, 0); // world -> camera_rgb_optical_frame + tf::Quaternion inv_fixed_rotation(-0.5, 0.5, -0.5, 0.5); // world -> camera_rgb_optical_frame + world_to_camera_frame_transform = tf::Transform(fixed_rotation, fixed_translation); + camera_frame_to_world_transform = tf::Transform(inv_fixed_rotation, inv_fixed_translation); + } + + // Spin and execute callbacks: +// ros::spin(); + + std::map last_message; + for (std::map::const_iterator it = last_received_detection_.begin(); it != last_received_detection_.end(); ++it) + last_message[it->first] = ros::Time::now(); + + ros::Time last_camera_legend_update = ros::Time::now(); // last time when the camera legend has been updated + + while (ros::ok()) + { + ros::spinOnce(); + ros::Time now = ros::Time::now(); + for (std::map::const_iterator it = last_received_detection_.begin(); it != last_received_detection_.end(); ++it) + { + ros::Duration duration(now - it->second); + if (duration > max_time_between_detections_) + { + if (it->second > ros::Time(0) and now - last_message[it->first] > max_time_between_detections_) + { + ROS_WARN_STREAM("[" << it->first << "] last detection was " << duration.toSec() << " seconds ago"); + last_message[it->first] = now; + } + else if (now - last_message[it->first] > max_time_between_detections_) + { + ROS_WARN_STREAM("[" << it->first << "] still waiting for detection messages..."); + last_message[it->first] = now; + } + } + + // Update camera legend every second: + if ((now - last_camera_legend_update) > ros::Duration(1.0)) // if more than one second passed since last update + { // update OpenCV image with a waitKey: + 
cv::waitKey(1); + last_camera_legend_update = now; + } + } + hz.sleep(); + } + + return 0; +} diff --git a/tracking/conf/multicamera_tracking.rviz b/tracking/conf/multicamera_tracking.rviz index 868f31a..5d9ec3b 100644 --- a/tracking/conf/multicamera_tracking.rviz +++ b/tracking/conf/multicamera_tracking.rviz @@ -6,10 +6,9 @@ Panels: Expanded: - /Global Options1 - /Status1 - - /PointCloud21 - - /MarkerArray1 - Splitter Ratio: 0.526846 - Tree Height: 775 + - /TF1/Frames1 + Splitter Ratio: 0.355837 + Tree Height: 719 - Class: rviz/Selection Name: Selection - Class: rviz/Tool Properties @@ -28,7 +27,7 @@ Panels: Experimental: false Name: Time SyncMode: 0 - SyncSource: "" + SyncSource: PointCloud2 Visualization Manager: Class: "" Displays: @@ -62,7 +61,7 @@ Visualization Manager: Color: 255; 255; 255 Color Transformer: RGB8 Decay Time: 0 - Enabled: false + Enabled: true Invert Rainbow: false Max Color: 255; 255; 255 Max Intensity: 4096 @@ -73,15 +72,16 @@ Visualization Manager: Queue Size: 10 Selectable: true Size (Pixels): 5 - Size (m): 0.01 - Style: Points - Topic: /tracker/history_smoothed + Size (m): 0.03 + Style: Spheres + Topic: /tracker/object_history + Unreliable: false Use Fixed Frame: true Use rainbow: true - Value: false + Value: true - Class: rviz/MarkerArray Enabled: true - Marker Topic: /tracker/markers_array_smoothed + Marker Topic: /tracker/object_markers_array Name: MarkerArray Namespaces: numbers: true @@ -93,17 +93,41 @@ Visualization Manager: Frame Timeout: 15 Frames: All Enabled: false - Kinect: + kinect2_far: + Value: true + kinect2_far_ir_frame: + Value: true + kinect2_far_ir_optical_frame: + Value: true + kinect2_far_link: + Value: false + kinect2_far_rgb_frame: + Value: true + kinect2_far_rgb_optical_frame: + Value: true + kinect2_head: + Value: true + kinect2_head_ir_frame: + Value: true + kinect2_head_ir_optical_frame: + Value: true + kinect2_head_link: + Value: false + kinect2_head_rgb_frame: + Value: true + kinect2_head_rgb_optical_frame: Value: true - Kinect_depth_frame: + kinect2_lenovo: Value: true - Kinect_depth_optical_frame: + kinect2_lenovo_ir_frame: Value: true - Kinect_link: + kinect2_lenovo_ir_optical_frame: Value: true - Kinect_rgb_frame: + kinect2_lenovo_link: + Value: false + kinect2_lenovo_rgb_frame: Value: true - Kinect_rgb_optical_frame: + kinect2_lenovo_rgb_optical_frame: Value: true world: Value: true @@ -114,16 +138,259 @@ Visualization Manager: Show Names: true Tree: world: - Kinect: - Kinect_link: - Kinect_depth_frame: - Kinect_depth_optical_frame: + kinect2_far: + kinect2_far_link: + kinect2_far_ir_frame: + kinect2_far_ir_optical_frame: {} - Kinect_rgb_frame: - Kinect_rgb_optical_frame: + kinect2_far_rgb_frame: + kinect2_far_rgb_optical_frame: + {} + kinect2_head: + kinect2_head_link: + kinect2_head_ir_frame: + kinect2_head_ir_optical_frame: + {} + kinect2_head_rgb_frame: + kinect2_head_rgb_optical_frame: + {} + kinect2_lenovo: + kinect2_lenovo_link: + kinect2_lenovo_ir_frame: + kinect2_lenovo_ir_optical_frame: + {} + kinect2_lenovo_rgb_frame: + kinect2_lenovo_rgb_optical_frame: {} Update Interval: 0 Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue 
Size: 9 + Selectable: true + Size (Pixels): 3 + Size (m): 0.03 + Style: Boxes + Topic: /tracker/people_history_smoothed + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /tracker/people_markers_array + Name: MarkerArray + Namespaces: + {} + Queue Size: 100 + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_head/depth_ir/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_head/depth_lowres/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_far/depth_lowres/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_far/depth_ir/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: false + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_lenovo/depth_ir/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: 
true + Value: false + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_lenovo/depth_lowres/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /detector/object_markers_array + Name: MarkerArray + Namespaces: + /kinect2_far_rgb_optical_frame: true + /kinect2_head_rgb_optical_frame: true + Queue Size: 100 + Value: true Enabled: true Global Options: Background Color: 48; 48; 48 @@ -148,25 +415,30 @@ Visualization Manager: Views: Current: Class: rviz/Orbit - Distance: 7.62407 + Distance: 10.4249 + Enable Stereo Rendering: + Stereo Eye Separation: 0.06 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false Focal Point: - X: -0.326729 - Y: 0.467386 - Z: 1.2193 + X: 0.0471745 + Y: 1.9946 + Z: 0.00524044 Name: Current View Near Clip Distance: 0.01 - Pitch: 1.5698 + Pitch: 1.09246 Target Frame: Value: Orbit (rviz) - Yaw: 1.5704 + Yaw: 3.24367 Saved: ~ Window Geometry: Displays: - collapsed: false - Height: 1056 - Hide Left Dock: false + collapsed: true + Height: 1000 + Hide Left Dock: true Hide Right Dock: true - QMainWindow State: 000000ff00000000fd00000004000000000000013c00000396fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000002800000396000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000003efc0100000002fb0000000800540069006d0065010000000000000780000002f600fffffffb0000000800540069006d006501000000000000045000000000000000000000063e0000039600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + QMainWindow State: 
000000ff00000000fd0000000400000000000002c90000035efc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c00610079007300000000280000035e000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004bf0000003efc0100000002fb0000000800540069006d00650100000000000004bf000002f600fffffffb0000000800540069006d00650100000000000004500000000000000000000004bf0000035e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: @@ -175,6 +447,6 @@ Window Geometry: collapsed: false Views: collapsed: true - Width: 1920 - X: 0 - Y: 24 + Width: 1215 + X: 55 + Y: 32 diff --git a/tracking/conf/object_tracker_multicamera.yaml b/tracking/conf/object_tracker_multicamera.yaml new file mode 100644 index 0000000..ba4ad4d --- /dev/null +++ b/tracking/conf/object_tracker_multicamera.yaml @@ -0,0 +1,78 @@ +######################### +## Tracking parameters ## +######################### +# Mininum confidence for track initialization: +min_confidence_initialization: 4.0 +# Tracking maximum frame rate per sensor: +rate: 30 +# Tracking reference frame: +world_frame_id: "/world" +# Voxel size used for people detection: +voxel_size: 0.06 +# Flag stating if extrinsic (multicamera) calibration has been performed or not: +extrinsic_calibration: true +# Maximum delay that a detection message can have in order to be considered for tracking: +max_detection_delay: 2.0 +# Flag stating if the results of a calibration refinement procedure should be used to correct detection positions: +calibration_refinement: true + +######################## +## Sensor orientation ## +######################## +# Flag stating if the sensor is vertically placed, or not: +orientation: + vertical: false + +####################### +## Motion parameters ## +####################### +# The higher is this, the higher motion variance is allowed +# it can assume these values: 0.5 0.75 0.8 0.9 0.95 0.98 0.99 0.995 0.998 0.999 +gate_distance_probability: 0.999 +# Acceleration variance in people motion model: +acceleration_variance: 1000 +# Additional weight for position variance in people motion model: +position_variance_weight: 100 + +################################# +## Data association parameters ## +################################# +# Flag stating if detection likelihood should be used in data association: +detector_likelihood: false +# Flag stating if track velocity should be considered in data association: +velocity_in_motion_term: false +# Weight of detection likelihood in data association: +detector_weight: 
-0.25 +# Weight of motion likelihood in data association: +motion_weight: 0.25 + +################################ +## Tracking policy parameters ## +################################ +# Track duration (seconds) after last valid detection: +sec_before_old: 8 +# Seconds within which a track should be validated (otherwise it is discarded): +sec_before_fake: 2.4 +# Seconds after which a visible track obtain NORMAL status: +sec_remain_new: 1.2 +# Minimum number of detection<->track associations needed for validating a track: +detections_to_validate: 3 + +########### +## Debug ## +########### +# Flag activating debug/visualization options: +debug_active: true +# Flag stating if a point cloud containing track trajectories (visible in RViz) should be created: +history_pointcloud: true +# Dimension of the point cloud containing track trajectories: +history_size: 500 +# Flag stating if a spheric marker (with number) for every track (visible in RViz) should be created: +markers: true +# Flag stating if tracking results should be sent over the network: +tracking_results: true +# Flag stating if markers and a point cloud containing detection trajectories (visible in RViz) should be created: +detection_debug: true +# Dimension of the point cloud containing detection trajectories: +detection_history_size: 500 + diff --git a/tracking/conf/single_camera_tracking.rviz b/tracking/conf/single_camera_tracking.rviz new file mode 100644 index 0000000..c99d69b --- /dev/null +++ b/tracking/conf/single_camera_tracking.rviz @@ -0,0 +1,238 @@ +Panels: + - Class: rviz/Displays + Help Height: 78 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + - /Status1 + - /TF1/Frames1 + - /PointCloud22 + - /MarkerArray2 + Splitter Ratio: 0.355837 + Tree Height: 719 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.588679 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: PointCloud2 +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.03 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 5 + Size (m): 0.03 + Style: Spheres + Topic: /tracker/object_history + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /tracker/object_markers_array + Name: MarkerArray + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/TF + Enabled: true + Frame Timeout: 15 + Frames: + All Enabled: false + kinect2_head_ir_optical_frame: + Value: true + world: + Value: true + Marker Scale: 1 + Name: TF + Show Arrows: true + Show Axes: true + Show Names: true + Tree: + 
kinect2_head_ir_optical_frame: + world: + {} + Update Interval: 0 + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 9 + Selectable: true + Size (Pixels): 3 + Size (m): 0.03 + Style: Boxes + Topic: /tracker/people_history_smoothed + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /tracker/people_markers_array + Name: MarkerArray + Namespaces: + {} + Queue Size: 100 + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_head/depth_ir/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + Enabled: true + Global Options: + Background Color: 48; 48; 48 + Fixed Frame: world + Frame Rate: 60 + Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Topic: /initialpose + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/Orbit + Distance: 17.2803 + Enable Stereo Rendering: + Stereo Eye Separation: 0.06 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Focal Point: + X: 0.0471745 + Y: 1.9946 + Z: 0.00524044 + Name: Current View + Near Clip Distance: 0.01 + Pitch: 0.17246 + Target Frame: + Value: Orbit (rviz) + Yaw: 2.95367 + Saved: ~ +Window Geometry: + Displays: + collapsed: false + Height: 1000 + Hide Left Dock: false + Hide Right Dock: true + QMainWindow State: 
000000ff00000000fd0000000400000000000002c90000035efc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c00610079007301000000280000035e000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004bf0000003efc0100000002fb0000000800540069006d00650100000000000004bf000002f600fffffffb0000000800540069006d00650100000000000004500000000000000000000001f00000035e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: true + Width: 1215 + X: 55 + Y: 22 diff --git a/tracking/conf/tracker_multicamera.yaml b/tracking/conf/tracker_multicamera.yaml index 2cb5a99..276a278 100644 --- a/tracking/conf/tracker_multicamera.yaml +++ b/tracking/conf/tracker_multicamera.yaml @@ -14,7 +14,7 @@ extrinsic_calibration: true # Maximum delay that a detection message can have in order to be considered for tracking: max_detection_delay: 2.0 # Flag stating if the results of a calibration refinement procedure should be used to correct detection positions: -calibration_refinement: true +calibration_refinement: false ######################## ## Sensor orientation ## diff --git a/tracking/include/open_ptrack/tracking/track_object.h b/tracking/include/open_ptrack/tracking/track_object.h new file mode 100644 index 0000000..e05569d --- /dev/null +++ b/tracking/include/open_ptrack/tracking/track_object.h @@ -0,0 +1,409 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it]
+ *
+ */
+
+#ifndef OPEN_PTRACK_TRACKING_TRACK_OBJECT_H_
+#define OPEN_PTRACK_TRACKING_TRACK_OBJECT_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace open_ptrack
+{
+  namespace tracking
+  {
+    /** \brief Track represents information about a track (or target) */
+    class Track_object
+    {
+      public:
+
+        /** \brief A track has Status NEW if it has been recently created, otherwise it has NORMAL Status */
+        enum Status
+        {
+          NEW,
+          NORMAL
+        };
+
+        /** \brief Visibility states if the track is currently visible to the sensor, partially occluded, or totally occluded */
+        enum Visibility
+        {
+          VISIBLE = 0,     // no occlusion
+          OCCLUDED = 1,    // partial occlusion
+          NOT_VISIBLE = 2  // total occlusion
+        };
+
+        /** \brief The object name comes from the detections */
+        std::string object_name_;
+
+      protected:
+
+        /** \brief Dimension of the circular buffer which keeps track of filter parameters over time */
+        int MAX_SIZE;
+
+        /** \brief Track ID */
+        const int id_;
+
+        /** \brief Track frame id (frame id of the last detection associated to the track) */
+        const std::string frame_id_;
+
+        /** \brief Inverse of the frame rate */
+        const double period_;
+
+        /** \brief If true, the track is validated, meaning that it has been associated with a certain number of high confidence detections */
+        bool validated_;
+
+        /** \brief Kalman filter associated to the track */
+        open_ptrack::tracking::KalmanFilter* filter_;
+
+        /** \brief Temporary copy of the Kalman filter associated to the track (used to recover filter information when a track is re-found) */
+        open_ptrack::tracking::KalmanFilter* tmp_filter_;
+
+        /** \brief First time a detection is associated to the track */
+        ros::Time first_time_detected_;
+
+        /** \brief Last time a detection is associated to the track */
+        ros::Time last_time_detected_;
+
+        /** \brief Last time a detection with high detection confidence is associated to the track */
+        ros::Time last_time_detected_with_high_confidence_;
+
+        /** \brief Last time a prediction has been performed for the track */
+        ros::Time last_time_predicted_;
+
+        /** \brief Index in the circular buffer corresponding to the last time a prediction has been performed */
+        int last_time_predicted_index_;
+
+        /** Variables used for computing the detection/track Mahalanobis distance */
+        std::vector<MahalanobisParameters2d> mahalanobis_map2d_;
+
+        /** Variables used for computing the detection/track Mahalanobis distance */
+        std::vector<MahalanobisParameters4d> mahalanobis_map4d_;
+
+        /** \brief Track Status */
+        Status status_;
+
+        /** \brief Track Visibility */
+        Visibility visibility_;
+
+        /** \brief 
Number of high confidence detections associated to the track */ + int updates_with_enough_confidence_; + + /** \brief Track centroid z coordinate */ + double z_; + + /** \brief Track height */ + double height_; + + /** \brief Track distance from the camera */ + double distance_; + + /** \brief Track age (in seconds) */ + double age_; + + /** \brief Confidence of the last detection associated to the track */ + double last_detector_confidence_; + + /** \brief Last data association score obtained by this track */ + double data_association_score_; + + /** \brief Color associated to the track */ + Eigen::Vector3f color_; + + /** \brief DetectionSource which provided the last detection associated to the track */ + open_ptrack::detection::DetectionSource* detection_source_; + + /** \brief If true, both position and velocity are considered in computing detection<->track Mahalanobis distance */ + bool velocity_in_motion_term_; + + /** \brief Count the number of consecutive updates with low confidence detections */ + int low_confidence_consecutive_frames_; + + + + public: + + /** \brief Constructor. */ + Track_object( + int id, + std::string frame_id, + double position_variance, + double acceleration_variance, + double period, + bool velocity_in_motion_term); + + /** \brief Destructor. */ + virtual ~Track_object(); + + /** \brief Track initialization with an old track. */ + void + init(const Track_object& old_track); + + /** + * \brief Track initialization. + * + * \param[in] x Track centroid x coordinate + * \param[in] y Track centroid y coordinate + * \param[in] z Track centroid z coordinate + * \param[in] height Track height + * \param[in] distance Track distance from the sensor + * \param[in] detection_source DetectionSource which provided the last detection associated to the track + */ + void + init( + double x, + double y, + double z, + double height, + double distance, + std::string object_name, + open_ptrack::detection::DetectionSource* detection_source); + + /** + * \brief Update track with new detection information. + * + * \param[in] x Detection centroid x coordinate + * \param[in] y Detection centroid y coordinate + * \param[in] z Detection centroid z coordinate + * \param[in] height Detection height + * \param[in] distance Detection distance from the sensor + * \param[in] confidence Detection confidence + * \param[in] min_confidence Minimum confidence for track initialization + * \param[in] min_confidence_detections Minimum confidence for detection + * \param[in] detection_source DetectionSource which provided the detection + */ + void + update( + double x, + double y, + double z, + double height, + double distance, + std::string object_name, + double data_assocation_score, + double confidence, + double min_confidence, + double min_confidence_detections, + open_ptrack::detection::DetectionSource* detection_source, + bool first_update = false); + + /** + * \brief Compute Mahalanobis distance between detection with position (x,y) and track. + * + * \param[in] x Detection centroid x coordinate. + * \param[in] y Detection centroid y coordinate. + * \param[in] when Time instant. + * + * \return the Mahalanobis distance. + */ + double + getMahalanobisDistance(double x, double y, const ros::Time& when); + + /* Validate a track */ + void + validate(); + + /** + * \brief Get track validation flag + * + * \return true if the track has been validated, false otherwise. 
+ */ + bool + isValidated(); + + /** + * \brief Get track ID + * + * \return track ID + */ + int + getId(); + + /** + * \brief Set track status to s + * + * \param[in] s status + */ + void + setStatus(Status s); + + /** + * \brief Get track status + * + * \return track status + */ + Status + getStatus(); + + /** + * \brief Set track Visibility. + * + * \param[in] v Visibility status. + */ + void + setVisibility(Visibility v); + + /** + * \brief Get track Visibility. + * + * \return track Visibility. + */ + Visibility + getVisibility(); + + /** + * \brief Get time passed from first detection-track association. + * + * \return time passed from first detection-track association. + */ + float + getSecFromFirstDetection(ros::Time current_time); + + /** + * \brief Get time passed from last detection-track association. + * + * \return time passed from last detection-track association. + */ + float + getSecFromLastDetection(ros::Time current_time); + + /** + * \brief Get time passed from last detection-track association with a high confidence detection. + * + * \return time passed from last detection-track association with a high confidence detection. + */ + float + getSecFromLastHighConfidenceDetection(ros::Time current_time); + + /** + * \brief Get the number of consecutive updates with low confidence detections. + * + * \return the number of consecutive updates with low confidence detections. + */ + float + getLowConfidenceConsecutiveFrames(); + + /** + * \brief Get the number of updates with enough confidence detections. + * + * \return the number of updates with enough confidence detections. + */ + int + getUpdatesWithEnoughConfidence(); + + /** + * \brief Draw track bounding box in the image. + * + * \param[in] vertical States if the camera is vertically oriented (true) or not (false). + */ + void + draw(bool vertical); + + /** + * \brief Create RViz visualization marker with the track position. + * + * \param[in/out] msg Array containing markers of every track. + */ + void + createMarker(visualization_msgs::MarkerArray::Ptr& msg); + + /** + * \brief Get a XYZRGB point from a point cloud. + * + * \param[in/out] p Point containing position information and to be filled with color. + * + * \return true if track is visible, false if not visible. + */ + bool + getPointXYZRGB(pcl::PointXYZRGB& p); + + /** + * \brief Create track ROS message. + * + * \param[in/out] track_msg Track ROS message. + * \param[in] vertical States if the camera is vertically oriented (true) or not (false). + */ + void + toMsg(opt_msgs::Track& track_msg, bool vertical); + + /** + * \brief Get the DetectionSource corresponding to the last associated detection. + * + * \return the DetectionSource corresponding to the last associated detection. 
+ */ + open_ptrack::detection::DetectionSource* + getDetectionSource(); + + /** + * \brief Set flag stating if people velocity should be used in motion term for data association + * + * \param[in] velocity_in_motion_term If true, people velocity is also used in motion term for data association + * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) + * \param[in] position_variance Position variance (for Kalman Filter) + */ + void + setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance); + + /** + * \brief Set acceleration variance (for Kalman Filter) + * + * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) + */ + void + setAccelerationVariance (double acceleration_variance); + + /** + * \brief Set position variance (for Kalman Filter) + * + * \param[in] position_variance Position variance (for Kalman Filter) + */ + void + setPositionVariance (double position_variance); + }; + + } /* namespace tracking */ +} /* namespace open_ptrack */ +#endif /* OPEN_PTRACK_TRACKING_TRACK_OBJECT_H_ */ diff --git a/tracking/include/open_ptrack/tracking/tracker.h b/tracking/include/open_ptrack/tracking/tracker.h index a275acd..dd96faf 100644 --- a/tracking/include/open_ptrack/tracking/tracker.h +++ b/tracking/include/open_ptrack/tracking/tracker.h @@ -49,178 +49,181 @@ namespace open_ptrack { - namespace tracking - { - /** \brief Tracker performs tracking-by-detection */ - class Tracker - { - protected: - /** \brief List of all active tracks */ - std::list tracks_; +namespace tracking +{ +/** \brief Tracker performs tracking-by-detection */ +class Tracker +{ +protected: + /** \brief List of all active tracks */ + std::list tracks_; + + /** \brief List of lost tracks */ + std::list lost_tracks_; + + /** \brief List of tracks with Status = NEW */ + std::list new_tracks_; + + /** \brief List of current detections */ + std::vector detections_; - /** \brief List of lost tracks */ - std::list lost_tracks_; + /** \brief List of current detections not associated to any track */ + std::list unassociated_detections_; - /** \brief List of tracks with Status = NEW */ - std::list new_tracks_; + /** \brief Track ID counter */ + int tracks_counter_; - /** \brief List of current detections */ - std::vector detections_; + /** \brief World reference frame used for tracking */ + std::string world_frame_id_; - /** \brief List of current detections not associated to any track */ - std::list unassociated_detections_; + /** \brief Minimum confidence for track initialization */ + double min_confidence_; - /** \brief Track ID counter */ - int tracks_counter_; + /** \brief Minimum confidence of detections sent to tracking */ + const double min_confidence_detections_; - /** \brief World reference frame used for tracking */ - std::string world_frame_id_; + /** \brief Minimum number of detection<->track associations needed for validating a track */ + int detections_to_validate_; - /** \brief Minimum confidence for track initialization */ - double min_confidence_; + /** \brief Time after which a not visible track becomes old */ + double sec_before_old_; - /** \brief Minimum confidence of detections sent to tracking */ - const double min_confidence_detections_; + /** \brief Time after which a visible track obtain NORMAL status */ + double sec_remain_new_; - /** \brief Minimum number of detection<->track associations needed for validating a track */ - int detections_to_validate_; + /** \brief Time within which a track should be 
validated (otherwise it is discarded) */ + double sec_before_fake_; - /** \brief Time after which a not visible track becomes old */ - double sec_before_old_; + /** \brief Gate distance for joint likelihood in data association */ + double gate_distance_; - /** \brief Time after which a visible track obtain NORMAL status */ - double sec_remain_new_; + /** \brief Flag stating if people detection confidence should be used in data association (true) or not (false) */ + bool detector_likelihood_; - /** \brief Time within which a track should be validated (otherwise it is discarded) */ - double sec_before_fake_; + /** \brief Weights for the single terms of the joint likelihood */ + std::vector likelihood_weights_; - /** \brief Gate distance for joint likelihood in data association */ - double gate_distance_; + /** \brief If true, people velocity is also used in motion term for data association */ + bool velocity_in_motion_term_; - /** \brief Flag stating if people detection confidence should be used in data association (true) or not (false) */ - bool detector_likelihood_; + /** \brief Minimum time period between two detections messages */ + const double period_; - /** \brief Weights for the single terms of the joint likelihood */ - std::vector likelihood_weights_; + /** \brief Position variance (for Kalman Filter) */ + double position_variance_; - /** \brief If true, people velocity is also used in motion term for data association */ - bool velocity_in_motion_term_; + /** \brief Acceleration variance (for Kalman Filter) */ + double acceleration_variance_; - /** \brief Minimum time period between two detections messages */ - const double period_; + /** \brief Flag enabling debug mode */ + const bool debug_mode_; - /** \brief Position variance (for Kalman Filter) */ - double position_variance_; + /** \brief Detections<->tracks distance matrix for data association */ + cv::Mat_ distance_matrix_; - /** \brief Acceleration variance (for Kalman Filter) */ - double acceleration_variance_; + /** \brief Detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ + cv::Mat_ cost_matrix_; - /** \brief Flag enabling debug mode */ - const bool debug_mode_; + /** \brief if true, the sensor is considered to be vertically placed (portrait mode) */ + bool vertical_; - /** \brief Detections<->tracks distance matrix for data association */ - cv::Mat_ distance_matrix_; + /** \brief Create detections<->tracks distance matrix for data association */ + void + createDistanceMatrix(); - /** \brief Detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ - cv::Mat_ cost_matrix_; + /** \brief Create detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ + void + createCostMatrix(); - /** \brief if true, the sensor is considered to be vertically placed (portrait mode) */ - bool vertical_; + /** \brief Update tracks associated to a detection in the current frame */ + void + updateDetectedTracks(); - /** \brief Create detections<->tracks distance matrix for data association */ - void - createDistanceMatrix(); + /** \brief Fill list containing unassociated detections */ + void + fillUnassociatedDetections(); - /** \brief Create detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ - void - createCostMatrix(); + /** \brief Create new tracks with high confidence unassociated detections */ + void + createNewTracks(); - /** \brief Update tracks associated to a detection in the current frame */ 
- void - updateDetectedTracks(); + /** \brief Create a new track with detection information */ + int + createNewTrack(open_ptrack::detection::Detection& detection); - /** \brief Fill list containing unassociated detections */ - void - fillUnassociatedDetections(); + /** \brief Update lost tracks */ + void + updateLostTracks(); - /** \brief Create new tracks with high confidence unassociated detections */ - void - createNewTracks(); - /** \brief Create a new track with detection information */ - int - createNewTrack(open_ptrack::detection::Detection& detection); - /** \brief Update lost tracks */ - void - updateLostTracks(); - public: - /** \brief Constructor */ - Tracker(double gate_distance, bool detector_likelihood, std::vector likelihood_weights, bool velocity_in_motion_term, +public: + /** \brief Constructor */ + Tracker(double gate_distance, bool detector_likelihood, std::vector likelihood_weights, bool velocity_in_motion_term, double min_confidence, double min_confidence_detections, double sec_before_old, double sec_before_fake, double sec_remain_new, int detections_to_validate, double period, double position_variance, double acceleration_variance, std::string world_frame_id, bool debug_mode, bool vertical); - /** \brief Destructor */ - virtual ~Tracker(); + /** \brief Destructor */ + virtual ~Tracker(); - /** + /** * \brief Initialization when a new set of detections arrive. * * \param[in] detections Vector of current detections. * */ - void - newFrame(const std::vector& detections); + void + newFrame(const std::vector& detections); - /** + /** * \brief Update the list of tracks according to the current set of detections. */ - void - updateTracks(); + void + updateTracks(); -// /** -// * \brief Draw the tracks into the RGB image given by its sensor. -// */ -// void -// drawRgb(); + // /** + // * \brief Draw the tracks into the RGB image given by its sensor. + // */ + // void + // drawRgb(); - /** + /** * \brief Fills the MarkerArray message with a marker for each visible track (in correspondance * of its centroid) and its number. * * \param[in] msg The MarkerArray message to fill. */ - void - toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg); + void + toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg); - /** + /** * \brief Writes the state of each track into a TrackArray message. * * \param[in] msg The TrackArray message to fill. */ - void - toMsg(opt_msgs::TrackArray::Ptr& msg); + void + toMsg(opt_msgs::TrackArray::Ptr& msg); - /** + /** * \brief Writes the state of tracks with a given frame id into a TrackArray message. * * \param[in] msg The TrackArray message to fill. * \param[in] source_frame_id Frame id of tracks that have to be written to msg. */ - void - toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id); + void + toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id); - /** + /** * \brief Writes the ID of each alive track into an IDArray message. * * \param[in] msg The IDArray message to fill. */ - void - getAliveIDs (opt_msgs::IDArray::Ptr& msg); + void + getAliveIDs (opt_msgs::IDArray::Ptr& msg); - /** + /** * \brief Appends the location of each track to a point cloud starting from starting_index (using * a circular array) * @@ -230,103 +233,103 @@ namespace open_ptrack * * \return the new starting_index. 
*/ - size_t - appendToPointCloud(pcl::PointCloud::Ptr& pointcloud, - size_t starting_index, size_t max_size); + size_t + appendToPointCloud(pcl::PointCloud::Ptr& pointcloud, + size_t starting_index, size_t max_size); - /** + /** * \brief Set minimum confidence for track initialization * * \param[in] min_confidence Minimum confidence for track initialization */ - void - setMinConfidenceForTrackInitialization (double min_confidence); + void + setMinConfidenceForTrackInitialization (double min_confidence); - /** + /** * \brief Set time after which a not visible track becomes old * * \param[in] sec_before_old Time after which a not visible track becomes old */ - void - setSecBeforeOld (double sec_before_old); + void + setSecBeforeOld (double sec_before_old); - /** + /** * \brief Set time within which a track should be validated (otherwise it is discarded) * * \param[in] sec_before_fake Time within which a track should be validated (otherwise it is discarded) */ - void - setSecBeforeFake (double sec_before_fake); + void + setSecBeforeFake (double sec_before_fake); - /** + /** * \brief Set time after which a visible track obtain NORMAL status * * \param[in] sec_remain_new Time after which a visible track obtain NORMAL status */ - void - setSecRemainNew (double sec_remain_new); + void + setSecRemainNew (double sec_remain_new); - /** + /** * \brief Set minimum number of detection<->track associations needed for validating a track * * \param[in] detections_to_validate Minimum number of detection<->track associations needed for validating a track */ - void - setDetectionsToValidate (int detections_to_validate); + void + setDetectionsToValidate (int detections_to_validate); - /** + /** * \brief Set flag stating if people detection confidence should be used in data association (true) or not (false) * * \param[in] detector_likelihood Flag stating if people detection confidence should be used in data association (true) or not (false) */ - void - setDetectorLikelihood (bool detector_likelihood); + void + setDetectorLikelihood (bool detector_likelihood); - /** + /** * \brief Set likelihood weights for data association * * \param[in] detector_weight Weight for detector likelihood * \param[in] motion_weight Weight for motion likelihood */ - void - setLikelihoodWeights (double detector_weight, double motion_weight); + void + setLikelihoodWeights (double detector_weight, double motion_weight); - /** + /** * \brief Set flag stating if people velocity should be used in motion term for data association * * \param[in] velocity_in_motion_term If true, people velocity is also used in motion term for data association * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) * \param[in] position_variance Position variance (for Kalman Filter) */ - void - setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance); + void + setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance); - /** + /** * \brief Set acceleration variance (for Kalman Filter) * * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) */ - void - setAccelerationVariance (double acceleration_variance); + void + setAccelerationVariance (double acceleration_variance); - /** + /** * \brief Set position variance (for Kalman Filter) * * \param[in] position_variance Position variance (for Kalman Filter) */ - void - setPositionVariance (double position_variance); + void + setPositionVariance (double 
position_variance); - /** + /** * \brief Set gate distance for joint likelihood in data association * * \param[in] gate_distance Gate distance for joint likelihood in data association. */ - void - setGateDistance (double gate_distance); - }; + void + setGateDistance (double gate_distance); +}; - } /* namespace tracking */ +} /* namespace tracking */ } /* namespace open_ptrack */ #endif /* OPEN_PTRACK_TRACKING_TRACKER_H_ */ diff --git a/tracking/include/open_ptrack/tracking/tracker_object.h b/tracking/include/open_ptrack/tracking/tracker_object.h new file mode 100644 index 0000000..be48984 --- /dev/null +++ b/tracking/include/open_ptrack/tracking/tracker_object.h @@ -0,0 +1,347 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * + */ + +#ifndef OPEN_PTRACK_TRACKING_TRACKER_OBJECT_H_ +#define OPEN_PTRACK_TRACKING_TRACKER_OBJECT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace open_ptrack +{ +namespace tracking +{ +/** \brief Tracker performs tracking-by-detection */ +class Tracker_object +{ +protected: + /** \brief List of all active tracks */ + std::list tracks_; + + /** \brief List of lost tracks */ + std::list lost_tracks_; + + /** \brief List of tracks with Status = NEW */ + std::list new_tracks_; + + /** \brief List of current detections */ + std::vector detections_; + + /** \brief List of current detections not associated to any track */ + std::list unassociated_detections_; + + /** \brief Track ID counter */ + int tracks_counter_; + + /** \brief World reference frame used for tracking */ + std::string world_frame_id_; + + /** \brief Minimum confidence for track initialization */ + double min_confidence_; + + /** \brief Minimum confidence of detections sent to tracking */ + const double min_confidence_detections_; + + /** \brief Minimum number of detection<->track associations needed for validating a track */ + int detections_to_validate_; + + /** \brief Time after which a not visible track becomes old */ + double sec_before_old_; + + /** \brief Time after which a visible track obtain NORMAL status */ + double sec_remain_new_; + + /** \brief Time within which a track should be validated (otherwise it is discarded) */ + double sec_before_fake_; + + /** \brief Gate distance for joint likelihood in data association */ + double gate_distance_; + + /** \brief Flag stating if people detection confidence should be used in data association (true) or not (false) */ + bool detector_likelihood_; + + /** \brief Weights for the single terms of the joint likelihood */ + std::vector likelihood_weights_; + + /** \brief If true, people velocity is also used in motion term for data association */ + bool velocity_in_motion_term_; + + /** \brief Minimum time period between two detections messages */ + const double period_; + + /** \brief Position variance (for Kalman Filter) */ + double position_variance_; + + /** \brief Acceleration variance (for Kalman Filter) */ + double acceleration_variance_; + + /** \brief Flag enabling debug mode */ + const bool debug_mode_; + + /** \brief Detections<->tracks distance matrix for data association */ + cv::Mat_ distance_matrix_; + + /** \brief Detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ + cv::Mat_ cost_matrix_; + + /** \brief if true, the sensor is considered to be vertically placed (portrait mode) */ + bool vertical_; + + /** \brief Create detections<->tracks distance matrix for data association */ + void + createDistanceMatrix(); + + /** \brief Create detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ + void + createCostMatrix(); + + /** \brief Update tracks associated to a detection in the current frame */ + void + updateDetectedTracks(); + + /** \brief Fill list containing unassociated detections */ + void + fillUnassociatedDetections(); + + /** \brief Create new tracks with high confidence unassociated detections */ + void + createNewTracks(); + + /** \brief Create a new track with detection information */ + int + createNewTrack(open_ptrack::detection::Detection& detection); + + /** \brief Update lost tracks */ + void + updateLostTracks(); + + 
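
A note on the association machinery declared just above: distance_matrix_ holds one Mahalanobis distance per track/detection pair, and createCostMatrix() turns it into the input of a Global Nearest Neighbor (GNN) assignment. The patch does not include the solver itself, so the sketch below is only illustrative: it shows how gate_distance_ is typically used to make out-of-gate pairs unassignable, and substitutes a greedy pass for the real GNN optimization. Function names here are hypothetical stand-ins for the private createCostMatrix()/updateDetectedTracks() members.

#include <opencv2/core/core.hpp>
#include <limits>
#include <vector>

// Gate the tracks-by-detections distance matrix: pairs farther apart than
// gate_distance become effectively unassignable.
cv::Mat_<double> makeCostMatrix(const cv::Mat_<double>& distance_matrix,
                                double gate_distance)
{
  cv::Mat_<double> cost(distance_matrix.rows, distance_matrix.cols);
  for (int t = 0; t < distance_matrix.rows; ++t)
    for (int d = 0; d < distance_matrix.cols; ++d)
      cost(t, d) = distance_matrix(t, d) <= gate_distance
                   ? distance_matrix(t, d)
                   : std::numeric_limits<double>::max();
  return cost;
}

// Greedy association (simplified; NOT a true GNN solver): for each track row
// pick the cheapest free detection. Returns the chosen detection index per
// track, or -1 when every candidate is gated out.
std::vector<int> associateGreedy(const cv::Mat_<double>& cost)
{
  std::vector<int> match(cost.rows, -1);
  std::vector<bool> taken(cost.cols, false);
  for (int t = 0; t < cost.rows; ++t)
  {
    double best = std::numeric_limits<double>::max();
    for (int d = 0; d < cost.cols; ++d)
      if (!taken[d] && cost(t, d) < best)
      {
        best = cost(t, d);
        match[t] = d;
      }
    if (match[t] != -1)
      taken[match[t]] = true;
  }
  return match;
}

A real GNN solution optimizes the whole assignment jointly (classically with the Hungarian algorithm), which matters when two tracks compete for the same detection; the greedy loop only illustrates the role of the gate. Detections left unmatched afterwards are what fillUnassociatedDetections() and createNewTracks() consume.
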
+public: + //for object tracking + std::vector association_for_initialize_objectnames_; + + +public: + /** \brief Constructor */ + Tracker_object(double gate_distance, bool detector_likelihood, std::vector likelihood_weights, bool velocity_in_motion_term, + double min_confidence, double min_confidence_detections, double sec_before_old, double sec_before_fake, + double sec_remain_new, int detections_to_validate, double period, double position_variance, + double acceleration_variance, std::string world_frame_id, bool debug_mode, bool vertical); + + /** \brief Destructor */ + virtual ~Tracker_object(); + + /** + * \brief Initialization when a new set of detections arrive. + * + * \param[in] detections Vector of current detections. + * + */ + void + newFrame(const std::vector& detections); + + /** + * \brief Update the list of tracks according to the current set of detections. + */ + void + updateTracks(); + + // /** + // * \brief Draw the tracks into the RGB image given by its sensor. + // */ + // void + // drawRgb(); + + /** + * \brief Fills the MarkerArray message with a marker for each visible track (in correspondance + * of its centroid) and its number. + * + * \param[in] msg The MarkerArray message to fill. + */ + void + toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg); + + /** + * \brief Writes the state of each track into a TrackArray message. + * + * \param[in] msg The TrackArray message to fill. + */ + void + toMsg(opt_msgs::TrackArray::Ptr& msg); + + /** + * \brief Writes the state of tracks with a given frame id into a TrackArray message. + * + * \param[in] msg The TrackArray message to fill. + * \param[in] source_frame_id Frame id of tracks that have to be written to msg. + */ + void + toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id); + + + void + to_object_name_Msg(opt_msgs::ObjectNameArray::Ptr& msg); + + + + + /** + * \brief Writes the ID of each alive track into an IDArray message. + * + * \param[in] msg The IDArray message to fill. + */ + void + getAliveIDs (opt_msgs::IDArray::Ptr& msg); + + /** + * \brief Appends the location of each track to a point cloud starting from starting_index (using + * a circular array) + * + * \param[in] pointcloud The point cloud where to append the locations. + * \param[in] starting_index The starting index of the array. + * \param[in] max_size The maximum size of the point cloud (when reached the points overwrite the initial ones) + * + * \return the new starting_index. 
+ */ + size_t + appendToPointCloud(pcl::PointCloud::Ptr& pointcloud, + size_t starting_index, size_t max_size); + + /** + * \brief Set minimum confidence for track initialization + * + * \param[in] min_confidence Minimum confidence for track initialization + */ + void + setMinConfidenceForTrackInitialization (double min_confidence); + + /** + * \brief Set time after which a not visible track becomes old + * + * \param[in] sec_before_old Time after which a not visible track becomes old + */ + void + setSecBeforeOld (double sec_before_old); + + /** + * \brief Set time within which a track should be validated (otherwise it is discarded) + * + * \param[in] sec_before_fake Time within which a track should be validated (otherwise it is discarded) + */ + void + setSecBeforeFake (double sec_before_fake); + + /** + * \brief Set time after which a visible track obtain NORMAL status + * + * \param[in] sec_remain_new Time after which a visible track obtain NORMAL status + */ + void + setSecRemainNew (double sec_remain_new); + + /** + * \brief Set minimum number of detection<->track associations needed for validating a track + * + * \param[in] detections_to_validate Minimum number of detection<->track associations needed for validating a track + */ + void + setDetectionsToValidate (int detections_to_validate); + + /** + * \brief Set flag stating if people detection confidence should be used in data association (true) or not (false) + * + * \param[in] detector_likelihood Flag stating if people detection confidence should be used in data association (true) or not (false) + */ + void + setDetectorLikelihood (bool detector_likelihood); + + /** + * \brief Set likelihood weights for data association + * + * \param[in] detector_weight Weight for detector likelihood + * \param[in] motion_weight Weight for motion likelihood + */ + void + setLikelihoodWeights (double detector_weight, double motion_weight); + + /** + * \brief Set flag stating if people velocity should be used in motion term for data association + * + * \param[in] velocity_in_motion_term If true, people velocity is also used in motion term for data association + * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) + * \param[in] position_variance Position variance (for Kalman Filter) + */ + void + setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance); + + /** + * \brief Set acceleration variance (for Kalman Filter) + * + * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) + */ + void + setAccelerationVariance (double acceleration_variance); + + /** + * \brief Set position variance (for Kalman Filter) + * + * \param[in] position_variance Position variance (for Kalman Filter) + */ + void + setPositionVariance (double position_variance); + + /** + * \brief Set gate distance for joint likelihood in data association + * + * \param[in] gate_distance Gate distance for joint likelihood in data association. 
+ */ + void + setGateDistance (double gate_distance); +}; + +} /* namespace tracking */ +} /* namespace open_ptrack */ + +#endif /* OPEN_PTRACK_TRACKING_TRACKER_OBJECT_H_ */ diff --git a/tracking/launch/detection_and_tracking.launch b/tracking/launch/detection_and_tracking.launch index 39f7fae..71caead 100644 --- a/tracking/launch/detection_and_tracking.launch +++ b/tracking/launch/detection_and_tracking.launch @@ -1,15 +1,26 @@ + + + + - - + + - - + + + + + + + + - + + diff --git a/tracking/launch/object_detection_and_tracking.launch b/tracking/launch/object_detection_and_tracking.launch new file mode 100644 index 0000000..fddb798 --- /dev/null +++ b/tracking/launch/object_detection_and_tracking.launch @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tracking/launch/people_detection_and_tracking.launch b/tracking/launch/people_detection_and_tracking.launch new file mode 100644 index 0000000..317f7a8 --- /dev/null +++ b/tracking/launch/people_detection_and_tracking.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/tracking/launch/people_detection_and_tracking_kinect1.launch b/tracking/launch/people_detection_and_tracking_kinect1.launch new file mode 100644 index 0000000..39f7fae --- /dev/null +++ b/tracking/launch/people_detection_and_tracking_kinect1.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracker_network.launch b/tracking/launch/tracker_network.launch index e8427d2..032d6b1 100644 --- a/tracking/launch/tracker_network.launch +++ b/tracking/launch/tracker_network.launch @@ -1,11 +1,20 @@ - - - - - + + + + + + + + + + + + + + @@ -13,12 +22,43 @@ - + - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracker_network.launch~ b/tracking/launch/tracker_network.launch~ new file mode 100644 index 0000000..751c180 --- /dev/null +++ b/tracking/launch/tracker_network.launch~ @@ -0,0 +1,69 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracker_single_camera.launch b/tracking/launch/tracker_single_camera.launch new file mode 100644 index 0000000..bec53a3 --- /dev/null +++ b/tracking/launch/tracker_single_camera.launch @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracking_node.launch b/tracking/launch/tracking_node.launch index a7af869..90f70a2 100644 --- a/tracking/launch/tracking_node.launch +++ b/tracking/launch/tracking_node.launch @@ -3,9 +3,14 @@ - + + + + + + diff --git a/tracking/src/track_object.cpp b/tracking/src/track_object.cpp new file mode 100644 index 0000000..7f1bc71 --- /dev/null +++ b/tracking/src/track_object.cpp @@ -0,0 +1,648 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * + */ + +#include + +#include + +namespace open_ptrack +{ +namespace tracking +{ + +Track_object::Track_object( + int id, + std::string frame_id, + double position_variance, + double acceleration_variance, + double period, + bool velocity_in_motion_term) : + id_(id), + frame_id_(frame_id), + period_(period), + velocity_in_motion_term_(velocity_in_motion_term) +{ + color_ = Eigen::Vector3f( + float(rand() % 256) / 255, + float(rand() % 256) / 255, + float(rand() % 256) / 255); + + MAX_SIZE = 90; //XXX create a parameter!!! 
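+  // MAX_SIZE is the length of the circular buffers of Mahalanobis parameters
+  // allocated below (mahalanobis_map2d_/mahalanobis_map4d_): one slot per Kalman
+  // prediction step, indexed modulo MAX_SIZE throughout this file. With velocity
+  // in the motion term the filter state is 4D (x, y, vx, vy); otherwise 2D (x, y).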
+ if (velocity_in_motion_term_) + { + filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 4); + tmp_filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 4); + mahalanobis_map4d_.resize(MAX_SIZE, MahalanobisParameters4d()); + } + else + { + filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 2); + tmp_filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 2); + mahalanobis_map2d_.resize(MAX_SIZE, MahalanobisParameters2d()); + } + +} + +Track_object::~Track_object() +{ + delete filter_; + delete tmp_filter_; +} + +void +Track_object::init(const Track_object& old_track) +{ + double x, y; + old_track.filter_->getState(x, y); + + filter_->init(x, y, 10, old_track.velocity_in_motion_term_); + + *tmp_filter_ = *filter_; + visibility_ = old_track.visibility_; + + ROS_INFO("%d -> %d", old_track.id_, id_); + + z_ = old_track.z_; + height_ = old_track.height_; + distance_ = old_track.distance_; + if(old_track.object_name_!="default") + object_name_=old_track.object_name_; + age_ = old_track.age_; + + detection_source_ = old_track.detection_source_; + velocity_in_motion_term_ = old_track.velocity_in_motion_term_; + validated_ = validated_ || old_track.validated_; + low_confidence_consecutive_frames_ = old_track.low_confidence_consecutive_frames_; + + first_time_detected_ = old_track.first_time_detected_; + last_time_detected_ = old_track.last_time_detected_; + last_time_detected_with_high_confidence_ = old_track.last_time_detected_with_high_confidence_; + last_time_predicted_ = old_track.last_time_predicted_; + last_time_predicted_index_ = old_track.last_time_predicted_index_; + + data_association_score_ = old_track.data_association_score_; +} + + + +void +Track_object::init(double x, double y, double z, double height, double distance,std::string object_name, + open_ptrack::detection::DetectionSource* detection_source) +{ + //Init Kalman filter + filter_->init(x, y, distance, velocity_in_motion_term_); + z_ = z; + height_ = height; + distance_ = distance; + if(object_name!="default") + object_name_=object_name; + status_ = NEW; + visibility_ = VISIBLE; + validated_ = false; + updates_with_enough_confidence_ = low_confidence_consecutive_frames_ = 0; + detection_source_ = detection_source; + first_time_detected_ = detection_source->getTime(); + last_time_predicted_ = last_time_detected_ = last_time_detected_with_high_confidence_ = detection_source->getTime(); + last_time_predicted_index_ = 0; + age_ = 0.0; +} + +void +Track_object::update( + double x, + double y, + double z, + double height, + double distance, + std::string object_name, + double data_assocation_score, + double confidence, + double min_confidence, + double min_confidence_detections, + open_ptrack::detection::DetectionSource* detection_source, + bool first_update) +{ + //Update Kalman filter + int difference; + double vx, vy; + if (velocity_in_motion_term_) + { + ros::Duration d(1.0); + ros::Duration d2(2.0); + + double t = std::max(first_time_detected_.toSec(), (detection_source->getTime() - d).toSec()); + t = std::min(t, last_time_detected_.toSec()); + t = std::max(t, (detection_source->getTime() - d2).toSec()); + double dt = t - last_time_predicted_.toSec(); + + difference = int(round(dt / period_)); + int vIndex = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE; + + if(difference != 0) + { + vx = - (x - mahalanobis_map4d_[vIndex].x) / 
+          dt;
+      vy = - (y - mahalanobis_map4d_[vIndex].y) / dt;
+    }
+    else
+    {
+      vx = mahalanobis_map4d_[vIndex].x;
+      vy = mahalanobis_map4d_[vIndex].y;
+    }
+  }
+
+  // Update Kalman filter from the last time the track was visible:
+  int framesLost = int(round((detection_source->getTime() - last_time_detected_).toSec() / period_)) - 1;
+
+  for(int i = 0; i < framesLost; i++)
+  {
+    filter_->predict();
+    filter_->update();
+  }
+
+  filter_->predict();
+  if (velocity_in_motion_term_)
+  {
+    filter_->update(x, y, vx, vy, distance);
+  }
+  else
+  {
+    filter_->update(x, y, distance);
+  }
+
+  *tmp_filter_ = *filter_;
+  difference = int(round((detection_source->getTime() - last_time_predicted_).toSec() / period_));
+  last_time_predicted_index_ = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE;
+  last_time_predicted_ = last_time_detected_ = detection_source->getTime();
+  if (velocity_in_motion_term_)
+    filter_->getMahalanobisParameters(mahalanobis_map4d_[last_time_predicted_index_]);
+  else
+    filter_->getMahalanobisParameters(mahalanobis_map2d_[last_time_predicted_index_]);
+
+  // Update z_ and height_ with a weighted combination of current and new values:
+  z_ = z_ * 0.9 + z * 0.1;
+  height_ = height_ * 0.9 + height * 0.1;
+  distance_ = distance;
+  if(object_name!="default")
+    object_name_=object_name;
+  if(confidence > min_confidence)
+  {
+    updates_with_enough_confidence_++;
+    last_time_detected_with_high_confidence_ = last_time_detected_;
+  }
+
+  // if (((confidence - 0.5) < min_confidence_detections) && ((last_detector_confidence_ - 0.5) < min_confidence_detections))
+  if ((confidence < (min_confidence + min_confidence_detections)/2) && (last_detector_confidence_ < (min_confidence + min_confidence_detections)/2))
+  {
+    low_confidence_consecutive_frames_++;
+  }
+  else
+  {
+    low_confidence_consecutive_frames_ = 0;
+  }
+  last_detector_confidence_ = confidence;
+
+  data_association_score_ = data_assocation_score;
+
+  // Compute track age:
+  age_ = (detection_source->getTime() - first_time_detected_).toSec();
+
+  detection_source_ = detection_source;
+}
+
+void
+Track_object::validate()
+{
+  validated_ = true;
+}
+
+bool
+Track_object::isValidated()
+{
+  return validated_;
+}
+
+int
+Track_object::getId()
+{
+  return id_;
+}
+
+void
+Track_object::setStatus(Track_object::Status s)
+{
+  status_ = s;
+}
+
+Track_object::Status
+Track_object::getStatus()
+{
+  return status_;
+}
+
+void
+Track_object::setVisibility(Track_object::Visibility v)
+{
+  visibility_ = v;
+}
+
+Track_object::Visibility
+Track_object::getVisibility()
+{
+  return visibility_;
+}
+
+float
+Track_object::getSecFromFirstDetection(ros::Time current_time)
+{
+  return (current_time - first_time_detected_).toSec();
+}
+
+float
+Track_object::getSecFromLastDetection(ros::Time current_time)
+{
+  return (current_time - last_time_detected_).toSec();
+}
+
+float
+Track_object::getSecFromLastHighConfidenceDetection(ros::Time current_time)
+{
+  return (current_time - last_time_detected_with_high_confidence_).toSec();
+}
+
+float
+Track_object::getLowConfidenceConsecutiveFrames()
+{
+  return low_confidence_consecutive_frames_;
+}
+
+int
+Track_object::getUpdatesWithEnoughConfidence()
+{
+  return updates_with_enough_confidence_;
+}
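+// getMahalanobisDistance (below) measures how far a candidate detection (x, y)
+// lies from this track's predicted state at time `when`. If `when` is ahead of
+// the last prediction, the temporary filter tmp_filter_ is rolled forward one
+// period_ at a time, so the gate grows with prediction uncertainty; if `when`
+// falls in the past, the parameters cached in the circular mahalanobis_map*_
+// buffers are reused instead.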
+double
+Track_object::getMahalanobisDistance(double x, double y, const ros::Time& when)
+{
+  int difference = int(round((when - last_time_predicted_).toSec() / period_));
+  // std::cout << "time difference from last detection: " << difference << std::endl;
+  int index;
+  if(difference <= 0)
+  {
+    index = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE;
+  }
+  else
+  {
+    for(int i = 0; i < difference; i++)
+    {
+      tmp_filter_->predict();
+      last_time_predicted_index_ = (last_time_predicted_index_ + 1) % MAX_SIZE;
+      if (velocity_in_motion_term_)
+        tmp_filter_->getMahalanobisParameters(mahalanobis_map4d_[last_time_predicted_index_]);
+      else
+        tmp_filter_->getMahalanobisParameters(mahalanobis_map2d_[last_time_predicted_index_]);
+      tmp_filter_->update();
+    }
+    last_time_predicted_ = when;
+    index = last_time_predicted_index_;
+  }
+
+  if (velocity_in_motion_term_)
+  {
+    ros::Duration d(1.0);
+    ros::Duration d2(2.0);
+
+    double t = std::max(first_time_detected_.toSec(), (when - d).toSec());
+    t = std::min(t, last_time_detected_.toSec());
+    t = std::max(t, (last_time_predicted_ - d2).toSec());
+    double dt = t - last_time_predicted_.toSec();
+
+    difference = int(round(dt / period_));
+    int vIndex = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE;
+
+    // std::cout << "dt: " << dt << std::endl;
+    // std::cout << "vIndex: " << vIndex << std::endl;
+
+    double vx, vy;
+    if(difference != 0)
+    {
+      vx = - (x - mahalanobis_map4d_[vIndex].x) / dt;
+      vy = - (y - mahalanobis_map4d_[vIndex].y) / dt;
+    }
+    else
+    {
+      vx = mahalanobis_map4d_[vIndex].x;
+      vy = mahalanobis_map4d_[vIndex].y;
+    }
+
+    // std::cout << "vx: " << vx << ", vy: " << vy << std::endl;
+
+    return open_ptrack::tracking::KalmanFilter::performMahalanobisDistance(x, y, vx, vy, mahalanobis_map4d_[index]);
+  }
+  else
+  {
+    return open_ptrack::tracking::KalmanFilter::performMahalanobisDistance(x, y, mahalanobis_map2d_[index]);
+  }
+}
+
+void
+Track_object::draw(bool vertical)
+{
+  cv::Scalar color(int(255.0 * color_(0)), int(255.0 * color_(1)), int(255.0 * color_(2)));
+
+  double _x2, _y2;
+  tmp_filter_->getState(_x2, _y2);
+  Eigen::Vector3d centroid2(_x2, _y2, z_);
+  centroid2 = detection_source_->transformToCam(centroid2);
+
+  if(visibility_ == Track_object::NOT_VISIBLE)
+    return;
+
+  double _x, _y;
+  filter_->getState(_x, _y);
+
+  cv::Scalar darkBlue(130,0,0);
+  cv::Scalar white(255,255,255);
+  Eigen::Vector3d centroid(_x, _y, z_);
+  Eigen::Vector3d top(_x, _y, z_ + (height_/2));
+  Eigen::Vector3d bottom(_x, _y, z_ - (height_/2));
+
+  std::vector<Eigen::Vector3d> points;
+  double delta = height_ / 5.0;
+  points.push_back(Eigen::Vector3d(_x - delta, _y - delta, z_ - (height_/2)));
+  points.push_back(Eigen::Vector3d(_x + delta, _y - delta, z_ - (height_/2)));
+  points.push_back(Eigen::Vector3d(_x + delta, _y + delta, z_ - (height_/2)));
+  points.push_back(Eigen::Vector3d(_x - delta, _y + delta, z_ - (height_/2)));
+  points.push_back(Eigen::Vector3d(_x - delta, _y - delta, z_ + (height_/2)));
+  points.push_back(Eigen::Vector3d(_x + delta, _y - delta, z_ + (height_/2)));
+  points.push_back(Eigen::Vector3d(_x + delta, _y + delta, z_ + (height_/2)));
+  points.push_back(Eigen::Vector3d(_x - delta, _y + delta, z_ + (height_/2)));
+
+  //TODO loop for each detection source
+
+  centroid = detection_source_->transformToCam(centroid);
+  cv::circle(detection_source_->getImage(), cv::Point(centroid(0), centroid(1)), 5, color, 1);
+  top = detection_source_->transformToCam(top);
+  bottom = detection_source_->transformToCam(bottom);
+  for(std::vector<Eigen::Vector3d>::iterator it = points.begin(); it != points.end(); it++)
+    *it = detection_source_->transformToCam(*it);
+
+  // Draw a parallelepiped around the tracked object:
+  for(size_t i = 0; i < 4; i++)
+  {
+    cv::line(detection_source_->getImage(), cv::Point(points[i](0), points[i](1)),
+        cv::Point(points[(i + 1) % 4](0),
points[(i + 1) % 4](1)), color, + visibility_ == VISIBLE ? 2 : 1, CV_AA); + cv::line(detection_source_->getImage(), cv::Point(points[i + 4](0), points[i + 4](1)), + cv::Point(points[(i + 1) % 4 + 4](0), points[(i + 1) % 4 + 4](1)), color, + visibility_ == VISIBLE ? 2 : 1, CV_AA); + cv::line(detection_source_->getImage(), cv::Point(points[i](0), points[i](1)), + cv::Point(points[i + 4](0), points[i + 4](1)), color, + visibility_ == VISIBLE ? 2 : 1, CV_AA); + } + + std::stringstream ss; + float distance_to_display = float(int(distance_*100))/100; + ss << id_ << ": " << distance_to_display; + + float id_half_number_of_digits = (float)(ss.str().size())/2; + + // Draw white id_ on a blue background: + if (not vertical) + { + cv::rectangle(detection_source_->getImage(), cv::Point(top(0)-8*id_half_number_of_digits, top(1)-12), + cv::Point(top(0)+12*id_half_number_of_digits, top(1) +2), darkBlue, CV_FILLED, + visibility_ == VISIBLE ? 8 : 1);//, 0); + cv::putText(detection_source_->getImage(), ss.str(), cv::Point(top(0)-8*id_half_number_of_digits, + top(1)), cv::FONT_HERSHEY_SIMPLEX, 0.5, white, 1.7, CV_AA); // white id_ + } + else + { + cv::Mat rotated_image = detection_source_->getImage(); + cv::flip(rotated_image.t(), rotated_image, -1); + cv::flip(rotated_image, rotated_image, 1); + cv::rectangle(rotated_image, cv::Point(top(1)-8*id_half_number_of_digits, (rotated_image.rows - top(0)+1)-12), + cv::Point(top(1) +12*id_half_number_of_digits, (rotated_image.rows - top(0)+1)+2), darkBlue, CV_FILLED, + visibility_ == VISIBLE ? 8 : 1);//, 0); + cv::putText(rotated_image, ss.str(), cv::Point(top(1)-8*id_half_number_of_digits, rotated_image.rows - top(0)+1), + cv::FONT_HERSHEY_SIMPLEX, 0.5, white, 1.7, CV_AA); // white id_ + cv::flip(rotated_image, rotated_image, -1); + cv::flip(rotated_image, rotated_image, 1); + rotated_image = rotated_image.t(); + detection_source_->setImage(rotated_image); + } + //TODO end loop +} + +void +Track_object::createMarker(visualization_msgs::MarkerArray::Ptr& msg) +{ + if(visibility_ == Track_object::NOT_VISIBLE) + return; + + double _x, _y; + filter_->getState(_x, _y); + + visualization_msgs::Marker marker; + + marker.header.frame_id = frame_id_; + marker.header.stamp = ros::Time::now(); + + marker.ns = "people"; + marker.id = id_; + + marker.type = visualization_msgs::Marker::SPHERE; + marker.action = visualization_msgs::Marker::ADD; + + marker.pose.position.x = _x; + marker.pose.position.y = _y; + marker.pose.position.z = z_; + marker.pose.orientation.x = 0.0; + marker.pose.orientation.y = 0.0; + marker.pose.orientation.z = 0.0; + marker.pose.orientation.w = 1.0; + + marker.scale.x = 0.1; + marker.scale.y = 0.1; + marker.scale.z = 0.1; + + marker.color.r = color_(2); + marker.color.g = color_(1); + marker.color.b = color_(0); + marker.color.a = 1.0; + + marker.lifetime = ros::Duration(0.2); + + msg->markers.push_back(marker); + + /////////////////////////////////// + + visualization_msgs::Marker text_marker; + + text_marker.header.frame_id = frame_id_; + text_marker.header.stamp = ros::Time::now(); + + text_marker.ns = "numbers"; + text_marker.id = id_; + + text_marker.type = visualization_msgs::Marker::TEXT_VIEW_FACING; + text_marker.action = visualization_msgs::Marker::ADD; + + std::stringstream ss; + ss << id_; +// text_marker.text = ss.str(); + text_marker.text = object_name_; + + text_marker.pose.position.x = _x; + text_marker.pose.position.y = _y; + // text_marker.pose.position.z = z_ + (height_/2) + 0.1; + text_marker.pose.position.z = z_+ 0.1; //for object 
markers + text_marker.pose.orientation.x = 0.0; + text_marker.pose.orientation.y = 0.0; + text_marker.pose.orientation.z = 0.0; + text_marker.pose.orientation.w = 1.0; + + text_marker.scale.x = 0.2; + text_marker.scale.y = 0.2; + text_marker.scale.z = 0.2; + + text_marker.color.r = color_(2); + text_marker.color.g = color_(1); + text_marker.color.b = color_(0); + text_marker.color.a = 1.0; + + text_marker.lifetime = ros::Duration(0.2); + + msg->markers.push_back(text_marker); + +} + +bool +Track_object::getPointXYZRGB(pcl::PointXYZRGB& p) +{ + if(visibility_ == Track_object::NOT_VISIBLE) + return false; + + double _x, _y; + filter_->getState(_x, _y); + + p.x = float(_x); + p.y = float(_y); + p.z = float(z_); + uchar* rgb_ptr = (uchar*)&p.rgb; + *rgb_ptr++ = uchar(color_(0) * 255.0f); + *rgb_ptr++ = uchar(color_(1) * 255.0f); + *rgb_ptr++ = uchar(color_(2) * 255.0f); + return true; +} + +void +Track_object::toMsg(opt_msgs::Track& track_msg, bool vertical) +{ + + double _x, _y; + filter_->getState(_x, _y); + + track_msg.id = id_; + track_msg.x = _x; + track_msg.y = _y; + track_msg.height = height_; + track_msg.distance = distance_; + track_msg.object_name=object_name_; + track_msg.age = age_; + track_msg.confidence = - data_association_score_; // minus for transforming distance into a sort of confidence + track_msg.visibility = visibility_; + + Eigen::Vector3d top(_x, _y, z_ + (height_/2)); + Eigen::Vector3d bottom(_x, _y, z_ - (height_/2)); + top = detection_source_->transformToCam(top); + bottom = detection_source_->transformToCam(bottom); + if (not vertical) + { + track_msg.box_2D.height = int(std::abs((top - bottom)(1))); + track_msg.box_2D.width = track_msg.box_2D.height / 2; + track_msg.box_2D.x = int(top(0)) - track_msg.box_2D.height / 4; + track_msg.box_2D.y = int(top(1)); + } + else + { + track_msg.box_2D.width = int(std::abs((top - bottom)(0))); + track_msg.box_2D.height = track_msg.box_2D.width / 2; + track_msg.box_2D.x = int(top(0)) - track_msg.box_2D.width; + track_msg.box_2D.y = int(top(1)) - track_msg.box_2D.width / 4; + } +} + +open_ptrack::detection::DetectionSource* +Track_object::getDetectionSource() +{ + return detection_source_; +} + +void +Track_object::setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance) +{ + velocity_in_motion_term_ = velocity_in_motion_term; + + // Re-initialize Kalman filter + filter_->setPredictModel (acceleration_variance); + filter_->setObserveModel (position_variance); + double x, y; + filter_->getState(x, y); + filter_->init(x, y, distance_, velocity_in_motion_term_); + + *tmp_filter_ = *filter_; +} + +void +Track_object::setAccelerationVariance (double acceleration_variance) +{ + filter_->setPredictModel (acceleration_variance); + tmp_filter_->setPredictModel (acceleration_variance); +} + +void +Track_object::setPositionVariance (double position_variance) +{ + filter_->setObserveModel (position_variance); + tmp_filter_->setObserveModel (position_variance); +} +} /* namespace tracking */ +} /* namespace open_ptrack */ diff --git a/tracking/src/tracker.cpp b/tracking/src/tracker.cpp index e8e7b27..cf7d22e 100644 --- a/tracking/src/tracker.cpp +++ b/tracking/src/tracker.cpp @@ -389,12 +389,17 @@ namespace open_ptrack // If a detection<->track association has been found: if(cost_matrix_(track, measure) == 0.0 && distance_matrix_(track, measure) <= gate_distance_) { + + + + open_ptrack::detection::Detection& d = detections_[measure]; // If the detection has enough confidence in the current 
frame or in a recent past: // if ((t->getLowConfidenceConsecutiveFrames() < 10) || ((d.getConfidence() - 0.5) > min_confidence_detections_)) if ((t->getLowConfidenceConsecutiveFrames() < 10) || (d.getConfidence() > ((min_confidence_ + min_confidence_detections_)/2))) { + // Update track with the associated detection: bool first_update = false; t->update(d.getWorldCentroid()(0), d.getWorldCentroid()(1), d.getWorldCentroid()(2),d.getHeight(), diff --git a/tracking/src/tracker_object.cpp b/tracking/src/tracker_object.cpp new file mode 100644 index 0000000..32e981a --- /dev/null +++ b/tracking/src/tracker_object.cpp @@ -0,0 +1,597 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ *
+ * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it]
+ *
+ */
+
+#include <ros/ros.h>                              // header names inferred; the
+#include <open_ptrack/tracking/tracker_object.h> // angle-bracket contents were lost
+
+namespace open_ptrack
+{
+namespace tracking
+{
+
+Tracker_object::Tracker_object(
+    double gate_distance,
+    bool detector_likelihood,
+    std::vector<double> likelihood_weights,
+    bool velocity_in_motion_term,
+    double min_confidence,
+    double min_confidence_detections,
+    double sec_before_old,
+    double sec_before_fake,
+    double sec_remain_new,
+    int detections_to_validate,
+    double period,
+    double position_variance,
+    double acceleration_variance,
+    std::string world_frame_id,
+    bool debug_mode,
+    bool vertical) :
+  gate_distance_(gate_distance),
+  detector_likelihood_(detector_likelihood),
+  likelihood_weights_(likelihood_weights),
+  velocity_in_motion_term_(velocity_in_motion_term),
+  min_confidence_(min_confidence),
+  min_confidence_detections_(min_confidence_detections),
+  sec_before_old_(sec_before_old),
+  sec_before_fake_(sec_before_fake),
+  sec_remain_new_(sec_remain_new),
+  detections_to_validate_(detections_to_validate),
+  period_(period),
+  position_variance_(position_variance),
+  acceleration_variance_(acceleration_variance),
+  world_frame_id_(world_frame_id),
+  debug_mode_(debug_mode),
+  vertical_(vertical)
+{
+  tracks_counter_ = 0;
+}
+
+Tracker_object::~Tracker_object()
+{
+  // TODO Auto-generated destructor stub
+}
+
+void
+Tracker_object::newFrame(const std::vector<open_ptrack::detection::Detection>& detections)
+{
+  detections_.clear();
+  unassociated_detections_.clear();
+  lost_tracks_.clear();
+  new_tracks_.clear();
+  detections_ = detections;
+  // NOTE: assumes `detections` is non-empty; callers must check before invoking.
+  ros::Time current_detections_time = detections_[0].getSource()->getTime();
+
+  for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end();)
+  {
+    open_ptrack::tracking::Track_object* t = *it;
+    bool deleted = false;
+
+    if(((t->getVisibility() == Track_object::NOT_VISIBLE && (t->getSecFromLastHighConfidenceDetection(current_detections_time)) >= sec_before_old_)
+        || (!t->isValidated() && t->getSecFromFirstDetection(current_detections_time) >= sec_before_fake_)))
+    {
+      if (debug_mode_)
+      {
+        std::cout << "Track " << t->getId() << " DELETED" << std::endl;
+      }
+      delete t;
+      it = tracks_.erase(it);
+      deleted = true;
+    }
+    else if(!t->isValidated() && t->getUpdatesWithEnoughConfidence() == detections_to_validate_)
+    {
+      t->validate();
+      if (debug_mode_)
+      {
+        std::cout << "Track " << t->getId() << " VALIDATED" << std::endl;
+      }
+    }
+    else if(t->getStatus() == Track_object::NEW && t->getSecFromFirstDetection(current_detections_time) >= sec_remain_new_)
+    {
+      t->setStatus(Track_object::NORMAL);
+      if (debug_mode_)
+      {
+        std::cout << "Track " << t->getId() << " set to NORMAL" << std::endl;
+      }
+    }
+
+    if(!deleted)
+    {
+      if(t->getStatus() == Track_object::NEW && t->getVisibility() == Track_object::VISIBLE)
+        new_tracks_.push_back(t);
+      if(t->getVisibility() == Track_object::NOT_VISIBLE)
+        lost_tracks_.push_back(t);
+      it++;
+    }
+  }
+}
+
+void
+Tracker_object::updateTracks()
+{
+  createDistanceMatrix();
+  createCostMatrix();
+
+  // Solve Global Nearest Neighbor problem:
+  Munkres munkres;
+  cost_matrix_ = munkres.solve(cost_matrix_, false); // rows: targets (tracks), cols: detections
+
+  updateDetectedTracks();
+  fillUnassociatedDetections();
+  updateLostTracks();
+  createNewTracks();
+}
+
+// void Tracker_object::drawRgb()
+// {
+//   for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+//     (*it)->draw(vertical_);
+// }
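+// Per-frame pipeline implemented above: newFrame() prunes old/fake tracks and
+// snapshots the detections, then updateTracks() builds the track-by-detection
+// likelihood matrix, gates it, solves the assignment with Munkres, and finally
+// updates matched tracks, spawns tracks for unmatched detections, and marks the
+// rest as lost.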
+void
+Tracker_object::toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg)
+{
+  for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+  {
+    open_ptrack::tracking::Track_object* t = *it;
+    t->createMarker(msg);
+  }
+}
+
+void
+Tracker_object::toMsg(opt_msgs::TrackArray::Ptr& msg)
+{
+  for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+  {
+    open_ptrack::tracking::Track_object* t = *it;
+
+    opt_msgs::Track track;
+    t->toMsg(track, vertical_);
+    msg->tracks.push_back(track);
+  }
+}
+
+void
+Tracker_object::toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id)
+{
+  for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+  {
+    open_ptrack::tracking::Track_object* t = *it;
+    if (strcmp(t->getDetectionSource()->getFrameId().c_str(), source_frame_id.c_str()) == 0) // if strings are equal
+    {
+      opt_msgs::Track track;
+      t->toMsg(track, vertical_);
+
+      // // For publishing only not occluded tracks:
+      // if (track.visibility < 2)
+      //   msg->tracks.push_back(track);
+
+      // For publishing all tracks:
+      msg->tracks.push_back(track);
+    }
+  }
+}
+
+void
+Tracker_object::to_object_name_Msg(opt_msgs::ObjectNameArray::Ptr& msg)
+{
+  if(association_for_initialize_objectnames_.size()==tracks_.size())
+  {
+    int index=0;
+    for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+    {
+      open_ptrack::tracking::Track_object* t = *it;
+      opt_msgs::ObjectName object_name_msg;
+      object_name_msg.object_name=t->object_name_;
+      object_name_msg.no=association_for_initialize_objectnames_[index];
+      msg->object_names.push_back(object_name_msg);
+      index++;
+      // std::cout<<"send message with object name:"<<t->object_name_<<std::endl;
+    }
+  }
+}
+
+// The signature of the next function did not survive extraction; judging from
+// its body (it fills msg->ids and msg->max_ID) it is the ID-reporting method,
+// which the person tracker calls getAliveIDs, so that name is assumed here.
+void
+Tracker_object::getAliveIDs (opt_msgs::IDArray::Ptr& msg)
+{
+  for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+  {
+    open_ptrack::tracking::Track_object* t = *it;
+    msg->ids.push_back ((*it)->getId());
+  }
+  msg->max_ID = tracks_counter_;
+}
+
+size_t
+Tracker_object::appendToPointCloud(pcl::PointCloud<pcl::PointXYZRGB>::Ptr& pointcloud, size_t starting_index, size_t max_size)
+{
+  for(size_t i = 0; i < tracks_.size() && pointcloud->size() < max_size; i++)
+  {
+    pcl::PointXYZRGB point;
+    pointcloud->push_back(point);
+  }
+
+  for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+  {
+    open_ptrack::tracking::Track_object* t = *it;
+    if(t->getPointXYZRGB(pointcloud->points[starting_index]))
+      starting_index = (starting_index + 1) % max_size;
+  }
+  return starting_index;
+}
+
+/************************ protected methods ************************/
+
+int
+Tracker_object::createNewTrack(open_ptrack::detection::Detection& detection)
+{
+  if(detection.getConfidence() < min_confidence_)
+    return -1;
+
+  open_ptrack::tracking::Track_object* t;
+  t = new open_ptrack::tracking::Track_object(
+      ++tracks_counter_,
+      world_frame_id_,
+      position_variance_,
+      acceleration_variance_,
+      period_,
+      velocity_in_motion_term_ );
+
+  t->init(detection.getWorldCentroid()(0), detection.getWorldCentroid()(1), detection.getWorldCentroid()(2),
+      detection.getHeight(), detection.getDistance(), detection.getObjectName(), detection.getSource());
+
+  bool first_update = true;
+  t->update(detection.getWorldCentroid()(0), detection.getWorldCentroid()(1), detection.getWorldCentroid()(2),
+      detection.getHeight(), detection.getDistance(), detection.getObjectName(), 0.0,
+      detection.getConfidence(), min_confidence_, min_confidence_detections_, detection.getSource(), first_update);
+
+  ROS_INFO("Created %d", t->getId());
+
+  tracks_.push_back(t);
+  return tracks_counter_;
+}
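+// createDistanceMatrix() (below) fills distance_matrix_ with one joint-likelihood
+// entry per (track, detection) pair: a weighted sum of the detector confidence
+// and the Mahalanobis motion distance. createCostMatrix() then copies it,
+// overwriting entries beyond gate_distance_ with a large constant so the Munkres
+// solver never selects a gated-out pair.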
+void
+Tracker_object::createDistanceMatrix()
+{
+  distance_matrix_ = cv::Mat_<double>(tracks_.size(), detections_.size());
+  int track = 0;
+  for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+  {
+    //double x, y, height, vx, vz;
+    Track_object* t = *it;
+    //t->predict(x, y, height, vx, vz);
+    int measure = 0;
+    for(std::vector<open_ptrack::detection::Detection>::iterator dit = detections_.begin(); dit != detections_.end(); dit++)
+    {
+      double detector_likelihood;
+
+      // Compute detector likelihood:
+      if (detector_likelihood_)
+      {
+        detector_likelihood = dit->getConfidence();
+        // detector_likelihood = log((dit->getConfidence() + 3) / 6);
+      }
+      else
+      {
+        detector_likelihood = 0;
+      }
+
+      // Compute motion likelihood:
+      double motion_likelihood = t->getMahalanobisDistance(
+          dit->getWorldCentroid()(0),
+          dit->getWorldCentroid()(1),
+          dit->getSource()->getTime());
+
+      // Compute joint likelihood and put it in the distance matrix:
+      distance_matrix_(track, measure++) = likelihood_weights_[0] * detector_likelihood + likelihood_weights_[1] * motion_likelihood;
+
+      // Remove NaN and inf:
+      if (std::isnan(distance_matrix_(track, measure-1)) || !std::isfinite(distance_matrix_(track, measure-1)))
+        distance_matrix_(track, measure-1) = 2*gate_distance_;
+
+      // std::cout << (*it)->getId() << ": " << "Motion likelihood: " << likelihood_weights_[0] * motion_likelihood << std::endl;
+      // if (detector_likelihood_)
+      //   std::cout << (*it)->getId() << ": " << "Detector likelihood: " << likelihood_weights_[1] * dit->getConfidence() << std::endl;
+      // std::cout << (*it)->getId() << ": " << "JOINT LIKELIHOOD: " << distance_matrix_(track, measure-1) << std::endl;
+
+      /*ROS_INFO("%d(%f, %f) = %f", t->getId(),
+          dit->getWorldCentroid()(0),
+          dit->getWorldCentroid()(1),
+          distance_matrix_(track, measure - 1));*/
+    }
+    track++;
+  }
+
+  // std::cout << "Distance matrix:" << std::endl;
+  // for(int row = 0; row < distance_matrix_.rows; row++)
+  // {
+  //   for(int col = 0; col < distance_matrix_.cols; col++)
+  //   {
+  //     std::cout.width(8);
+  //     std::cout << distance_matrix_(row,col) << ",";
+  //   }
+  //   std::cout << std::endl;
+  // }
+  // std::cout << std::endl;
+}
+
+void
+Tracker_object::createCostMatrix()
+{
+  cost_matrix_ = distance_matrix_.clone();
+  for(int i = 0; i < distance_matrix_.rows; i++)
+  {
+    for(int j = 0; j < distance_matrix_.cols; j++)
+    {
+      if(distance_matrix_(i, j) > gate_distance_)
+        cost_matrix_(i, j) = 1000000.0;
+    }
+  }
+
+  // std::cout << "Munkres input matrix:" << std::endl;
+  // for(int row = 0; row < cost_matrix_.rows; row++)
+  // {
+  //   for(int col = 0; col < cost_matrix_.cols; col++)
+  //   {
+  //     std::cout.width(8);
+  //     std::cout << cost_matrix_(row,col) << ",";
+  //   }
+  //   std::cout << std::endl;
+  // }
+  // std::cout << std::endl;
+}
+
+void
+Tracker_object::updateDetectedTracks()
+{
+  // std::cout << "Munkres output matrix:" << std::endl;
+  // for(int row = 0; row < cost_matrix_.rows; row++) {
+  //   for(int col = 0; col < cost_matrix_.cols; col++) {
+  //     std::cout.width(1);
+  //     std::cout << cost_matrix_(row,col) << ",";
+  //   }
+  //   std::cout << std::endl;
+  // }
+  // std::cout << std::endl;
+
+  // Iterate over every track:
+  int track = 0;
+  for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+  {
+    bool updated = false;
+    open_ptrack::tracking::Track_object* t = *it;
+
+    for(int measure = 0; measure < cost_matrix_.cols; measure++)
+    {
+      // If a detection<->track association has been found:
+      if(cost_matrix_(track, measure) == 0.0 && distance_matrix_(track, measure) <= gate_distance_)
+      {
+        open_ptrack::detection::Detection& d = detections_[measure];
+
+        // If the detection has enough confidence in the current frame or in a recent past:
+        // if ((t->getLowConfidenceConsecutiveFrames() < 10) || ((d.getConfidence() - 0.5) > min_confidence_detections_))
+        if ((t->getLowConfidenceConsecutiveFrames() < 10) || (d.getConfidence() > ((min_confidence_ + min_confidence_detections_)/2)))
+        {
+          //for object tracking
+          association_for_initialize_objectnames_.push_back(measure);
+
+          // Update track with the associated detection:
+          bool first_update = false;
+          t->update(d.getWorldCentroid()(0), d.getWorldCentroid()(1), d.getWorldCentroid()(2), d.getHeight(),
+              d.getDistance(), d.getObjectName(), distance_matrix_(track, measure),
+              d.getConfidence(), min_confidence_, min_confidence_detections_,
+              d.getSource(), first_update);
+
+          t->setVisibility(d.isOccluded() ? Track_object::OCCLUDED : Track_object::VISIBLE);
+          updated = true;
+          break;
+        }
+        else
+        {
+          //std::cout << "Id: " << t->getId() << ", lowConfConsFrames: " << t->getLowConfidenceConsecutiveFrames() << ", newConf: " << d.getConfidence() << std::endl;
+        }
+      }
+    }
+    if(!updated)
+    {
+      if(t->getVisibility() != Track_object::NOT_VISIBLE)
+      {
+        t->setVisibility(Track_object::NOT_VISIBLE);
+        //t->update();
+      }
+    }
+    track++;
+  }
+  // std::cout << std::endl;
+}
+
+void
+Tracker_object::fillUnassociatedDetections()
+{
+  // Fill a list with detections not associated to any track:
+  if(cost_matrix_.cols == 0 && detections_.size() > 0)
+  {
+    for(size_t measure = 0; measure < detections_.size(); measure++)
+      unassociated_detections_.push_back(detections_[measure]);
+  }
+  else
+  {
+    for(int measure = 0; measure < cost_matrix_.cols; measure++)
+    {
+      bool associated = false;
+      for(int track = 0; track < cost_matrix_.rows; track++)
+      {
+        if(cost_matrix_(track, measure) == 0.0)
+        {
+          if(distance_matrix_(track, measure) > gate_distance_)
+            break;
+          associated = true;
+        }
+      }
+      if(!associated/* && detections_[measure].getConfidence() > min_confidence_*/)
+      {
+        unassociated_detections_.push_back(detections_[measure]);
+      }
+    }
+  }
+}
+
+void
+Tracker_object::updateLostTracks()
+{
+  //for(std::list<open_ptrack::tracking::Track_object*>::iterator it = lost_tracks_.begin(); it != lost_tracks_.end(); it++)
+  //  (*it)->update();
+}
+
+void
+Tracker_object::createNewTracks()
+{
+  for(std::list<open_ptrack::detection::Detection>::iterator dit = unassociated_detections_.begin();
+      dit != unassociated_detections_.end(); dit++)
+  {
+    createNewTrack(*dit);
+  }
+}
+
+void
+Tracker_object::setMinConfidenceForTrackInitialization (double min_confidence)
+{
+  min_confidence_ = min_confidence;
+}
+
+void
+Tracker_object::setSecBeforeOld (double sec_before_old)
+{
+  sec_before_old_ = sec_before_old;
+}
+
+void
+Tracker_object::setSecBeforeFake (double sec_before_fake)
+{
+  sec_before_fake_ = sec_before_fake;
+}
+
+void
+Tracker_object::setSecRemainNew (double sec_remain_new)
+{
+  sec_remain_new_ = sec_remain_new;
+}
+
+void
+Tracker_object::setDetectionsToValidate (int detections_to_validate)
+{
+  detections_to_validate_ = detections_to_validate;
+}
+
+void
+Tracker_object::setDetectorLikelihood (bool detector_likelihood)
+{
+  detector_likelihood_ = detector_likelihood;
+}
+
+void
+Tracker_object::setLikelihoodWeights (double detector_weight, double motion_weight)
+{
+  likelihood_weights_[0] = detector_weight;
+  likelihood_weights_[1] = motion_weight;
+}
+
+void
+Tracker_object::setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance)
+{
+  velocity_in_motion_term_ = velocity_in_motion_term;
+  acceleration_variance_ = acceleration_variance;
+  position_variance_ = position_variance;
+
+  // Update all existing tracks:
+  for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+  {
+    open_ptrack::tracking::Track_object* t = *it;
+    t->setVelocityInMotionTerm (velocity_in_motion_term_, acceleration_variance_, position_variance_);
+  }
+}
+
+void
+Tracker_object::setAccelerationVariance (double acceleration_variance)
+{
+  acceleration_variance_ = acceleration_variance;
+
+  // Update all existing tracks:
+  for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+  {
+    open_ptrack::tracking::Track_object* t = *it;
+    t->setAccelerationVariance (acceleration_variance_);
+  }
+}
+
+void
+Tracker_object::setPositionVariance (double position_variance)
+{
+  position_variance_ = position_variance;
+
+  // Update all existing tracks:
+  for(std::list<open_ptrack::tracking::Track_object*>::iterator it = tracks_.begin(); it != tracks_.end(); it++)
+  {
+    open_ptrack::tracking::Track_object* t = *it;
+    t->setPositionVariance (position_variance_);
+  }
+}
+
+void
+Tracker_object::setGateDistance (double gate_distance)
+{
+  gate_distance_ = gate_distance;
+}
+} /* namespace tracking */
+} /* namespace open_ptrack */
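For reference, a minimal sketch of how a node might drive this tracker once per frame, in the style of the person tracking node. The function, variable, and publisher names here are illustrative and not part of this patch:

// Sketch only: `detections` would be built from the detector output
// (e.g. an opt_msgs::DetectionArray callback).
#include <ros/ros.h>
#include <open_ptrack/tracking/tracker_object.h>

void processFrame(open_ptrack::tracking::Tracker_object& tracker,
                  std::vector<open_ptrack::detection::Detection>& detections,
                  ros::Publisher& tracking_pub)
{
  if (detections.empty())          // newFrame() reads detections[0], so guard first
    return;

  tracker.newFrame(detections);    // prune old/fake tracks, snapshot the detections
  tracker.updateTracks();          // data association + Kalman updates

  opt_msgs::TrackArray::Ptr track_msg(new opt_msgs::TrackArray);
  tracker.toMsg(track_msg);        // one opt_msgs::Track per current track
  tracking_pub.publish(track_msg);
}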