From 38a94e35b8708fded668eedabbaf8bedeb60f811 Mon Sep 17 00:00:00 2001 From: yongheng1991 Date: Fri, 10 Feb 2017 13:20:56 +0100 Subject: [PATCH 1/9] first version with integrated people and object tracking --- detection/CMakeLists.txt | 38 +- .../apps/multiple_objects_detection_node.cpp | 99 +++ detection/cfg/multiple_objects_detection.cfg | 38 ++ .../conf/multiple_objects_detection.yaml | 73 ++ .../multiple_objects_detection.h | 557 ++++++++++++++++ .../multiple_objects_detectionConfig.h | 626 ++++++++++++++++++ .../object_detector.h | 102 +++ .../multiple_objects_detection/roi_zz.h | 70 ++ detection/launch/detector_kinect2.launch | 17 +- .../object_detector.cpp | 316 +++++++++ .../src/multiple_objects_detection/roi_zz.cpp | 127 ++++ tracking/conf/object_tracker_multicamera.yaml | 78 +++ tracking/launch/tracker_network.launch | 57 +- 13 files changed, 2178 insertions(+), 20 deletions(-) create mode 100644 detection/apps/multiple_objects_detection_node.cpp create mode 100755 detection/cfg/multiple_objects_detection.cfg create mode 100644 detection/conf/multiple_objects_detection.yaml create mode 100644 detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h create mode 100644 detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detectionConfig.h create mode 100644 detection/include/open_ptrack/multiple_objects_detection/object_detector.h create mode 100644 detection/include/open_ptrack/multiple_objects_detection/roi_zz.h create mode 100644 detection/src/multiple_objects_detection/object_detector.cpp create mode 100644 detection/src/multiple_objects_detection/roi_zz.cpp create mode 100644 tracking/conf/object_tracker_multicamera.yaml diff --git a/detection/CMakeLists.txt b/detection/CMakeLists.txt index b030e6a..28745ab 100644 --- a/detection/CMakeLists.txt +++ b/detection/CMakeLists.txt @@ -1,15 +1,30 @@ cmake_minimum_required(VERSION 2.8.3) project(detection) set(CMAKE_BUILD_TYPE RelWithDebInfo) -find_package(catkin REQUIRED COMPONENTS cmake_modules roscpp rospy pcl_ros pcl_conversions nodelet rosconsole image_transport opt_utils opt_msgs dynamic_reconfigure) + + +# Check for c++11 support +INCLUDE(CheckCXXCompilerFlag) +CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11) +CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X) +IF(COMPILER_SUPPORTS_CXX11) +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +ELSEIF(COMPILER_SUPPORTS_CXX0X) +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") +ELSE() + MESSAGE(ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. 
Please use a different C++ compiler.") +ENDIF() + +find_package(catkin REQUIRED COMPONENTS cmake_modules roscpp rospy pcl_ros pcl_conversions nodelet rosconsole image_transport opt_utils opt_msgs dynamic_reconfigure std_msgs rostime sensor_msgs message_filters compressed_image_transport compressed_depth_image_transport kinect2_bridge) +find_package(cv_bridge ) find_package(VTK REQUIRED) if(VTK_FOUND) - include_directories(${VTK_INCLUDE_DIRS}) - link_directories(${VTK_LIBRARY_DIRS}) - add_definitions(${VTK_DEFINITIONS}) - include (${VTK_USE_FILE}) -endif() + include_directories(${VTK_INCLUDE_DIRS}) + link_directories(${VTK_LIBRARY_DIRS}) + add_definitions(${VTK_DEFINITIONS}) + include (${VTK_USE_FILE}) +endif() find_package(OpenCV REQUIRED) include_directories(${OpenCV_INCLUDE_DIRS}) @@ -32,8 +47,7 @@ if (NOT PCL_FOUND) endif (NOT PCL_FOUND) # Dynamic reconfigure support -generate_dynamic_reconfigure_options(cfg/HaarDispAdaDetector.cfg cfg/GroundBasedPeopleDetector.cfg) - +generate_dynamic_reconfigure_options(cfg/HaarDispAdaDetector.cfg cfg/GroundBasedPeopleDetector.cfg cfg/multiple_objects_detection.cfg) include_directories(cfg/cpp) catkin_package( @@ -43,10 +57,16 @@ catkin_package( ) -add_library(${PROJECT_NAME} src/detection_source.cpp src/detection.cpp) +add_library(${PROJECT_NAME} src/detection_source.cpp src/detection.cpp src/multiple_objects_detection/object_detector.cpp src/multiple_objects_detection/roi_zz.cpp) add_dependencies(${PROJECT_NAME} ${PROJECT_NAME}_gencfg) target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBS}) + + +add_executable(multiple_objects_detection_node apps/multiple_objects_detection_node.cpp) +target_link_libraries(multiple_objects_detection_node ${PROJECT_NAME} ${OpenCV_LIBRARIES} ${PCL_LIBRARIES} ${catkin_LIBRARIES}) + + add_executable(ground_based_people_detector apps/ground_based_people_detector_node.cpp) SET_TARGET_PROPERTIES(ground_based_people_detector PROPERTIES LINK_FLAGS -L${PCL_LIBRARY_DIRS}) target_link_libraries(ground_based_people_detector ${PROJECT_NAME} ${catkin_LIBRARIES} ${PCL_LIBRARIES} vtkHybrid vtkRendering) diff --git a/detection/apps/multiple_objects_detection_node.cpp b/detection/apps/multiple_objects_detection_node.cpp new file mode 100644 index 0000000..121f358 --- /dev/null +++ b/detection/apps/multiple_objects_detection_node.cpp @@ -0,0 +1,99 @@ +#include +#include +#include + +#include "ros/ros.h" +#include "std_msgs/String.h" + +#include +#include "open_ptrack/multiple_objects_detection/multiple_objects_detectionConfig.h" +#include "open_ptrack/multiple_objects_detection/multiple_objects_detection.h" + +using namespace std; +using namespace cv; + +//typedef dynamic_reconfigure::Server ReconfigureServer; + +std::string cameraName; +bool useExact = false; +bool useCompressed = false; + +std::string output_detection_topic; + +bool use_background_removal; +int background_calculate_frames; +int threshold_4_detecting_foreground;//threshold of depth difference,use the difference to ditinguish the background and foreground +bool show_2D_tracks; + + +void configCb(multiple_objects_detection::multiple_objects_detectionConfig &config, uint32_t level) +{ + Object_Detector::HMin=config.HMin; + Object_Detector::HMin=config.HMin; + Object_Detector::SMin=config.SMin; + Object_Detector::VMin=config.VMin; + Object_Detector::HMax=config.HMax; + Object_Detector::SMax=config.HMax; + Object_Detector::VMax=config.VMax; + Object_Detector::use_hs=config.use_hs; + Object_Detector::h_bins=config.h_bins; + 
+
+
+int main(int argc, char** argv){
+
+  ros::init(argc, argv, "multiple_objects_detection");
+  ros::NodeHandle nh("~");
+
+  if(!ros::ok())
+  {
+    return 0;
+  }
+
+  nh.param("sensor_name", cameraName, std::string("kinect2_head"));
+  nh.param("use_background_removal", use_background_removal, false);
+  nh.param("background_calculate_frames", background_calculate_frames, 100);
+  nh.param("threshold_4_detecting_foreground", threshold_4_detecting_foreground, 100);
+  nh.param("show_2D_track", show_2D_tracks, false);
+  nh.param("output_detection_topic", output_detection_topic, std::string("/objects_detector/detections"));
+
+  // set the static variables of the Object_Detector class; each variable is documented in the class header
+  nh.param("HMin", Object_Detector::HMin, 0);
+  nh.param("SMin", Object_Detector::SMin, 80);
+  nh.param("VMin", Object_Detector::VMin, 100);
+  nh.param("HMax", Object_Detector::HMax, 360);
+  nh.param("SMax", Object_Detector::SMax, 256);
+  nh.param("VMax", Object_Detector::VMax, 256);
+  nh.param("use_hs", Object_Detector::use_hs, true);
+  nh.param("h_bins", Object_Detector::h_bins, 36);
+  nh.param("s_bins", Object_Detector::s_bins, 18);
+  nh.param("AREA_TOLERANCE", Object_Detector::AREA_TOLERANCE, 20);
+  nh.param("QUALITY_TOLERANCE", Object_Detector::QUALITY_TOLERANCE, 20000);
+  nh.param("DENSITY_TOLORENCE", Object_Detector::DENSITY_TOLORENCE, 4.0);
+
+  // std::cout << "output detection topic: " << output_detection_topic << std::endl;
+
+  // Set up dynamic reconfiguration
+  dynamic_reconfigure::Server<multiple_objects_detection::multiple_objects_detectionConfig> server;
+  dynamic_reconfigure::Server<multiple_objects_detection::multiple_objects_detectionConfig>::CallbackType f;
+  f = boost::bind(&configCb, _1, _2);
+  server.setCallback(f);
+
+  Multiple_Objects_Detection _multiple_objects_detection(cameraName, output_detection_topic, useExact, useCompressed,
+      use_background_removal, background_calculate_frames, threshold_4_detecting_foreground, show_2D_tracks);
+
+  std::cout << "start detecting..." << std::endl;
+  _multiple_objects_detection.run_detection();
+
+  ros::shutdown();
+  return 0;
+}
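A minimal usage sketch, not part of the patch: once the package builds, the node can be started with the private parameters read in main() above. The namespace and sensor name here are assumptions; the conf file introduced next supplies the same defaults.

    rosparam load detection/conf/multiple_objects_detection.yaml /multiple_objects_detection
    rosrun detection multiple_objects_detection_node _sensor_name:=kinect2_head _show_2D_track:=true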
diff --git a/detection/cfg/multiple_objects_detection.cfg b/detection/cfg/multiple_objects_detection.cfg
new file mode 100755
index 0000000..c588370
--- /dev/null
+++ b/detection/cfg/multiple_objects_detection.cfg
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Declare parameters that control multiple-objects detection
+
+PACKAGE='multiple_objects_detection'
+
+from dynamic_reconfigure.parameter_generator_catkin import *
+
+gen = ParameterGenerator()
+
+##############################################
+## multiple_objects_detection: object detector parameters ##
+##############################################
+
+gen.add("HMax", int_t, 0, "Hue max", 360, 0, 360)
+gen.add("SMax", int_t, 0, "Saturation max", 255, 0, 255)
+gen.add("VMax", int_t, 0, "Value max", 255, 0, 255)
+gen.add("HMin", int_t, 0, "Hue min", 0, 0, 360)
+gen.add("SMin", int_t, 0, "Saturation min", 80, 0, 255)
+gen.add("VMin", int_t, 0, "Value min", 100, 0, 255)
+
+gen.add("use_hs", bool_t, 0, "whether to use H and S, or just H, to generate the ROI histogram for the back projection", True)
+gen.add("h_bins", int_t, 0, "Divide (HMin,HMax) into h_bins parts", 36, 1, 100)
+gen.add("s_bins", int_t, 0, "Divide (SMin,SMax) into s_bins parts", 18, 1, 100)
+
+gen.add("AREA_TOLERANCE", int_t, 0, "AREA_TOLERANCE", 20, 5, 100)
+gen.add("QUALITY_TOLERANCE", int_t, 0, "QUALITY_TOLERANCE", 20000, 5000, 65535)
+gen.add("DENSITY_TOLORENCE", int_t, 0, "DENSITY_TOLORENCE", 4, 0, 50)
+
+exit(gen.generate(PACKAGE, "multiple_objects_detection_node", "multiple_objects_detection"))
diff --git a/detection/conf/multiple_objects_detection.yaml b/detection/conf/multiple_objects_detection.yaml
new file mode 100644
index 0000000..1fdd7b6
--- /dev/null
+++ b/detection/conf/multiple_objects_detection.yaml
@@ -0,0 +1,73 @@
+############################
+## multiple_objects_detection ##
+############################
+
+sensor_name: kinect2_head
+show_2D_track: false
+output_detection_topic: /objects_detector/detections
+
+
+############################
+## Background removal ##
+############################
+
+use_background_removal: false
+
+# how many frames to use when calculating the background
+background_calculate_frames: 100
+
+# threshold on the depth difference used to distinguish background from foreground
+threshold_4_detecting_foreground: 100
+
+
+##############################################
+## object_detector parameters ##
+##############################################
+
+######## for the HSV mask ########
+HMin: 0
+SMin: 80
+VMin: 100
+HMax: 360
+SMax: 256
+VMax: 256
+######## for the HSV mask ########
+
+
+########### for CamShift ###########
+# if use_hs == true, use hue and saturation to generate the histogram for the back projection; if not, just use the hue image
+use_hs: true
+
+# for the hue back-projection histogram, divide (HMin,HMax) into h_bins parts
+h_bins: 36
+s_bins: 18
+########### for CamShift ###########
+
+
+########### for CamShift recovery from occlusion ###########
+
+## For CamShift, each pixel of the window in the back-projection image encodes its similarity to the ROI: the pixel value equals 255 * percentage, where the percentage comes from the histogram (probability).
+## QUALITY_TOLERANCE is the tolerance on the sum of the window in the back-projection image; it represents the quality of the window.
+## DENSITY_TOLORENCE is the tolerance on the density, where DENSITY = QUALITY / window.area().
+## AREA_TOLERANCE is the tolerance on the x,y window: the object may reappear in the next frame inside a window whose size grows by AREA_TOLERANCE every frame, so the bigger AREA_TOLERANCE, the faster objects can move.
+## if the QUALITYDENSITY_TOLORENCE,it will be "not occluded"; +AREA_TOLERANCE: 20 ##AREA_TOLERANCE is also used to create the position_maks +QUALITY_TOLERANCE: 20000 +DENSITY_TOLORENCE: 2.0 +###########For camshift recover from occlusion########### + diff --git a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h new file mode 100644 index 0000000..98130f2 --- /dev/null +++ b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h @@ -0,0 +1,557 @@ +/** + * Copyright 2014 University of Bremen, Institute for Artificial Intelligence + * Author: Thiemo Wiedemeyer + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + + +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include + +#include + +#include "open_ptrack/multiple_objects_detection/object_detector.h" +#include "open_ptrack/multiple_objects_detection/roi_zz.h" + +//publish the detections +#include +#include +#include +#include +#include + +using namespace opt_msgs; +using namespace sensor_msgs; + + +using namespace cv; + +class Multiple_Objects_Detection +{ +public: + +private: + + ////////////For receiving color and depth//////////// + std::mutex lock; + std::string cameraName; + std::string topicColor, topicDepth; + const bool useExact, useCompressed; + bool updateImage; + bool running; + const size_t queueSize; + cv::Mat color,depth; + cv::Mat cameraMatrixColor, cameraMatrixDepth; + typedef message_filters::sync_policies::ExactTime ExactSyncPolicy; + typedef message_filters::sync_policies::ApproximateTime ApproximateSyncPolicy; + ros::NodeHandle nh; + ros::AsyncSpinner spinner; + image_transport::ImageTransport it; + image_transport::SubscriberFilter *subImageColor, *subImageDepth; + message_filters::Subscriber *subCameraInfoColor, *subCameraInfoDepth; + + message_filters::Synchronizer *syncExact; + message_filters::Synchronizer *syncApproximate; + ////////////For receiving color and depth////////////// + + + + ////////////For roi selection//////////// + bool objects_selected,paused; + ///////////For roi selection///////////// + + + ///////////For background removal/////////// + cv::Mat depth_max; + bool use_background_removal; + int background_calculate_frames; + int threshold_4_detecting_foreground; + ///////////For background removal/////////// + + + + ///////////For camshift recover from occlusion/////////// + std::vector occludes; + ///////////For camshift recover from occlusion/////////// + + + + ///////////For generating 3d position/////////// + float fx ,fy,cx,cy; + ///////////For generating 3d position/////////// + + + + + ///////////For publish detection topic/////////// + std::string output_detection_topic; + ros::Publisher detection_pub; + std::string 
color_header_frameId; + ///////////For publish detection topic/////////// + + + + ///////////For main detection/////////// + // cv::Mat color, depth; + std::vector Object_Detectors; + std::vector current_detected_boxes; + cv::Mat main_color,main_depth; + ///////////For main detection/////////// + + + + + ///////////For display/////////// + bool show_2D_tracks; + std::vector> tracks_2D; + ///////////For display/////////// + + +public: + Multiple_Objects_Detection(const std::string _cameraName, const std::string &output_detection_topic,const bool useExact, const bool useCompressed, + const bool use_background_removal, const int background_calculate_frames,const int threshold_4_detecting_foreground,const bool show_2D_tracks) + : cameraName(_cameraName), output_detection_topic(output_detection_topic),useExact(useExact), useCompressed(useCompressed),updateImage(false), running(false), + use_background_removal(use_background_removal), objects_selected(false),paused(false),background_calculate_frames(background_calculate_frames),threshold_4_detecting_foreground(threshold_4_detecting_foreground), queueSize(5), + nh(), spinner(0), it(nh) ,show_2D_tracks(show_2D_tracks) + { + + topicColor = "/" + _cameraName + "/" + K2_TOPIC_LORES_COLOR K2_TOPIC_RAW; + topicDepth = "/" + _cameraName + "/" + K2_TOPIC_LORES_DEPTH K2_TOPIC_RAW; + cameraMatrixColor = cv::Mat::zeros(3, 3, CV_64F); + cameraMatrixDepth = cv::Mat::zeros(3, 3, CV_64F); + detection_pub=nh.advertise(output_detection_topic,3); + } + + ~Multiple_Objects_Detection() + { + } + + + + void run_detection(){ + + start_reciver(); + + for(; running && ros::ok();) + { + if(updateImage) + { + + lock.lock(); + main_color = this->color; + main_depth = this->depth; + updateImage = false; + lock.unlock(); + + // if accept background_removal ,generate the new color image without background + if(use_background_removal) + { + if ((background_calculate_frames>0))// if the back ground has not been got + { + background_calculation(); + } + else//after get the bakground (depth_max),using it to do background removal,generate new color + { + background_removal(); + } + } + + // if don't accept background_removal || background is already removed + if(!use_background_removal||(use_background_removal&&background_calculate_frames==0)) + { + if(!objects_selected&&paused)// pause to select object + { + multiple_objects_detector_initialization(); + } + + //!!!!!!!!!!!!!!!!!!!!!!!main loop!!!!!!!!!!!!!!!!!!!!!!! + else if(objects_selected&&!paused)// start to track after selecting objects + { + multiple_objects_detection_main(); + //draw tracks_2D (cotinuely 10 detections) + if(show_2D_tracks) + { + show_2D_tracks_on_image(); + } + } + + cv::imshow( "Object_Detector", main_color ); + char c = (char)cv::waitKey(10); + if( c == 27 ) + break; + switch(c) + { + case 'p': + paused = !paused; + break; + case 'k': + Object_Detectors.clear(); + break; + default: + ; + } + } + } + } + } + + +private: + void start_reciver() + { + running = true; + std::string topicCameraInfoColor = topicColor.substr(0, topicColor.rfind('/')) + "/camera_info"; + std::string topicCameraInfoDepth = topicDepth.substr(0, topicDepth.rfind('/')) + "/camera_info"; + + image_transport::TransportHints hints(useCompressed ? "compressed" : "raw"); + image_transport::TransportHints hintsDepth(useCompressed ? 
"compressedDepth" : "raw"); + subImageColor = new image_transport::SubscriberFilter(it, topicColor, queueSize, hints); + subImageDepth = new image_transport::SubscriberFilter(it, topicDepth, queueSize, hintsDepth); + subCameraInfoColor = new message_filters::Subscriber(nh, topicCameraInfoColor, queueSize); + subCameraInfoDepth = new message_filters::Subscriber(nh, topicCameraInfoDepth, queueSize); + + if(useExact) + { + syncExact = new message_filters::Synchronizer(ExactSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth); + syncExact->registerCallback(boost::bind(&Multiple_Objects_Detection::callback, this, _1, _2, _3, _4)); + } + else + { + syncApproximate = new message_filters::Synchronizer(ApproximateSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth); + syncApproximate->registerCallback(boost::bind(&Multiple_Objects_Detection::callback, this, _1, _2, _3, _4)); + } + + spinner.start(); + + std::chrono::milliseconds duration(1); + while(!updateImage ) + { + if(!ros::ok()) + { + return; + } + std::this_thread::sleep_for(duration); + } + + } + + + void background_calculation() + { + Mat test_backgroundfile=imread("/tmp/depth_background.png",CV_LOAD_IMAGE_UNCHANGED); + if(!test_backgroundfile.empty())// if the background successfully read from file + { + depth_max = test_backgroundfile; + background_calculate_frames=0; + printf("use background removal and background is successfully read from file......\n"); + printf("please press P to select ROIs......\n"); + + } + else//background can't be read from file, being calculated + { + printf("use background removal but background can't be read from file, being calculated......\n"); + if(depth_max.empty()) + depth_max=Mat::zeros(main_depth.size(),CV_16UC1); + else + depth_max=cv::max(depth_max,main_depth); + background_calculate_frames--; + if(background_calculate_frames==0)//save background file + { + std::vector compression_params; + compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); + compression_params.push_back(0); + // pcl::io::savePCDFileASCII ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud); + + cv::imwrite("/tmp/depth_background.png", depth_max, compression_params); + printf("background is sucessfully calculated and saved......\n"); + printf("please press P to select ROIs......\n"); + } + } + } + + void background_removal() + { + Mat depth_diff=depth_max-main_depth; + + int nr=depth_diff.rows; + int nc=depth_diff.cols; + if(depth_diff.isContinuous()&&main_color.isContinuous()) + { + nr=1; + nc=nc*depth_diff.rows*depth_diff.channels(); + } + for(int i=0;i(i); + uchar* colorData=main_color.ptr(i*3); + for(int j=0;j objects; + roi_zz select_roi; + select_roi.select("selectroi",main_color,objects,false); + cv::destroyWindow("selectroi"); + + //quit when the tracked object(s) is not provided + if(objects.size()<1) + return; + + + for(std::vector::iterator it =objects.begin();it!=objects.end();it++) + { + + Object_Detector newDetector; + cv::Rect selection=*it; + selection=selection&Rect(0,0,main_color.size().width,main_color.size().height); + + newDetector.setCurrentRect(selection); + Object_Detectors.push_back(newDetector); + + current_detected_boxes.push_back(selection); + + std::list tracks_2D_(10); + tracks_2D.push_back(tracks_2D_); + + bool occlude_=false; + occludes.push_back(occlude_); + + } + + Object_Detector::current_detected_boxes=current_detected_boxes; + + objects_selected=true; + paused=false; 
+ std::cout<header.frame_id = color_header_frameId; + detection_array_msg->header.stamp = ros::Time::now(); + + for(int i = 0; i < 3; i++) + for(int j = 0; j < 3; j++) + detection_array_msg->intrinsic_matrix.push_back(cameraMatrixColor.at(i, j)); + + //set the parms so that we can use the openptrack's tracking node + detection_array_msg->confidence_type = std::string("hog+svm"); + detection_array_msg->image_type = std::string("rgb"); + //detection_array_msg msg!!!!!!!!!!!!!!!!!!!!!!! + + + + //detect one by one + for(size_t i=0; i(current_center2D_d.y,current_center2D_d.x)/1000.0;//meter ,not mm + + detection_msg.box_3D.p1.z=current_center_depth; + detection_msg.box_3D.p1.x=(current_center2D_d.x-cx)*fx*current_center_depth; + detection_msg.box_3D.p1.y=(current_center2D_d.y-cy)*fy*current_center_depth; + + detection_msg.box_3D.p2.z=detection_msg.box_3D.p1.z; + detection_msg.box_3D.p2.x=detection_msg.box_3D.p1.x; + detection_msg.box_3D.p2.y=detection_msg.box_3D.p1.y; + + detection_msg.box_2D.x = current_detected_boxes[i].x; + detection_msg.box_2D.y = current_detected_boxes[i].y; + detection_msg.box_2D.width = current_detected_boxes[i].width; + detection_msg.box_2D.height = current_detected_boxes[i].height; + detection_msg.height = 0.002; + detection_msg.confidence = 10; + + detection_msg.centroid.x=detection_msg.box_3D.p1.x; + detection_msg.centroid.y=detection_msg.box_3D.p1.y; + detection_msg.centroid.z=detection_msg.box_3D.p1.z; + + detection_msg.distance = std::sqrt(detection_msg.centroid.x * detection_msg.centroid.x + detection_msg.centroid.z * detection_msg.centroid.z); + + detection_msg.top.x=detection_msg.box_3D.p1.x; + detection_msg.top.y=detection_msg.box_3D.p1.y; + detection_msg.top.z=detection_msg.box_3D.p1.z; + + detection_msg.bottom.x=detection_msg.box_3D.p1.x; + detection_msg.bottom.y=detection_msg.box_3D.p1.y; + detection_msg.bottom.z=detection_msg.box_3D.p1.z; + if(current_center_depth>0.01) + detection_array_msg->detections.push_back(detection_msg); + } + else{//if occluded ,use a empty_rect because of the tracker will use this to generate the other_objects_mask + // std::cout< current_track=tracks_2D[i]; + + for( std::list ::iterator it = current_track.begin(); it!=current_track.end(); it++) + { + Rect test_Rect=*it; + cv::rectangle(main_color, test_Rect, cv::Scalar(250*(i), 250*(i-1), 250*(i-2)), 2, CV_AA); + } + } + } + + + void callback(const sensor_msgs::Image::ConstPtr imageColor, const sensor_msgs::Image::ConstPtr imageDepth, + const sensor_msgs::CameraInfo::ConstPtr cameraInfoColor, const sensor_msgs::CameraInfo::ConstPtr cameraInfoDepth) + { + cv::Mat _color, _depth; + + if(objects_selected==0) + { + readCameraInfo(cameraInfoColor, cameraMatrixColor); + readCameraInfo(cameraInfoDepth, cameraMatrixDepth); + + fx = 1.0f / cameraMatrixColor.at(0, 0); + fy = 1.0f / cameraMatrixColor.at(1, 1); + cx = cameraMatrixColor.at(0, 2); + cy = cameraMatrixColor.at(1, 2); + } + + readImage(imageColor, _color); + readImage(imageDepth, _depth); + + color_header_frameId=imageColor->header.frame_id; + + if(_color.type() == CV_16U) + { + cv::Mat tmp; + _color.convertTo(tmp, CV_8U, 0.02); + cv::cvtColor(tmp, _color, CV_GRAY2BGR); + } + + lock.lock(); + this->color = _color; + this->depth = _depth; + updateImage = true; + lock.unlock(); + } + + + void readImage(const sensor_msgs::Image::ConstPtr msgImage, cv::Mat &image) const + { + cv_bridge::CvImageConstPtr pCvImage; + pCvImage = cv_bridge::toCvShare(msgImage, msgImage->encoding); + pCvImage->image.copyTo(image); + } + + void 
readCameraInfo(const sensor_msgs::CameraInfo::ConstPtr cameraInfo, cv::Mat &cameraMatrix) const + { + double *itC = cameraMatrix.ptr(0, 0); + for(size_t i = 0; i < 9; ++i, ++itC) + { + *itC = cameraInfo->K[i]; + } + } + + +}; + + + diff --git a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detectionConfig.h b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detectionConfig.h new file mode 100644 index 0000000..f30e670 --- /dev/null +++ b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detectionConfig.h @@ -0,0 +1,626 @@ +//#line 2 "/opt/ros/indigo/share/dynamic_reconfigure/cmake/../templates/ConfigType.h.template" +// ********************************************************* +// +// File autogenerated for the multiple_objects_detection package +// by the dynamic_reconfigure package. +// Please do not edit. +// +// ********************************************************/ + +#ifndef __multiple_objects_detection__MULTIPLE_OBJECTS_DETECTIONCONFIG_H__ +#define __multiple_objects_detection__MULTIPLE_OBJECTS_DETECTIONCONFIG_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace multiple_objects_detection +{ + class multiple_objects_detectionConfigStatics; + + class multiple_objects_detectionConfig + { + public: + class AbstractParamDescription : public dynamic_reconfigure::ParamDescription + { + public: + AbstractParamDescription(std::string n, std::string t, uint32_t l, + std::string d, std::string e) + { + name = n; + type = t; + level = l; + description = d; + edit_method = e; + } + + virtual void clamp(multiple_objects_detectionConfig &config, const multiple_objects_detectionConfig &max, const multiple_objects_detectionConfig &min) const = 0; + virtual void calcLevel(uint32_t &level, const multiple_objects_detectionConfig &config1, const multiple_objects_detectionConfig &config2) const = 0; + virtual void fromServer(const ros::NodeHandle &nh, multiple_objects_detectionConfig &config) const = 0; + virtual void toServer(const ros::NodeHandle &nh, const multiple_objects_detectionConfig &config) const = 0; + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, multiple_objects_detectionConfig &config) const = 0; + virtual void toMessage(dynamic_reconfigure::Config &msg, const multiple_objects_detectionConfig &config) const = 0; + virtual void getValue(const multiple_objects_detectionConfig &config, boost::any &val) const = 0; + }; + + typedef boost::shared_ptr AbstractParamDescriptionPtr; + typedef boost::shared_ptr AbstractParamDescriptionConstPtr; + + template + class ParamDescription : public AbstractParamDescription + { + public: + ParamDescription(std::string name, std::string type, uint32_t level, + std::string description, std::string edit_method, T multiple_objects_detectionConfig::* f) : + AbstractParamDescription(name, type, level, description, edit_method), + field(f) + {} + + T (multiple_objects_detectionConfig::* field); + + virtual void clamp(multiple_objects_detectionConfig &config, const multiple_objects_detectionConfig &max, const multiple_objects_detectionConfig &min) const + { + if (config.*field > max.*field) + config.*field = max.*field; + + if (config.*field < min.*field) + config.*field = min.*field; + } + + virtual void calcLevel(uint32_t &comb_level, const multiple_objects_detectionConfig &config1, const multiple_objects_detectionConfig &config2) const + { + if (config1.*field != config2.*field) + comb_level |= level; + } + + virtual void 
fromServer(const ros::NodeHandle &nh, multiple_objects_detectionConfig &config) const + { + nh.getParam(name, config.*field); + } + + virtual void toServer(const ros::NodeHandle &nh, const multiple_objects_detectionConfig &config) const + { + nh.setParam(name, config.*field); + } + + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, multiple_objects_detectionConfig &config) const + { + return dynamic_reconfigure::ConfigTools::getParameter(msg, name, config.*field); + } + + virtual void toMessage(dynamic_reconfigure::Config &msg, const multiple_objects_detectionConfig &config) const + { + dynamic_reconfigure::ConfigTools::appendParameter(msg, name, config.*field); + } + + virtual void getValue(const multiple_objects_detectionConfig &config, boost::any &val) const + { + val = config.*field; + } + }; + + class AbstractGroupDescription : public dynamic_reconfigure::Group + { + public: + AbstractGroupDescription(std::string n, std::string t, int p, int i, bool s) + { + name = n; + type = t; + parent = p; + state = s; + id = i; + } + + std::vector abstract_parameters; + bool state; + + virtual void toMessage(dynamic_reconfigure::Config &msg, const boost::any &config) const = 0; + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, boost::any &config) const =0; + virtual void updateParams(boost::any &cfg, multiple_objects_detectionConfig &top) const= 0; + virtual void setInitialState(boost::any &cfg) const = 0; + + + void convertParams() + { + for(std::vector::const_iterator i = abstract_parameters.begin(); i != abstract_parameters.end(); ++i) + { + parameters.push_back(dynamic_reconfigure::ParamDescription(**i)); + } + } + }; + + typedef boost::shared_ptr AbstractGroupDescriptionPtr; + typedef boost::shared_ptr AbstractGroupDescriptionConstPtr; + + template + class GroupDescription : public AbstractGroupDescription + { + public: + GroupDescription(std::string name, std::string type, int parent, int id, bool s, T PT::* f) : AbstractGroupDescription(name, type, parent, id, s), field(f) + { + } + + GroupDescription(const GroupDescription& g): AbstractGroupDescription(g.name, g.type, g.parent, g.id, g.state), field(g.field), groups(g.groups) + { + parameters = g.parameters; + abstract_parameters = g.abstract_parameters; + } + + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, boost::any &cfg) const + { + PT* config = boost::any_cast(cfg); + if(!dynamic_reconfigure::ConfigTools::getGroupState(msg, name, (*config).*field)) + return false; + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = &((*config).*field); + if(!(*i)->fromMessage(msg, n)) + return false; + } + + return true; + } + + virtual void setInitialState(boost::any &cfg) const + { + PT* config = boost::any_cast(cfg); + T* group = &((*config).*field); + group->state = state; + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = boost::any(&((*config).*field)); + (*i)->setInitialState(n); + } + + } + + virtual void updateParams(boost::any &cfg, multiple_objects_detectionConfig &top) const + { + PT* config = boost::any_cast(cfg); + + T* f = &((*config).*field); + f->setParams(top, abstract_parameters); + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = &((*config).*field); + (*i)->updateParams(n, top); + } + } + + virtual void toMessage(dynamic_reconfigure::Config &msg, const boost::any &cfg) const + { + const PT config = boost::any_cast(cfg); + 
dynamic_reconfigure::ConfigTools::appendGroup(msg, name, id, parent, config.*field); + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + (*i)->toMessage(msg, config.*field); + } + } + + T (PT::* field); + std::vector groups; + }; + +class DEFAULT +{ + public: + DEFAULT() + { + state = true; + name = "Default"; + } + + void setParams(multiple_objects_detectionConfig &config, const std::vector params) + { + for (std::vector::const_iterator _i = params.begin(); _i != params.end(); ++_i) + { + boost::any val; + (*_i)->getValue(config, val); + + if("HMax"==(*_i)->name){HMax = boost::any_cast(val);} + if("SMax"==(*_i)->name){SMax = boost::any_cast(val);} + if("VMax"==(*_i)->name){VMax = boost::any_cast(val);} + if("HMin"==(*_i)->name){HMin = boost::any_cast(val);} + if("SMin"==(*_i)->name){SMin = boost::any_cast(val);} + if("VMin"==(*_i)->name){VMin = boost::any_cast(val);} + if("use_hs"==(*_i)->name){use_hs = boost::any_cast(val);} + if("h_bins"==(*_i)->name){h_bins = boost::any_cast(val);} + if("s_bins"==(*_i)->name){s_bins = boost::any_cast(val);} + if("AREA_TOLERANCE"==(*_i)->name){AREA_TOLERANCE = boost::any_cast(val);} + if("QUALITY_TOLERANCE"==(*_i)->name){QUALITY_TOLERANCE = boost::any_cast(val);} + if("DENSITY_TOLORENCE"==(*_i)->name){DENSITY_TOLORENCE = boost::any_cast(val);} + } + } + + int HMax; +int SMax; +int VMax; +int HMin; +int SMin; +int VMin; +bool use_hs; +int h_bins; +int s_bins; +int AREA_TOLERANCE; +int QUALITY_TOLERANCE; +int DENSITY_TOLORENCE; + + bool state; + std::string name; + + +}groups; + + + +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + int HMax; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + int SMax; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + int VMax; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + int HMin; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + int SMin; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + int VMin; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + bool use_hs; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + int h_bins; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + int s_bins; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + int AREA_TOLERANCE; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + int QUALITY_TOLERANCE; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + int DENSITY_TOLORENCE; +//#line 218 "/opt/ros/indigo/share/dynamic_reconfigure/cmake/../templates/ConfigType.h.template" + + bool __fromMessage__(dynamic_reconfigure::Config &msg) + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + + int count = 0; + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + if ((*i)->fromMessage(msg, *this)) + 
count++; + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); i ++) + { + if ((*i)->id == 0) + { + boost::any n = boost::any(this); + (*i)->updateParams(n, *this); + (*i)->fromMessage(msg, n); + } + } + + if (count != dynamic_reconfigure::ConfigTools::size(msg)) + { + ROS_ERROR("multiple_objects_detectionConfig::__fromMessage__ called with an unexpected parameter."); + ROS_ERROR("Booleans:"); + for (unsigned int i = 0; i < msg.bools.size(); i++) + ROS_ERROR(" %s", msg.bools[i].name.c_str()); + ROS_ERROR("Integers:"); + for (unsigned int i = 0; i < msg.ints.size(); i++) + ROS_ERROR(" %s", msg.ints[i].name.c_str()); + ROS_ERROR("Doubles:"); + for (unsigned int i = 0; i < msg.doubles.size(); i++) + ROS_ERROR(" %s", msg.doubles[i].name.c_str()); + ROS_ERROR("Strings:"); + for (unsigned int i = 0; i < msg.strs.size(); i++) + ROS_ERROR(" %s", msg.strs[i].name.c_str()); + // @todo Check that there are no duplicates. Make this error more + // explicit. + return false; + } + return true; + } + + // This version of __toMessage__ is used during initialization of + // statics when __getParamDescriptions__ can't be called yet. + void __toMessage__(dynamic_reconfigure::Config &msg, const std::vector &__param_descriptions__, const std::vector &__group_descriptions__) const + { + dynamic_reconfigure::ConfigTools::clear(msg); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->toMessage(msg, *this); + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); ++i) + { + if((*i)->id == 0) + { + (*i)->toMessage(msg, *this); + } + } + } + + void __toMessage__(dynamic_reconfigure::Config &msg) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + __toMessage__(msg, __param_descriptions__, __group_descriptions__); + } + + void __toServer__(const ros::NodeHandle &nh) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->toServer(nh, *this); + } + + void __fromServer__(const ros::NodeHandle &nh) + { + static bool setup=false; + + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->fromServer(nh, *this); + + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); i++){ + if (!setup && (*i)->id == 0) { + setup = true; + boost::any n = boost::any(this); + (*i)->setInitialState(n); + } + } + } + + void __clamp__() + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const multiple_objects_detectionConfig &__max__ = __getMax__(); + const multiple_objects_detectionConfig &__min__ = __getMin__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->clamp(*this, __max__, __min__); + } + + uint32_t __level__(const multiple_objects_detectionConfig &config) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + uint32_t level = 0; + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != 
__param_descriptions__.end(); ++i) + (*i)->calcLevel(level, config, *this); + return level; + } + + static const dynamic_reconfigure::ConfigDescription &__getDescriptionMessage__(); + static const multiple_objects_detectionConfig &__getDefault__(); + static const multiple_objects_detectionConfig &__getMax__(); + static const multiple_objects_detectionConfig &__getMin__(); + static const std::vector &__getParamDescriptions__(); + static const std::vector &__getGroupDescriptions__(); + + private: + static const multiple_objects_detectionConfigStatics *__get_statics__(); + }; + + template <> // Max and min are ignored for strings. + inline void multiple_objects_detectionConfig::ParamDescription::clamp(multiple_objects_detectionConfig &config, const multiple_objects_detectionConfig &max, const multiple_objects_detectionConfig &min) const + { + return; + } + + class multiple_objects_detectionConfigStatics + { + friend class multiple_objects_detectionConfig; + + multiple_objects_detectionConfigStatics() + { +multiple_objects_detectionConfig::GroupDescription Default("Default", "", 0, 0, true, &multiple_objects_detectionConfig::groups); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.HMax = 0; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.HMax = 360; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.HMax = 360; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("HMax", "int", 0, "Hue max", "", &multiple_objects_detectionConfig::HMax))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("HMax", "int", 0, "Hue max", "", &multiple_objects_detectionConfig::HMax))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.SMax = 0; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.SMax = 255; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.SMax = 255; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("SMax", "int", 0, "Saturation max", "", &multiple_objects_detectionConfig::SMax))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("SMax", "int", 0, "Saturation max", "", &multiple_objects_detectionConfig::SMax))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.VMax = 0; +//#line 280 
"/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.VMax = 255; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.VMax = 255; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("VMax", "int", 0, "Value max", "", &multiple_objects_detectionConfig::VMax))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("VMax", "int", 0, "Value max", "", &multiple_objects_detectionConfig::VMax))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.HMin = 0; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.HMin = 360; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.HMin = 0; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("HMin", "int", 0, "Hue min", "", &multiple_objects_detectionConfig::HMin))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("HMin", "int", 0, "Hue min", "", &multiple_objects_detectionConfig::HMin))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.SMin = 0; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.SMin = 255; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.SMin = 80; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("SMin", "int", 0, "Saturation min", "", &multiple_objects_detectionConfig::SMin))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("SMin", "int", 0, "Saturation min", "", &multiple_objects_detectionConfig::SMin))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.VMin = 0; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.VMin = 255; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.VMin = 100; +//#line 280 
"/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("VMin", "int", 0, "Value min", "", &multiple_objects_detectionConfig::VMin))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("VMin", "int", 0, "Value min", "", &multiple_objects_detectionConfig::VMin))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.use_hs = 0; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.use_hs = 1; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.use_hs = 1; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("use_hs", "bool", 0, "whether use HS or just H to gennerate the hist of roi to do the back projection", "", &multiple_objects_detectionConfig::use_hs))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("use_hs", "bool", 0, "whether use HS or just H to gennerate the hist of roi to do the back projection", "", &multiple_objects_detectionConfig::use_hs))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.h_bins = 1; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.h_bins = 100; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.h_bins = 36; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("h_bins", "int", 0, "Divide (HMin,HMax) to h_bins parts", "", &multiple_objects_detectionConfig::h_bins))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("h_bins", "int", 0, "Divide (HMin,HMax) to h_bins parts", "", &multiple_objects_detectionConfig::h_bins))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.s_bins = 1; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.s_bins = 100; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.s_bins = 18; +//#line 280 
"/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("s_bins", "int", 0, "Divide (SMin,SMax) to s_bins parts", "", &multiple_objects_detectionConfig::s_bins))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("s_bins", "int", 0, "Divide (SMin,SMax) to s_bins parts", "", &multiple_objects_detectionConfig::s_bins))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.AREA_TOLERANCE = 5; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.AREA_TOLERANCE = 100; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.AREA_TOLERANCE = 20; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("AREA_TOLERANCE", "int", 0, "AREA_TOLERANCE", "", &multiple_objects_detectionConfig::AREA_TOLERANCE))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("AREA_TOLERANCE", "int", 0, "AREA_TOLERANCE", "", &multiple_objects_detectionConfig::AREA_TOLERANCE))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.QUALITY_TOLERANCE = 5000; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.QUALITY_TOLERANCE = 65535; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.QUALITY_TOLERANCE = 20000; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("QUALITY_TOLERANCE", "int", 0, "QUALITY_TOLERANCE", "", &multiple_objects_detectionConfig::QUALITY_TOLERANCE))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("QUALITY_TOLERANCE", "int", 0, "QUALITY_TOLERANCE", "", &multiple_objects_detectionConfig::QUALITY_TOLERANCE))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __min__.DENSITY_TOLORENCE = 0; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __max__.DENSITY_TOLORENCE = 50; +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __default__.DENSITY_TOLORENCE = 4; +//#line 
280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("DENSITY_TOLORENCE", "int", 0, "DENSITY_TOLORENCE", "", &multiple_objects_detectionConfig::DENSITY_TOLORENCE))); +//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("DENSITY_TOLORENCE", "int", 0, "DENSITY_TOLORENCE", "", &multiple_objects_detectionConfig::DENSITY_TOLORENCE))); +//#line 235 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + Default.convertParams(); +//#line 235 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" + __group_descriptions__.push_back(multiple_objects_detectionConfig::AbstractGroupDescriptionConstPtr(new multiple_objects_detectionConfig::GroupDescription(Default))); +//#line 353 "/opt/ros/indigo/share/dynamic_reconfigure/cmake/../templates/ConfigType.h.template" + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); ++i) + { + __description_message__.groups.push_back(**i); + } + __max__.__toMessage__(__description_message__.max, __param_descriptions__, __group_descriptions__); + __min__.__toMessage__(__description_message__.min, __param_descriptions__, __group_descriptions__); + __default__.__toMessage__(__description_message__.dflt, __param_descriptions__, __group_descriptions__); + } + std::vector __param_descriptions__; + std::vector __group_descriptions__; + multiple_objects_detectionConfig __max__; + multiple_objects_detectionConfig __min__; + multiple_objects_detectionConfig __default__; + dynamic_reconfigure::ConfigDescription __description_message__; + + static const multiple_objects_detectionConfigStatics *get_instance() + { + // Split this off in a separate function because I know that + // instance will get initialized the first time get_instance is + // called, and I am guaranteeing that get_instance gets called at + // most once. 
+ static multiple_objects_detectionConfigStatics instance; + return &instance; + } + }; + + inline const dynamic_reconfigure::ConfigDescription &multiple_objects_detectionConfig::__getDescriptionMessage__() + { + return __get_statics__()->__description_message__; + } + + inline const multiple_objects_detectionConfig &multiple_objects_detectionConfig::__getDefault__() + { + return __get_statics__()->__default__; + } + + inline const multiple_objects_detectionConfig &multiple_objects_detectionConfig::__getMax__() + { + return __get_statics__()->__max__; + } + + inline const multiple_objects_detectionConfig &multiple_objects_detectionConfig::__getMin__() + { + return __get_statics__()->__min__; + } + + inline const std::vector &multiple_objects_detectionConfig::__getParamDescriptions__() + { + return __get_statics__()->__param_descriptions__; + } + + inline const std::vector &multiple_objects_detectionConfig::__getGroupDescriptions__() + { + return __get_statics__()->__group_descriptions__; + } + + inline const multiple_objects_detectionConfigStatics *multiple_objects_detectionConfig::__get_statics__() + { + const static multiple_objects_detectionConfigStatics *statics; + + if (statics) // Common case + return statics; + + boost::mutex::scoped_lock lock(dynamic_reconfigure::__init_mutex__); + + if (statics) // In case we lost a race. + return statics; + + statics = multiple_objects_detectionConfigStatics::get_instance(); + + return statics; + } + + +} + +#endif // __MULTIPLE_OBJECTS_DETECTIONRECONFIGURATOR_H__ diff --git a/detection/include/open_ptrack/multiple_objects_detection/object_detector.h b/detection/include/open_ptrack/multiple_objects_detection/object_detector.h new file mode 100644 index 0000000..2fe8457 --- /dev/null +++ b/detection/include/open_ptrack/multiple_objects_detection/object_detector.h @@ -0,0 +1,102 @@ +#ifndef Object_Detector_H +#define Object_Detector_H +//#include + +#include "opencv2/core/core.hpp" +#include "opencv2/highgui/highgui.hpp" +#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/video/tracking.hpp" +using namespace cv; +class Object_Detector +{ +public: + bool occluded; + double half_occluded_frames; + + // ///////////For hsv mask/////////// + static int HMin, SMin, VMin; + static int HMax,SMax, VMax; + // ///////////For hsv mask/////////// + + // ///////////For camshift/////////// + static bool use_hs;//if use_hs==true, use hue and s to generate the hitograme to do the bacprojection , if not, just used the hue image + static int h_bins,s_bins;//for the hue backrojection hist ,divide (HMin,HMax) to h_bins parts + // ///////////For camshift/////////// + + + + ///////////For camshift recover from occlusion/////////// + //for camshift, for the window in backproject image,the value represent the similirity with the ROI. the value of the pixel is equal to 255*percentage ,the percentage come from the histogram(problility) + // QUALITY_TOLERANCE here means the TOLERANCE of sum of the window in backproject image,it represent the quality of the window. + // DENSITY_TOLORENCE here means the TOLERANCE of the DENSITY,DENSITY=QUALITY/window.area(); + // AREA_TOLERANCE here means the TOLERANCE of the x,y ,window.it means the objects maybe apprear in the next frame ,in the window which increase it's size by AREA_TOLERANCE every frame.so the bigger AREA_TOLERANCE,the objects can move faster. 
+    // if the QUALITY<QUALITY_TOLERANCE, it will be "occluded"; if the DENSITY>DENSITY_TOLORENCE, it will be "not occluded";
+    static int AREA_TOLERANCE;//AREA_TOLERANCE is also used to create the position_mask
+    static int QUALITY_TOLERANCE;
+    static double DENSITY_TOLORENCE;
+    ///////////For camshift recover from occlusion///////////
+
+    static std::vector<cv::Rect> current_detected_boxes;//this comes from the Multiple_Objects_Detection class: on every detection of every object, multiple_objects_detection updates this variable; we use it to generate the other_object_mask
+
+private:
+    static cv::Mat mainColor;
+    static cv::Mat mainDepth;
+
+
+    bool firstRun;
+    cv::Rect currentRect;
+
+    cv::Rect detectWindow;
+    cv::Mat hsv, hue, hsv_mask, hist, backproj; //, histimg;
+
+
+    cv::Rect selection;
+    cv::Mat Color,Depth;
+
+    Mat other_object_mask,position_mask,depth_mask;
+
+
+public:
+    Object_Detector()
+        :firstRun(true),occluded(false),half_occluded_frames(10)
+    {
+    }
+
+    static void setMainColor(const cv::Mat _mainColor);
+    static cv::Mat getMainColor();
+
+    static void setMainDepth(const cv::Mat _mainDepth);
+    static cv::Mat getMainDepth();
+
+    void setCurrentRect(const cv::Rect _currentRect);
+    cv::Rect getCurrentRect();
+
+    void setcurrent_detected_boxes(std::vector<cv::Rect> _current_detected_boxes);
+    std::vector<cv::Rect> getcurrent_detected_boxes();
+
+    cv::RotatedRect object_shift(InputArray _probColor,Rect& window, TermCriteria criteria);
+
+    cv::RotatedRect detectCurrentRect(int id);
+};
+
+//static variable definitions
+int Object_Detector::HMax;
+int Object_Detector::SMax;
+int Object_Detector::VMax;
+int Object_Detector::HMin;
+int Object_Detector::SMin;
+int Object_Detector::VMin;
+
+bool Object_Detector::use_hs;
+int Object_Detector::h_bins;
+int Object_Detector::s_bins;
+
+int Object_Detector::AREA_TOLERANCE;
+int Object_Detector::QUALITY_TOLERANCE;
+double Object_Detector::DENSITY_TOLORENCE;
+
+std::vector<cv::Rect> Object_Detector::current_detected_boxes;
+#endif // Object_Detector_H
diff --git a/detection/include/open_ptrack/multiple_objects_detection/roi_zz.h b/detection/include/open_ptrack/multiple_objects_detection/roi_zz.h
new file mode 100644
index 0000000..ff3b424
--- /dev/null
+++ b/detection/include/open_ptrack/multiple_objects_detection/roi_zz.h
@@ -0,0 +1,70 @@
+#ifndef ROI_ZZ_H
+#define ROI_ZZ_H
+
+//#include "opencv2/core/utility.hpp"
+
+#include
+
+
+
+#include "opencv2/ocl/ocl.hpp"
+#include "opencv2/core/core.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/video/tracking.hpp"
+
+#include "opencv2/imgproc/types_c.h"
+//#include "feature.hpp"
+//#include "onlineMIL.hpp"
+//#include "onlineBoosting.hpp"
+
+using namespace cv;
+
+//class roi_zz
+//{
+//public:
+//    roi_zz();
+//};
+
+
+
+class roi_zz {
+public:
+    Rect select(Mat img, bool fromCenter = true);
+    Rect select(const cv::String& windowName, Mat img, bool showCrossair = true, bool fromCenter = true);
+    void select(const cv::String& windowName, Mat img, std::vector<Rect> & boundingBox, bool fromCenter = true);
+
+    struct handlerT{
+        // basic parameters
+        bool isDrawing;
+        Rect box;
+        Mat image;
+
+        // parameters for drawing from the center
+        bool drawFromCenter;
+        Point2f center;
+
+        // initializer list
+        handlerT() : isDrawing(false), drawFromCenter(true) {};
+    }selectorParams;
+
+    // to store the tracked objects
+    std::vector<handlerT> objects;
+
+private:
+    static void mouseHandler(int event, int x, int y, int flags, void *param);
+    void opencv_mouse_callback(int event, int x, int y, int, void *param);
+
+    // save the keypressed character
+    int key;
+};
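+
+// Usage sketch (illustrative only; the window name and frame source are
+// placeholders, not part of the original API):
+//
+//   roi_zz selector;
+//   std::vector<cv::Rect> boxes;
+//   cv::Mat first_frame = ...;  // first color image from the sensor
+//   selector.select("objects selector", first_frame, boxes, false);
+//   // each SPACE/ENTER press stores the current rectangle; ESC ends the loop,
+//   // leaving one cv::Rect per selected object in `boxes`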
+ + + + + + + + +#endif // ROI_ZZ_H diff --git a/detection/launch/detector_kinect2.launch b/detection/launch/detector_kinect2.launch index b2125d9..d850420 100644 --- a/detection/launch/detector_kinect2.launch +++ b/detection/launch/detector_kinect2.launch @@ -1,5 +1,6 @@ + @@ -14,7 +15,7 @@ - + @@ -27,7 +28,17 @@ - - + + + + + + + + + + + + diff --git a/detection/src/multiple_objects_detection/object_detector.cpp b/detection/src/multiple_objects_detection/object_detector.cpp new file mode 100644 index 0000000..d359860 --- /dev/null +++ b/detection/src/multiple_objects_detection/object_detector.cpp @@ -0,0 +1,316 @@ +#include "open_ptrack/multiple_objects_detection/object_detector.h" +#include +// Static member definition ... +cv::Mat Object_Detector::mainColor; +cv::Mat Object_Detector::mainDepth; + +void Object_Detector::setMainColor(const cv::Mat _mainColor) +{ + _mainColor.copyTo(mainColor); +} + +cv::Mat Object_Detector::getMainColor() +{ + return mainColor; +} + +void Object_Detector::setMainDepth(const cv::Mat _mainDepth) +{ + _mainDepth.copyTo(mainDepth); +} + +cv::Mat Object_Detector::getMainDepth() +{ + return mainDepth; +} + + +void Object_Detector::setCurrentRect(const cv::Rect _currentRect) +{ + currentRect = _currentRect; + selection = currentRect; +} + +cv::Rect Object_Detector::getCurrentRect() +{ + return currentRect; +} + +void Object_Detector::setcurrent_detected_boxes(std::vector _current_detected_boxes) +{ + current_detected_boxes=_current_detected_boxes; +} + +std::vector Object_Detector::getcurrent_detected_boxes() +{ + return current_detected_boxes; +} + +cv::RotatedRect Object_Detector::object_shift(InputArray _probColor,Rect& window, TermCriteria criteria) +{ + // CV_INSTRUMENT_REGION(); + + Size size; + Mat mat; + mat = _probColor.getMat(), size = mat.size(); + + cv::meanShift( _probColor, window, criteria ); + + window.x -= AREA_TOLERANCE; + window.y -= AREA_TOLERANCE; + window.width += 2 * AREA_TOLERANCE; + window.height += 2 * AREA_TOLERANCE; + window=window&Rect(0,0,size.width,size.height); + + // Calculating moments in new center mass + // Moments m = isUMat ? 
moments(umat(window)) : moments(mat(window));
+    Moments m = moments(mat(window));
+
+
+    double m00 = m.m00, m10 = m.m10, m01 = m.m01;
+    double mu11 = m.mu11, mu20 = m.mu20, mu02 = m.mu02;
+
+
+
+    //    // use 1 as the quality to calculate the percentage of the object relative to the window
+    //    Mat hsv_mask_window=hsv_mask(window);
+    //    Moments m_hsv_mask_window = moments(hsv_mask_window);
+    //    double m_hsv_mask_window00 = m_hsv_mask_window.m00;
+
+
+
+    ////////////difference from cv::camshift//////////
+    // occlusion handling
+    if( fabs(m00) < QUALITY_TOLERANCE )
+    {
+        occluded=true;
+        //        std::cout<<"totally occluded: window density "<= 360)
+        box.angle -= 360;
+    if(box.angle >= 180)
+        box.angle -= 180;
+    box.center = Point2f( window.x + window.width*0.5f, window.y + window.height*0.5f);
+
+    return box;
+}
+
+cv::RotatedRect Object_Detector::detectCurrentRect(int id)
+{
+
+
+    //    cv::Mat Color;
+    mainColor.copyTo(Color);
+
+    cv::cvtColor(Color, hsv, CV_BGR2HSV);
+
+    cv::inRange(hsv, cv::Scalar(HMin, SMin, MIN(VMin,VMax)),
+                cv::Scalar(HMax, SMax, MAX(VMin, VMax)), hsv_mask);
+
+    //used for just hue
+    float h_ranges[] = {0,HMax};
+    const float* ph_ranges = h_ranges;
+    int h_channels[] = {0, 0};
+    if(use_hs==false)
+    {
+        hue.create(hsv.size(), hsv.depth());
+        cv::mixChannels(&hsv, 1, &hue, 1, h_channels, 1);
+    }
+    //used for just hue
+
+
+    //used for h s
+    int hs_size[] = { h_bins, s_bins };
+    float h_range[] = {HMin,HMax};
+    float s_range[] = { SMin, SMax };
+    const float* phs_ranges[] = { h_range, s_range };
+    int channels[] = { 0, 1 };
+    //used for h s
+
+
+    if( firstRun )
+    {
+        if(use_hs)
+        {
+            //h s rather than h
+            cv::Mat roi(hsv, selection), maskroi(hsv_mask, selection);
+            cv::calcHist(&roi, 1, channels, maskroi, hist, 2, hs_size, phs_ranges, true, false);
+            cv::normalize(hist, hist, 0, 255, CV_MINMAX);
+        }
+        else{
+            //just hue
+            cv::Mat roi(hue, selection), maskroi(hsv_mask, selection);
+            cv::calcHist(&roi, 1, 0, maskroi, hist, 1, &h_bins, &ph_ranges);
+            cv::normalize(hist, hist, 0, 255, CV_MINMAX);
+        }
+
+        detectWindow = selection;
+        firstRun = false;
+    }
+
+
+    if(use_hs)
+        cv::calcBackProject( &hsv, 1, channels, hist, backproj, phs_ranges, 1, true );
+    else
+        cv::calcBackProject(&hue, 1, 0, hist, backproj, &ph_ranges);
+
+
+//    imshow("backproj first",backproj);
+    // calculate the other_object_mask //where to put this? to solve the blinking
+    other_object_mask=Mat::ones(Color.size(), CV_8U)*255;
+    for (int i=0; i1)
+    {
+        //use x y to generate position_mask
+        position_mask=Mat::zeros(Color.size(),CV_8UC1);
+        // create a bigger search_window based on detectWindow
+        int detectWindow_XL=detectWindow.x-AREA_TOLERANCE,detectWindow_XR=detectWindow.x+detectWindow.width+AREA_TOLERANCE;
+        int detectWindow_YT=detectWindow.y-AREA_TOLERANCE,detectWindow_YB=detectWindow.y+detectWindow.height+AREA_TOLERANCE;
+        Rect search_window=Rect(detectWindow_XL,detectWindow_YT,detectWindow_XR-detectWindow_XL,detectWindow_YB-detectWindow_YT)&Rect(0,0,Color.size().width,Color.size().height);
+        position_mask(search_window)=255;
+
+//        // calculate the other_object_mask
+//        other_object_mask=Mat::ones(Color.size(), CV_8U)*255;
+//        for (int i=0; i1)
+        tmp_detectedBox = object_shift(backproj, detectWindow,
+                                       cv::TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));
+//    cv::rectangle(backproj, detectWindow, cv::Scalar(255, 0, 0), 2, CV_AA);
+//    imshow("backproj final",backproj);
+    return tmp_detectedBox;
+
+}
diff --git a/detection/src/multiple_objects_detection/roi_zz.cpp
b/detection/src/multiple_objects_detection/roi_zz.cpp
new file mode 100644
index 0000000..f0ad3f2
--- /dev/null
+++ b/detection/src/multiple_objects_detection/roi_zz.cpp
@@ -0,0 +1,127 @@
+#include "open_ptrack/multiple_objects_detection/roi_zz.h"
+
+void roi_zz::mouseHandler(int event, int x, int y, int flags, void *param){
+    roi_zz *self =static_cast<roi_zz*>(param);
+    self->opencv_mouse_callback(event,x,y,flags,param);
+}
+
+void roi_zz::opencv_mouse_callback( int event, int x, int y, int , void *param ){
+    handlerT * data = (handlerT*)param;
+    switch( event ){
+    // update the selected bounding box
+    case EVENT_MOUSEMOVE:
+        if( data->isDrawing ){
+            if(data->drawFromCenter){
+                data->box.width = 2*(x-data->center.x)/*data->box.x*/;
+                data->box.height = 2*(y-data->center.y)/*data->box.y*/;
+                data->box.x = data->center.x-data->box.width/2.0;
+                data->box.y = data->center.y-data->box.height/2.0;
+            }else{
+                data->box.width = x-data->box.x;
+                data->box.height = y-data->box.y;
+            }
+        }
+        break;
+
+    // start to select the bounding box
+    case EVENT_LBUTTONDOWN:
+        data->isDrawing = true;
+        data->box = cvRect( x, y, 0, 0 );
+        data->center = Point2f((float)x,(float)y);
+        break;
+
+    // cleaning up the selected bounding box
+    case EVENT_LBUTTONUP:
+        data->isDrawing = false;
+        if( data->box.width < 0 ){
+            data->box.x += data->box.width;
+            data->box.width *= -1;
+        }
+        if( data->box.height < 0 ){
+            data->box.y += data->box.height;
+            data->box.height *= -1;
+        }
+        break;
+    }
+}
+
+Rect roi_zz::select(const cv::String& windowName, Mat img, bool showCrossair, bool fromCenter){
+
+    key=0;
+
+    // set the drawing mode
+    selectorParams.drawFromCenter = fromCenter;
+
+    // show the image and give feedback to user
+    imshow(windowName,img);
+
+    // copy the data; the rectangle should be drawn on a fresh image
+    selectorParams.image=img.clone();
+
+    // select the object
+    setMouseCallback( windowName, mouseHandler, (void *)&selectorParams );
+
+    // end selection process on SPACE (32), ESC (27) or ENTER (13)
+    while(!(key==32 || key==27 || key==13)){
+        // draw the selected object
+        rectangle(
+            selectorParams.image,
+            selectorParams.box,
+            Scalar(255,0,0),2,1
+        );
+
+        // draw a crosshair in the middle of the bounding box
+        if(showCrossair){
+            // horizontal line
+            line(
+                selectorParams.image,
+                Point((int)selectorParams.box.x,(int)(selectorParams.box.y+selectorParams.box.height/2)),
+                Point((int)(selectorParams.box.x+selectorParams.box.width),(int)(selectorParams.box.y+selectorParams.box.height/2)),
+                Scalar(255,0,0),2,1
+            );
+
+            // vertical line
+            line(
+                selectorParams.image,
+                Point((int)(selectorParams.box.x+selectorParams.box.width/2),(int)selectorParams.box.y),
+                Point((int)(selectorParams.box.x+selectorParams.box.width/2),(int)(selectorParams.box.y+selectorParams.box.height)),
+                Scalar(255,0,0),2,1
+            );
+        }
+
+        // show the image with the bounding box
+        imshow(windowName,selectorParams.image);
+
+        // reset the image
+        selectorParams.image=img.clone();
+
+        //get keyboard event, extract lower 8 bits for scancode comparison
+        key=waitKey(1) & 0xFF;
+    }
+
+
+    return selectorParams.box;
+}
+
+void roi_zz::select(const cv::String& windowName, Mat img, std::vector<Rect> & boundingBox, bool fromCenter){
+    std::vector<Rect> box;
+    Rect temp;
+    key=0;
+
+    // show notice to user
+    std::cout<<"Select an object to track and then press SPACE or ENTER button!"<<std::endl;
+
+    // keep selecting objects until ESC (27) is pressed
+    while(key!=27)
+    {
+        temp=select(windowName, img, true, fromCenter);
+        if(temp.width>0 && temp.height>0)
+            box.push_back(temp);
+    }
+    boundingBox=box;
+}
+
+Rect roi_zz::select(Mat img, bool fromCenter){
+    return select("ROI selector", img, true, fromCenter);
+}
diff --git
a/tracking/conf/object_tracker_multicamera.yaml b/tracking/conf/object_tracker_multicamera.yaml
new file mode 100644
index 0000000..09ab767
--- /dev/null
+++ b/tracking/conf/object_tracker_multicamera.yaml
@@ -0,0 +1,78 @@
+#########################
+## Tracking parameters ##
+#########################
+# Minimum confidence for track initialization:
+min_confidence_initialization: 4.0
+# Tracking maximum frame rate per sensor:
+rate: 30
+# Tracking reference frame:
+world_frame_id: "/world"
+# Voxel size used for people detection:
+voxel_size: 0.06
+# Flag stating if extrinsic (multicamera) calibration has been performed or not:
+extrinsic_calibration: true
+# Maximum delay that a detection message can have in order to be considered for tracking:
+max_detection_delay: 2.0
+# Flag stating if the results of a calibration refinement procedure should be used to correct detection positions:
+calibration_refinement: true
+
+########################
+## Sensor orientation ##
+########################
+# Flag stating if the sensor is vertically placed, or not:
+orientation:
+  vertical: false
+
+#######################
+## Motion parameters ##
+#######################
+# The higher this value, the higher the motion variance that is allowed;
+# it can assume these values: 0.5 0.75 0.8 0.9 0.95 0.98 0.99 0.995 0.998 0.999
+gate_distance_probability: 0.999
+# Acceleration variance in people motion model:
+acceleration_variance: 1000
+# Additional weight for position variance in people motion model:
+position_variance_weight: 100
+
+#################################
+## Data association parameters ##
+#################################
+# Flag stating if detection likelihood should be used in data association:
+detector_likelihood: true
+# Flag stating if track velocity should be considered in data association:
+velocity_in_motion_term: false
+# Weight of detection likelihood in data association:
+detector_weight: -0.25
+# Weight of motion likelihood in data association:
+motion_weight: 0.25
+
+################################
+## Tracking policy parameters ##
+################################
+# Track duration (seconds) after the last valid detection:
+sec_before_old: 8
+# Seconds within which a track should be validated (otherwise it is discarded):
+sec_before_fake: 2.4
+# Seconds after which a visible track obtains NORMAL status:
+sec_remain_new: 1.2
+# Minimum number of detection<->track associations needed for validating a track:
+detections_to_validate: 3
+
+###########
+## Debug ##
+###########
+# Flag activating debug/visualization options:
+debug_active: true
+# Flag stating if a point cloud containing track trajectories (visible in RViz) should be created:
+history_pointcloud: true
+# Dimension of the point cloud containing track trajectories:
+history_size: 500
+# Flag stating if a spheric marker (with number) for every track (visible in RViz) should be created:
+markers: true
+# Flag stating if tracking results should be sent over the network:
+tracking_results: true
+# Flag stating if markers and a point cloud containing detection trajectories (visible in RViz) should be created:
+detection_debug: true
+# Dimension of the point cloud containing detection trajectories:
+detection_history_size: 500
+
diff --git a/tracking/launch/tracker_network.launch b/tracking/launch/tracker_network.launch
index e8427d2..35a1f3c 100644
--- a/tracking/launch/tracker_network.launch
+++ b/tracking/launch/tracker_network.launch
@@ -1,11 +1,23 @@
-
-
+
+
+
+
+
-
-
+
+
+
+
+
+
+
+
+
+
+
@@ -13,10 +25,39 @@
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+

From ddb3bd8e17d605ff586ba5b710b2732225329a83 Mon Sep 17 00:00:00 2001
From: yongheng1991
Date: Wed, 15 Mar 2017 15:24:01 +0100
Subject: [PATCH 2/9] 1. Add HSD backprojection as another option

Now you can choose from 3 options ("H", "HS", "HSD") for the backprojection.
The HSD model is slower (6 ms per object); it can still be improved.
If the objects are in different colors, choose HS rather than HSD.

2. Modify the YAML file (multiple_objects_detection.yaml) and add some comments.

3. Delete "multiple_objects_detectionConfig.h", since it can be generated from the CFG file.

4. Set the camera_name variable to the constant "kinect2_head"; the kinect2_head-based topics can then be remapped with the Camera_name argument in the detection launch file.

5. After you select the objects, the window that shows the detections is destroyed. This makes it easier to draw the bounding boxes on the master computer, because there is no longer a crowd of windows displaying the images from every camera. If you still want to check the detections on the image, set the variable "show_2D_track" in "multiple_objects_detection.yaml" to true.
---
 .../apps/multiple_objects_detection_node.cpp  |  36 +-
 detection/cfg/multiple_objects_detection.cfg  |  12 +-
 .../conf/multiple_objects_detection.yaml      |  32 +-
 .../multiple_objects_detection.h              |  71 +-
 .../multiple_objects_detectionConfig.h        | 626 ------------------
 .../object_detector.h                         |  40 +-
 .../object_detector.cpp                       | 284 +++++---
 7 files changed, 326 insertions(+), 775 deletions(-)
 delete mode 100644 detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detectionConfig.h

diff --git a/detection/apps/multiple_objects_detection_node.cpp b/detection/apps/multiple_objects_detection_node.cpp
index 121f358..6bbc995 100644
--- a/detection/apps/multiple_objects_detection_node.cpp
+++ b/detection/apps/multiple_objects_detection_node.cpp
@@ -6,7 +6,7 @@
 #include "std_msgs/String.h"
 
 #include
-#include "open_ptrack/multiple_objects_detection/multiple_objects_detectionConfig.h"
+#include <detection/multiple_objects_detectionConfig.h>
 #include "open_ptrack/multiple_objects_detection/multiple_objects_detection.h"
 
 using namespace std;
@@ -26,7 +26,7 @@ int threshold_4_detecting_foreground;//threshold of depth difference,use the dif
 bool show_2D_tracks;
 
 
-void configCb(multiple_objects_detection::multiple_objects_detectionConfig &config, uint32_t level)
+void configCb(detection::multiple_objects_detectionConfig &config, uint32_t level)
 {
 Object_Detector::HMin=config.HMin;
 Object_Detector::HMin=config.HMin;
@@ -35,9 +35,9 @@ void configCb(multiple_objects_detection::multiple_objects_detectionConf
 Object_Detector::HMax=config.HMax;
 Object_Detector::SMax=config.HMax;
 Object_Detector::VMax=config.VMax;
- Object_Detector::use_hs=config.use_hs;
 Object_Detector::h_bins=config.h_bins;
 Object_Detector::s_bins=config.s_bins;
+ Object_Detector::d_bins=config.d_bins;
 Object_Detector::AREA_TOLERANCE=config.AREA_TOLERANCE;
 Object_Detector::QUALITY_TOLERANCE=config.QUALITY_TOLERANCE;
 Object_Detector::DENSITY_TOLORENCE=config.DENSITY_TOLORENCE;
@@ -54,7 +54,7 @@ int main(int argc, char** argv){
 return 0;
 }
 
- nh.param("sensor_name", cameraName, std::string("kinect2_head"));
+// nh.param("sensor_name", cameraName, std::string("kinect2_head"));
 nh.param("use_background_removal",use_background_removal,false);
 nh.param("background_calculate_frames",background_calculate_frames,100);
 nh.param("threshold_4_detecting_foreground",threshold_4_detecting_foreground,100);
@@ -62,7 +62,6 @@ int main(int argc,
char** argv){
 nh.param("output_detection_topic",output_detection_topic,std::string("/objects_detector/detections"));
 
-
 //set the stastic varables of object_detector class,the instruction of the varable can be found in the header of the class
 nh.param("HMin",Object_Detector::HMin,0);
@@ -70,25 +69,44 @@ int main(int argc, char** argv){
 nh.param("HMax",Object_Detector::HMax,360);
 nh.param("SMax",Object_Detector::SMax,256);
 nh.param("VMax",Object_Detector::VMax,256);
- nh.param("use_hs",Object_Detector::use_hs,true);
 nh.param("h_bins",Object_Detector::h_bins,36);
 nh.param("s_bins",Object_Detector::s_bins,18);
+ nh.param("d_bins",Object_Detector::d_bins,100);
+
 nh.param("AREA_TOLERANCE",Object_Detector::AREA_TOLERANCE,20);
 nh.param("QUALITY_TOLERANCE",Object_Detector::QUALITY_TOLERANCE,20000);
 nh.param("DENSITY_TOLORENCE",Object_Detector::DENSITY_TOLORENCE,4.0);
+ nh.param("Backprojection_Mode",Object_Detector::Backprojection_Mode,std::string("HSD"));
+
+
+ // std::cout << "output detection topic: " << output_detection_topic << std::endl;
+ std::cout << "HSV range:"<< std::endl
+ << "H: (" << Object_Detector::HMin <<"~"<< Object_Detector::HMax <<")"<< std::endl
+ << "S: (" << Object_Detector::SMin <<"~"<< Object_Detector::SMax <<")"<< std::endl
+ << "V: (" << Object_Detector::VMin <<"~"<< Object_Detector::VMax <<")"<< std::endl
+ << std::endl;
+
+
+ std::cout << "Occlusion Params:"<< std::endl
+ <<"AREA_TOLERANCE: "<<Object_Detector::AREA_TOLERANCE<< std::endl
+ <<"QUALITY_TOLERANCE: "<<Object_Detector::QUALITY_TOLERANCE<< std::endl
+ <<"DENSITY_TOLORENCE: "<<Object_Detector::DENSITY_TOLORENCE<< std::endl
+ << std::endl;
+
+
- dynamic_reconfigure::Server<multiple_objects_detection::multiple_objects_detectionConfig> server;
- dynamic_reconfigure::Server<multiple_objects_detection::multiple_objects_detectionConfig>::CallbackType f;
+ dynamic_reconfigure::Server<detection::multiple_objects_detectionConfig> server;
+ dynamic_reconfigure::Server<detection::multiple_objects_detectionConfig>::CallbackType f;
 f = boost::bind(&configCb, _1, _2);
 server.setCallback(f);
 
- Multiple_Objects_Detection _multiple_objects_detection(cameraName,output_detection_topic,useExact, useCompressed,
+ Multiple_Objects_Detection _multiple_objects_detection(output_detection_topic,useExact, useCompressed,
 use_background_removal,background_calculate_frames,threshold_4_detecting_foreground,show_2D_tracks);
 
 std::cout << "start detecting..."
<< std::endl; diff --git a/detection/cfg/multiple_objects_detection.cfg b/detection/cfg/multiple_objects_detection.cfg index c588370..9753a6f 100755 --- a/detection/cfg/multiple_objects_detection.cfg +++ b/detection/cfg/multiple_objects_detection.cfg @@ -2,7 +2,7 @@ # Declare parameters that control haar-based people detection -PACKAGE='multiple_objects_detection' +PACKAGE='detection' from dynamic_reconfigure.parameter_generator_catkin import * @@ -13,8 +13,6 @@ gen = ParameterGenerator() ############################################## - - gen.add("HMax", int_t, 0, "Hue max", 360, 0, 360) gen.add("SMax", int_t, 0, "Saturation max", 255, 0, 255) gen.add("VMax", int_t, 0, "Value max", 255, 0, 255) @@ -22,11 +20,9 @@ gen.add("HMin", int_t, 0, "Hue min", 0, 0, 360) gen.add("SMin", int_t, 0, "Saturation min", 80, 0, 255) gen.add("VMin", int_t, 0, "Value min", 100, 0, 255) - -gen.add("use_hs", bool_t, 0, "whether use HS or just H to gennerate the hist of roi to do the back projection", True) gen.add("h_bins", int_t, 0, "Divide (HMin,HMax) to h_bins parts", 36, 1, 100) gen.add("s_bins", int_t, 0, "Divide (SMin,SMax) to s_bins parts", 18, 1, 100) - +gen.add("d_bins", int_t, 0, "Divide (DMin,DMax) to d_bins parts", 100, 1, 255) gen.add("AREA_TOLERANCE", int_t, 0, "AREA_TOLERANCE", 20, 5, 100) gen.add("QUALITY_TOLERANCE", int_t, 0, "QUALITY_TOLERANCE", 20000, 5000, 65535) @@ -35,4 +31,6 @@ gen.add("DENSITY_TOLORENCE", int_t, 0, "DENSITY_TOLORENCE", 4, 0, 50) -exit(gen.generate(PACKAGE, "multiple_objects_detection_node", "multiple_objects_detection")) + + +exit(gen.generate(PACKAGE, "detection", "multiple_objects_detection")) diff --git a/detection/conf/multiple_objects_detection.yaml b/detection/conf/multiple_objects_detection.yaml index 1fdd7b6..99f65a3 100644 --- a/detection/conf/multiple_objects_detection.yaml +++ b/detection/conf/multiple_objects_detection.yaml @@ -2,19 +2,10 @@ ## multiple_objects_detection ## ############################ -sensor_name: kinect2_head -show_2D_track: false output_detection_topic: /objects_detector/detections - - - - - - - - - +#if you want to see the tracks result on the image, set show_2D_track to true +show_2D_track: false ############################ ## Background removal ## @@ -36,22 +27,29 @@ threshold_4_detecting_foreground: 100 ########for the HSV mask###### +#set a mask(filter) to gain a better performance(set SMin or VMin smaller to get a larger range, also more noise) HMin: 0 -SMin: 80 -VMin: 100 HMax: 360 + +SMin: 80 SMax: 256 + +VMin: 100 VMax: 256 ########for the HSV mask####### ###########For camshift######## -# if use_hs==true, use hue and s to generate the hitograme to do the bacprojection , if not, just used the hue image -use_hs: true - -#for the hue backrojection hist ,divide (HMin,HMax) to h_bins parts +#for the hue backrojection hist ,divide (HMin,HMax) to "h_bins" parts, h_bins: 36 s_bins: 18 + +#for the depth, transfer the depth from (1000,9000) into (0, 255), than divide them into "d_bins" parts +d_bins: 100 + +# Three options : H HS HSD : +# if the objects are in different color, choose HS which is faster, or choose HSD. 
+Backprojection_Mode: HS ###########For camshift######### diff --git a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h index 98130f2..5871634 100644 --- a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h +++ b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h @@ -75,7 +75,7 @@ class Multiple_Objects_Detection ////////////For receiving color and depth//////////// std::mutex lock; - std::string cameraName; + // std::string cameraName; std::string topicColor, topicDepth; const bool useExact, useCompressed; bool updateImage; @@ -122,8 +122,6 @@ class Multiple_Objects_Detection ///////////For generating 3d position/////////// - - ///////////For publish detection topic/////////// std::string output_detection_topic; ros::Publisher detection_pub; @@ -136,28 +134,30 @@ class Multiple_Objects_Detection // cv::Mat color, depth; std::vector Object_Detectors; std::vector current_detected_boxes; - cv::Mat main_color,main_depth; + cv::Mat main_color,main_depth_16,main_depth_8; ///////////For main detection/////////// - - ///////////For display/////////// bool show_2D_tracks; std::vector> tracks_2D; ///////////For display/////////// + //test time cost + // std::chrono::time_point start, now; + public: - Multiple_Objects_Detection(const std::string _cameraName, const std::string &output_detection_topic,const bool useExact, const bool useCompressed, + Multiple_Objects_Detection(const std::string &output_detection_topic,const bool useExact, const bool useCompressed, const bool use_background_removal, const int background_calculate_frames,const int threshold_4_detecting_foreground,const bool show_2D_tracks) - : cameraName(_cameraName), output_detection_topic(output_detection_topic),useExact(useExact), useCompressed(useCompressed),updateImage(false), running(false), + : output_detection_topic(output_detection_topic),useExact(useExact), useCompressed(useCompressed),updateImage(false), running(false), use_background_removal(use_background_removal), objects_selected(false),paused(false),background_calculate_frames(background_calculate_frames),threshold_4_detecting_foreground(threshold_4_detecting_foreground), queueSize(5), nh(), spinner(0), it(nh) ,show_2D_tracks(show_2D_tracks) { + std::string cameraName = "kinect2_head"; + topicColor = "/" + cameraName + "/" + K2_TOPIC_LORES_COLOR K2_TOPIC_RAW; + topicDepth = "/" + cameraName + "/" + K2_TOPIC_LORES_DEPTH K2_TOPIC_RAW; - topicColor = "/" + _cameraName + "/" + K2_TOPIC_LORES_COLOR K2_TOPIC_RAW; - topicDepth = "/" + _cameraName + "/" + K2_TOPIC_LORES_DEPTH K2_TOPIC_RAW; cameraMatrixColor = cv::Mat::zeros(3, 3, CV_64F); cameraMatrixDepth = cv::Mat::zeros(3, 3, CV_64F); detection_pub=nh.advertise(output_detection_topic,3); @@ -168,7 +168,6 @@ class Multiple_Objects_Detection } - void run_detection(){ start_reciver(); @@ -180,7 +179,7 @@ class Multiple_Objects_Detection lock.lock(); main_color = this->color; - main_depth = this->depth; + main_depth_16 = this->depth; updateImage = false; lock.unlock(); @@ -203,20 +202,28 @@ class Multiple_Objects_Detection if(!objects_selected&&paused)// pause to select object { multiple_objects_detector_initialization(); + cv::destroyWindow("Press P to pause and select objects:"); } //!!!!!!!!!!!!!!!!!!!!!!!main loop!!!!!!!!!!!!!!!!!!!!!!! 
else if(objects_selected&&!paused)// start to track after selecting objects { + + // start = std::chrono::high_resolution_clock::now(); multiple_objects_detection_main(); + // now = std::chrono::high_resolution_clock::now(); + // double elapsed = std::chrono::duration_cast(now - start).count() / 1000.0; + // std::cout<(current_center2D_d.y,current_center2D_d.x)/1000.0;//meter ,not mm + double current_center_depth=main_depth_16.at(current_center2D_d.y,current_center2D_d.x)/1000.0;//meter ,not mm detection_msg.box_3D.p1.z=current_center_depth; detection_msg.box_3D.p1.x=(current_center2D_d.x-cx)*fx*current_center_depth; @@ -479,7 +502,6 @@ class Multiple_Objects_Detection } detection_pub.publish(detection_array_msg); // publish message - } @@ -491,13 +513,17 @@ class Multiple_Objects_Detection for( std::list ::iterator it = current_track.begin(); it!=current_track.end(); it++) { - Rect test_Rect=*it; + Rect test_Rect((*it).x+((*it).width)/2,(*it).y+((*it).height)/2,1,1); + test_Rect=test_Rect&Rect(0,0,main_color.size().width,main_color.size().height); cv::rectangle(main_color, test_Rect, cv::Scalar(250*(i), 250*(i-1), 250*(i-2)), 2, CV_AA); } } + cv::imshow( "show_2D_tracks", main_color ); } + + void callback(const sensor_msgs::Image::ConstPtr imageColor, const sensor_msgs::Image::ConstPtr imageDepth, const sensor_msgs::CameraInfo::ConstPtr cameraInfoColor, const sensor_msgs::CameraInfo::ConstPtr cameraInfoDepth) { @@ -554,4 +580,3 @@ class Multiple_Objects_Detection }; - diff --git a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detectionConfig.h b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detectionConfig.h deleted file mode 100644 index f30e670..0000000 --- a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detectionConfig.h +++ /dev/null @@ -1,626 +0,0 @@ -//#line 2 "/opt/ros/indigo/share/dynamic_reconfigure/cmake/../templates/ConfigType.h.template" -// ********************************************************* -// -// File autogenerated for the multiple_objects_detection package -// by the dynamic_reconfigure package. -// Please do not edit. 
-// -// ********************************************************/ - -#ifndef __multiple_objects_detection__MULTIPLE_OBJECTS_DETECTIONCONFIG_H__ -#define __multiple_objects_detection__MULTIPLE_OBJECTS_DETECTIONCONFIG_H__ - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace multiple_objects_detection -{ - class multiple_objects_detectionConfigStatics; - - class multiple_objects_detectionConfig - { - public: - class AbstractParamDescription : public dynamic_reconfigure::ParamDescription - { - public: - AbstractParamDescription(std::string n, std::string t, uint32_t l, - std::string d, std::string e) - { - name = n; - type = t; - level = l; - description = d; - edit_method = e; - } - - virtual void clamp(multiple_objects_detectionConfig &config, const multiple_objects_detectionConfig &max, const multiple_objects_detectionConfig &min) const = 0; - virtual void calcLevel(uint32_t &level, const multiple_objects_detectionConfig &config1, const multiple_objects_detectionConfig &config2) const = 0; - virtual void fromServer(const ros::NodeHandle &nh, multiple_objects_detectionConfig &config) const = 0; - virtual void toServer(const ros::NodeHandle &nh, const multiple_objects_detectionConfig &config) const = 0; - virtual bool fromMessage(const dynamic_reconfigure::Config &msg, multiple_objects_detectionConfig &config) const = 0; - virtual void toMessage(dynamic_reconfigure::Config &msg, const multiple_objects_detectionConfig &config) const = 0; - virtual void getValue(const multiple_objects_detectionConfig &config, boost::any &val) const = 0; - }; - - typedef boost::shared_ptr AbstractParamDescriptionPtr; - typedef boost::shared_ptr AbstractParamDescriptionConstPtr; - - template - class ParamDescription : public AbstractParamDescription - { - public: - ParamDescription(std::string name, std::string type, uint32_t level, - std::string description, std::string edit_method, T multiple_objects_detectionConfig::* f) : - AbstractParamDescription(name, type, level, description, edit_method), - field(f) - {} - - T (multiple_objects_detectionConfig::* field); - - virtual void clamp(multiple_objects_detectionConfig &config, const multiple_objects_detectionConfig &max, const multiple_objects_detectionConfig &min) const - { - if (config.*field > max.*field) - config.*field = max.*field; - - if (config.*field < min.*field) - config.*field = min.*field; - } - - virtual void calcLevel(uint32_t &comb_level, const multiple_objects_detectionConfig &config1, const multiple_objects_detectionConfig &config2) const - { - if (config1.*field != config2.*field) - comb_level |= level; - } - - virtual void fromServer(const ros::NodeHandle &nh, multiple_objects_detectionConfig &config) const - { - nh.getParam(name, config.*field); - } - - virtual void toServer(const ros::NodeHandle &nh, const multiple_objects_detectionConfig &config) const - { - nh.setParam(name, config.*field); - } - - virtual bool fromMessage(const dynamic_reconfigure::Config &msg, multiple_objects_detectionConfig &config) const - { - return dynamic_reconfigure::ConfigTools::getParameter(msg, name, config.*field); - } - - virtual void toMessage(dynamic_reconfigure::Config &msg, const multiple_objects_detectionConfig &config) const - { - dynamic_reconfigure::ConfigTools::appendParameter(msg, name, config.*field); - } - - virtual void getValue(const multiple_objects_detectionConfig &config, boost::any &val) const - { - val = config.*field; - } - }; - - class AbstractGroupDescription : public dynamic_reconfigure::Group - 
{ - public: - AbstractGroupDescription(std::string n, std::string t, int p, int i, bool s) - { - name = n; - type = t; - parent = p; - state = s; - id = i; - } - - std::vector abstract_parameters; - bool state; - - virtual void toMessage(dynamic_reconfigure::Config &msg, const boost::any &config) const = 0; - virtual bool fromMessage(const dynamic_reconfigure::Config &msg, boost::any &config) const =0; - virtual void updateParams(boost::any &cfg, multiple_objects_detectionConfig &top) const= 0; - virtual void setInitialState(boost::any &cfg) const = 0; - - - void convertParams() - { - for(std::vector::const_iterator i = abstract_parameters.begin(); i != abstract_parameters.end(); ++i) - { - parameters.push_back(dynamic_reconfigure::ParamDescription(**i)); - } - } - }; - - typedef boost::shared_ptr AbstractGroupDescriptionPtr; - typedef boost::shared_ptr AbstractGroupDescriptionConstPtr; - - template - class GroupDescription : public AbstractGroupDescription - { - public: - GroupDescription(std::string name, std::string type, int parent, int id, bool s, T PT::* f) : AbstractGroupDescription(name, type, parent, id, s), field(f) - { - } - - GroupDescription(const GroupDescription& g): AbstractGroupDescription(g.name, g.type, g.parent, g.id, g.state), field(g.field), groups(g.groups) - { - parameters = g.parameters; - abstract_parameters = g.abstract_parameters; - } - - virtual bool fromMessage(const dynamic_reconfigure::Config &msg, boost::any &cfg) const - { - PT* config = boost::any_cast(cfg); - if(!dynamic_reconfigure::ConfigTools::getGroupState(msg, name, (*config).*field)) - return false; - - for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) - { - boost::any n = &((*config).*field); - if(!(*i)->fromMessage(msg, n)) - return false; - } - - return true; - } - - virtual void setInitialState(boost::any &cfg) const - { - PT* config = boost::any_cast(cfg); - T* group = &((*config).*field); - group->state = state; - - for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) - { - boost::any n = boost::any(&((*config).*field)); - (*i)->setInitialState(n); - } - - } - - virtual void updateParams(boost::any &cfg, multiple_objects_detectionConfig &top) const - { - PT* config = boost::any_cast(cfg); - - T* f = &((*config).*field); - f->setParams(top, abstract_parameters); - - for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) - { - boost::any n = &((*config).*field); - (*i)->updateParams(n, top); - } - } - - virtual void toMessage(dynamic_reconfigure::Config &msg, const boost::any &cfg) const - { - const PT config = boost::any_cast(cfg); - dynamic_reconfigure::ConfigTools::appendGroup(msg, name, id, parent, config.*field); - - for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) - { - (*i)->toMessage(msg, config.*field); - } - } - - T (PT::* field); - std::vector groups; - }; - -class DEFAULT -{ - public: - DEFAULT() - { - state = true; - name = "Default"; - } - - void setParams(multiple_objects_detectionConfig &config, const std::vector params) - { - for (std::vector::const_iterator _i = params.begin(); _i != params.end(); ++_i) - { - boost::any val; - (*_i)->getValue(config, val); - - if("HMax"==(*_i)->name){HMax = boost::any_cast(val);} - if("SMax"==(*_i)->name){SMax = boost::any_cast(val);} - if("VMax"==(*_i)->name){VMax = boost::any_cast(val);} - if("HMin"==(*_i)->name){HMin = boost::any_cast(val);} - if("SMin"==(*_i)->name){SMin = boost::any_cast(val);} - if("VMin"==(*_i)->name){VMin = 
boost::any_cast(val);} - if("use_hs"==(*_i)->name){use_hs = boost::any_cast(val);} - if("h_bins"==(*_i)->name){h_bins = boost::any_cast(val);} - if("s_bins"==(*_i)->name){s_bins = boost::any_cast(val);} - if("AREA_TOLERANCE"==(*_i)->name){AREA_TOLERANCE = boost::any_cast(val);} - if("QUALITY_TOLERANCE"==(*_i)->name){QUALITY_TOLERANCE = boost::any_cast(val);} - if("DENSITY_TOLORENCE"==(*_i)->name){DENSITY_TOLORENCE = boost::any_cast(val);} - } - } - - int HMax; -int SMax; -int VMax; -int HMin; -int SMin; -int VMin; -bool use_hs; -int h_bins; -int s_bins; -int AREA_TOLERANCE; -int QUALITY_TOLERANCE; -int DENSITY_TOLORENCE; - - bool state; - std::string name; - - -}groups; - - - -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - int HMax; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - int SMax; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - int VMax; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - int HMin; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - int SMin; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - int VMin; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - bool use_hs; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - int h_bins; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - int s_bins; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - int AREA_TOLERANCE; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - int QUALITY_TOLERANCE; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - int DENSITY_TOLORENCE; -//#line 218 "/opt/ros/indigo/share/dynamic_reconfigure/cmake/../templates/ConfigType.h.template" - - bool __fromMessage__(dynamic_reconfigure::Config &msg) - { - const std::vector &__param_descriptions__ = __getParamDescriptions__(); - const std::vector &__group_descriptions__ = __getGroupDescriptions__(); - - int count = 0; - for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) - if ((*i)->fromMessage(msg, *this)) - count++; - - for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); i ++) - { - if ((*i)->id == 0) - { - boost::any n = boost::any(this); - (*i)->updateParams(n, *this); - (*i)->fromMessage(msg, n); - } - } - - if (count != dynamic_reconfigure::ConfigTools::size(msg)) - { - ROS_ERROR("multiple_objects_detectionConfig::__fromMessage__ called with an unexpected parameter."); - ROS_ERROR("Booleans:"); - for (unsigned int i = 0; i < msg.bools.size(); i++) - ROS_ERROR(" %s", msg.bools[i].name.c_str()); - ROS_ERROR("Integers:"); - for (unsigned int i = 0; i < msg.ints.size(); i++) - ROS_ERROR(" %s", msg.ints[i].name.c_str()); - ROS_ERROR("Doubles:"); - for (unsigned int i = 0; i < msg.doubles.size(); i++) - ROS_ERROR(" %s", msg.doubles[i].name.c_str()); - ROS_ERROR("Strings:"); - for (unsigned int i = 0; i < msg.strs.size(); i++) - 
ROS_ERROR(" %s", msg.strs[i].name.c_str()); - // @todo Check that there are no duplicates. Make this error more - // explicit. - return false; - } - return true; - } - - // This version of __toMessage__ is used during initialization of - // statics when __getParamDescriptions__ can't be called yet. - void __toMessage__(dynamic_reconfigure::Config &msg, const std::vector &__param_descriptions__, const std::vector &__group_descriptions__) const - { - dynamic_reconfigure::ConfigTools::clear(msg); - for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) - (*i)->toMessage(msg, *this); - - for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); ++i) - { - if((*i)->id == 0) - { - (*i)->toMessage(msg, *this); - } - } - } - - void __toMessage__(dynamic_reconfigure::Config &msg) const - { - const std::vector &__param_descriptions__ = __getParamDescriptions__(); - const std::vector &__group_descriptions__ = __getGroupDescriptions__(); - __toMessage__(msg, __param_descriptions__, __group_descriptions__); - } - - void __toServer__(const ros::NodeHandle &nh) const - { - const std::vector &__param_descriptions__ = __getParamDescriptions__(); - for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) - (*i)->toServer(nh, *this); - } - - void __fromServer__(const ros::NodeHandle &nh) - { - static bool setup=false; - - const std::vector &__param_descriptions__ = __getParamDescriptions__(); - for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) - (*i)->fromServer(nh, *this); - - const std::vector &__group_descriptions__ = __getGroupDescriptions__(); - for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); i++){ - if (!setup && (*i)->id == 0) { - setup = true; - boost::any n = boost::any(this); - (*i)->setInitialState(n); - } - } - } - - void __clamp__() - { - const std::vector &__param_descriptions__ = __getParamDescriptions__(); - const multiple_objects_detectionConfig &__max__ = __getMax__(); - const multiple_objects_detectionConfig &__min__ = __getMin__(); - for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) - (*i)->clamp(*this, __max__, __min__); - } - - uint32_t __level__(const multiple_objects_detectionConfig &config) const - { - const std::vector &__param_descriptions__ = __getParamDescriptions__(); - uint32_t level = 0; - for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) - (*i)->calcLevel(level, config, *this); - return level; - } - - static const dynamic_reconfigure::ConfigDescription &__getDescriptionMessage__(); - static const multiple_objects_detectionConfig &__getDefault__(); - static const multiple_objects_detectionConfig &__getMax__(); - static const multiple_objects_detectionConfig &__getMin__(); - static const std::vector &__getParamDescriptions__(); - static const std::vector &__getGroupDescriptions__(); - - private: - static const multiple_objects_detectionConfigStatics *__get_statics__(); - }; - - template <> // Max and min are ignored for strings. 
- inline void multiple_objects_detectionConfig::ParamDescription::clamp(multiple_objects_detectionConfig &config, const multiple_objects_detectionConfig &max, const multiple_objects_detectionConfig &min) const - { - return; - } - - class multiple_objects_detectionConfigStatics - { - friend class multiple_objects_detectionConfig; - - multiple_objects_detectionConfigStatics() - { -multiple_objects_detectionConfig::GroupDescription Default("Default", "", 0, 0, true, &multiple_objects_detectionConfig::groups); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.HMax = 0; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.HMax = 360; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.HMax = 360; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("HMax", "int", 0, "Hue max", "", &multiple_objects_detectionConfig::HMax))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("HMax", "int", 0, "Hue max", "", &multiple_objects_detectionConfig::HMax))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.SMax = 0; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.SMax = 255; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.SMax = 255; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("SMax", "int", 0, "Saturation max", "", &multiple_objects_detectionConfig::SMax))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("SMax", "int", 0, "Saturation max", "", &multiple_objects_detectionConfig::SMax))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.VMax = 0; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.VMax = 255; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.VMax = 255; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("VMax", "int", 0, "Value max", "", &multiple_objects_detectionConfig::VMax))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - 
__param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("VMax", "int", 0, "Value max", "", &multiple_objects_detectionConfig::VMax))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.HMin = 0; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.HMin = 360; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.HMin = 0; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("HMin", "int", 0, "Hue min", "", &multiple_objects_detectionConfig::HMin))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("HMin", "int", 0, "Hue min", "", &multiple_objects_detectionConfig::HMin))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.SMin = 0; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.SMin = 255; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.SMin = 80; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("SMin", "int", 0, "Saturation min", "", &multiple_objects_detectionConfig::SMin))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("SMin", "int", 0, "Saturation min", "", &multiple_objects_detectionConfig::SMin))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.VMin = 0; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.VMin = 255; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.VMin = 100; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("VMin", "int", 0, "Value min", "", &multiple_objects_detectionConfig::VMin))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("VMin", "int", 0, "Value min", "", &multiple_objects_detectionConfig::VMin))); -//#line 280 
"/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.use_hs = 0; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.use_hs = 1; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.use_hs = 1; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("use_hs", "bool", 0, "whether use HS or just H to gennerate the hist of roi to do the back projection", "", &multiple_objects_detectionConfig::use_hs))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("use_hs", "bool", 0, "whether use HS or just H to gennerate the hist of roi to do the back projection", "", &multiple_objects_detectionConfig::use_hs))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.h_bins = 1; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.h_bins = 100; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.h_bins = 36; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("h_bins", "int", 0, "Divide (HMin,HMax) to h_bins parts", "", &multiple_objects_detectionConfig::h_bins))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("h_bins", "int", 0, "Divide (HMin,HMax) to h_bins parts", "", &multiple_objects_detectionConfig::h_bins))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.s_bins = 1; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.s_bins = 100; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.s_bins = 18; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("s_bins", "int", 0, "Divide (SMin,SMax) to s_bins parts", "", &multiple_objects_detectionConfig::s_bins))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("s_bins", "int", 0, "Divide (SMin,SMax) to s_bins parts", "", &multiple_objects_detectionConfig::s_bins))); -//#line 280 
"/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.AREA_TOLERANCE = 5; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.AREA_TOLERANCE = 100; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.AREA_TOLERANCE = 20; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("AREA_TOLERANCE", "int", 0, "AREA_TOLERANCE", "", &multiple_objects_detectionConfig::AREA_TOLERANCE))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("AREA_TOLERANCE", "int", 0, "AREA_TOLERANCE", "", &multiple_objects_detectionConfig::AREA_TOLERANCE))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.QUALITY_TOLERANCE = 5000; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.QUALITY_TOLERANCE = 65535; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.QUALITY_TOLERANCE = 20000; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("QUALITY_TOLERANCE", "int", 0, "QUALITY_TOLERANCE", "", &multiple_objects_detectionConfig::QUALITY_TOLERANCE))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("QUALITY_TOLERANCE", "int", 0, "QUALITY_TOLERANCE", "", &multiple_objects_detectionConfig::QUALITY_TOLERANCE))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __min__.DENSITY_TOLORENCE = 0; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __max__.DENSITY_TOLORENCE = 50; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __default__.DENSITY_TOLORENCE = 4; -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.abstract_parameters.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("DENSITY_TOLORENCE", "int", 0, "DENSITY_TOLORENCE", "", &multiple_objects_detectionConfig::DENSITY_TOLORENCE))); -//#line 280 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __param_descriptions__.push_back(multiple_objects_detectionConfig::AbstractParamDescriptionConstPtr(new multiple_objects_detectionConfig::ParamDescription("DENSITY_TOLORENCE", "int", 0, "DENSITY_TOLORENCE", "", &multiple_objects_detectionConfig::DENSITY_TOLORENCE))); 
-//#line 235 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - Default.convertParams(); -//#line 235 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py" - __group_descriptions__.push_back(multiple_objects_detectionConfig::AbstractGroupDescriptionConstPtr(new multiple_objects_detectionConfig::GroupDescription(Default))); -//#line 353 "/opt/ros/indigo/share/dynamic_reconfigure/cmake/../templates/ConfigType.h.template" - - for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); ++i) - { - __description_message__.groups.push_back(**i); - } - __max__.__toMessage__(__description_message__.max, __param_descriptions__, __group_descriptions__); - __min__.__toMessage__(__description_message__.min, __param_descriptions__, __group_descriptions__); - __default__.__toMessage__(__description_message__.dflt, __param_descriptions__, __group_descriptions__); - } - std::vector __param_descriptions__; - std::vector __group_descriptions__; - multiple_objects_detectionConfig __max__; - multiple_objects_detectionConfig __min__; - multiple_objects_detectionConfig __default__; - dynamic_reconfigure::ConfigDescription __description_message__; - - static const multiple_objects_detectionConfigStatics *get_instance() - { - // Split this off in a separate function because I know that - // instance will get initialized the first time get_instance is - // called, and I am guaranteeing that get_instance gets called at - // most once. - static multiple_objects_detectionConfigStatics instance; - return &instance; - } - }; - - inline const dynamic_reconfigure::ConfigDescription &multiple_objects_detectionConfig::__getDescriptionMessage__() - { - return __get_statics__()->__description_message__; - } - - inline const multiple_objects_detectionConfig &multiple_objects_detectionConfig::__getDefault__() - { - return __get_statics__()->__default__; - } - - inline const multiple_objects_detectionConfig &multiple_objects_detectionConfig::__getMax__() - { - return __get_statics__()->__max__; - } - - inline const multiple_objects_detectionConfig &multiple_objects_detectionConfig::__getMin__() - { - return __get_statics__()->__min__; - } - - inline const std::vector &multiple_objects_detectionConfig::__getParamDescriptions__() - { - return __get_statics__()->__param_descriptions__; - } - - inline const std::vector &multiple_objects_detectionConfig::__getGroupDescriptions__() - { - return __get_statics__()->__group_descriptions__; - } - - inline const multiple_objects_detectionConfigStatics *multiple_objects_detectionConfig::__get_statics__() - { - const static multiple_objects_detectionConfigStatics *statics; - - if (statics) // Common case - return statics; - - boost::mutex::scoped_lock lock(dynamic_reconfigure::__init_mutex__); - - if (statics) // In case we lost a race. 
-      return statics;
-
-    statics = multiple_objects_detectionConfigStatics::get_instance();
-
-    return statics;
-  }
-
-
-}
-
-#endif // __MULTIPLE_OBJECTS_DETECTIONRECONFIGURATOR_H__
diff --git a/detection/include/open_ptrack/multiple_objects_detection/object_detector.h b/detection/include/open_ptrack/multiple_objects_detection/object_detector.h
index 2fe8457..7dbbffc 100644
--- a/detection/include/open_ptrack/multiple_objects_detection/object_detector.h
+++ b/detection/include/open_ptrack/multiple_objects_detection/object_detector.h
@@ -11,6 +11,7 @@ class Object_Detector
 {
 public:
     bool occluded;
+    bool half_occluded;
     double half_occluded_frames;

     // ///////////For hsv mask///////////
@@ -19,12 +20,26 @@ class Object_Detector
     // ///////////For hsv mask///////////

     // ///////////For camshift///////////
-    static bool use_hs;//if use_hs==true, use hue and s to generate the hitograme to do the bacprojection , if not, just used the hue image
+
+    static std::string Backprojection_Mode;
+    // Backprojection_Mode:
+    //   H   - build a hue histogram of the selected ROI once, then back-project every hue frame against that permanent hue histogram
+    //   HS  - build a hue-saturation histogram of the selected ROI once, then back-project every hue+saturation frame against that permanent HS histogram
+    //   HSD - build a hue-saturation histogram of the selected ROI once (hs_hist_for_HSD, used on its own only under occlusion) and derive its pdf (hs_hist_pdf);
+    //         on every frame, build a depth histogram of the ROI and derive its pdf (tmp_depth_hist_pdf);
+    //         combine the permanent hs_hist_pdf with the per-frame tmp_depth_hist_pdf and back-project every HSD frame; when occluded, fall back to hs_hist_for_HSD
+
+    static int h_bins,s_bins;//bin counts for the backprojection histograms: (HMin,HMax) is divided into h_bins parts
+    static int d_bins;
     // ///////////For camshift///////////
+
+    ///////////For camshift recover from occlusion///////////
    //for camshift: each pixel of the backproject image represents the similarity with the ROI; its value equals 255*probability, where the probability comes from the histogram
    // QUALITY_TOLERANCE here means the tolerance on the sum over the window in the backproject image; it represents the quality of the window.
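The HSD mode described above reduces to an independence assumption between color and depth: the joint histogram is taken as p(h,s,d) ≈ p(h,s)·p(d), scaled into the 0..255 range that cv::calcBackProject expects. A minimal, self-contained sketch of that combination step (illustrative names, not the patch's code; it assumes both input pdfs are CV_32F and each sums to 1):

#include <opencv2/core.hpp>

// Combine a permanent hue-saturation pdf (h_bins x s_bins) with a per-frame
// depth pdf (d_bins x 1) into a 3D histogram usable by cv::calcBackProject.
cv::Mat combine_hs_depth(const cv::Mat& hs_pdf, const cv::Mat& depth_pdf,
                         int h_bins, int s_bins, int d_bins)
{
    int sizes[3] = { h_bins, s_bins, d_bins };
    cv::Mat hsd(3, sizes, CV_32F, cv::Scalar(0));
    for (int i = 0; i < h_bins; ++i)
        for (int j = 0; j < s_bins; ++j)
            for (int k = 0; k < d_bins; ++k)
                hsd.at<float>(i, j, k) =
                    255.f * hs_pdf.at<float>(i, j) * depth_pdf.at<float>(k);
    double mn, mx;
    cv::minMaxIdx(hsd, &mn, &mx);        // rescale so the strongest bin is exactly 255
    if (mx > 0.0) hsd *= 255.0 / mx;
    return hsd;
}

This is also why the implementation below flags the triple loop as expensive: the combination costs O(h_bins * s_bins * d_bins) per frame while the object is unoccluded.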
@@ -40,7 +55,6 @@ class Object_Detector
     ///////////For camshift recover from occlusion///////////

     static std::vector<cv::Rect> current_detected_boxes;//this comes from class multiple_objects_detection: on every detection of every object, multiple_objects_detection updates this variable; we use it to generate the other_object_mask
-
 private:
     static cv::Mat mainColor;
     static cv::Mat mainDepth;
@@ -48,10 +62,17 @@ class Object_Detector
     bool firstRun;
     cv::Rect currentRect;
+    cv::RotatedRect current_detectedBox;
     cv::Rect detectWindow;
-    cv::Mat hsv, hue, hsv_mask, hist, backproj; //, histimg;
+    cv::Mat hsv, hue, hsv_mask,backproj; //, histimg;
+    cv::Mat h_hist;//H mode
+    cv::Mat hs_hist_for_HS;//HS mode
+    cv::Mat hs_hist_for_HSD,//used on its own only under occlusion, because when occluded the depth information cannot be used to detect the objects
+    hs_hist_pdf,//pdf derived from hs_hist_for_HSD; used as the permanent part
+    tmp_depth_hist_pdf,//recomputed from every depth frame; used as the temporary part
+    hsd_hist;//combination of hs_hist_pdf (permanent) and tmp_depth_hist_pdf (temporary, per frame)
     cv::Rect selection;
     cv::Mat Color,Depth;
@@ -61,8 +82,9 @@ class Object_Detector
 public:
     Object_Detector()
-        :firstRun(true),occluded(false),half_occluded_frames(10)
+        :firstRun(true),occluded(false),half_occluded(false),half_occluded_frames(10)
     {
+//        hsd_hist=cv::zeros()
     }

     static void setMainColor(const cv::Mat _mainColor);
@@ -77,6 +99,12 @@ class Object_Detector
     void setcurrent_detected_boxes(std::vector<cv::Rect> _current_detected_boxes);
     std::vector<cv::Rect> getcurrent_detected_boxes();
+
+
+    void H_backprojection();
+    void HS_backprojection();
+    void HSD_backprojection();
+
     cv::RotatedRect object_shift(InputArray _probColor,Rect& window, TermCriteria criteria);

     cv::RotatedRect detectCurrentRect(int id);
@@ -90,13 +118,15 @@ int Object_Detector::HMin;
 int Object_Detector::SMin;
 int Object_Detector::VMin;
-bool Object_Detector::use_hs;
 int Object_Detector::h_bins;
 int Object_Detector::s_bins;
+int Object_Detector::d_bins;
 int Object_Detector::AREA_TOLERANCE;
 int Object_Detector::QUALITY_TOLERANCE;
 double Object_Detector::DENSITY_TOLORENCE;
 std::vector<cv::Rect> Object_Detector::current_detected_boxes;
+
+std::string Object_Detector::Backprojection_Mode;

 #endif // Object_Detector_H
diff --git a/detection/src/multiple_objects_detection/object_detector.cpp b/detection/src/multiple_objects_detection/object_detector.cpp
index d359860..d3550b8 100644
--- a/detection/src/multiple_objects_detection/object_detector.cpp
+++ b/detection/src/multiple_objects_detection/object_detector.cpp
@@ -46,6 +46,175 @@ std::vector<cv::Rect> Object_Detector::getcurrent_detected_boxes()
     return current_detected_boxes;
 }

+void Object_Detector::H_backprojection()
+{
+    float h_ranges[] = {0,(float)HMax};
+    const float* ph_ranges = h_ranges;
+    int h_channels[] = {0, 0};
+    hue.create(hsv.size(), hsv.depth());
+    cv::mixChannels(&hsv, 1, &hue, 1, h_channels, 1);
+
+    if( firstRun )
+    {
+        cv::Mat roi(hue, selection), maskroi(hsv_mask, selection);
+        cv::calcHist(&roi, 1, 0, maskroi, h_hist, 1, &h_bins, &ph_ranges);
+        cv::normalize(h_hist, h_hist, 0, 255, CV_MINMAX);
+        detectWindow = selection;
+        firstRun = false;
+        // std::cout<<"H mode"<hsd
+
+
+    if( firstRun )
+    {
+        cv::Mat roi(hsv, selection), maskroi(hsv_mask, selection);
+        cv::calcHist(&roi, 1, hs_channels, maskroi, hs_hist_for_HSD, 2, hs_size, phs_ranges, true, false);
+        cv::normalize(hs_hist_for_HSD , hs_hist_for_HSD , 0, 255, CV_MINMAX);
+
+        double sum_hs_hist=sum(hs_hist_for_HSD)[0];
+        hs_hist_pdf=hs_hist_for_HSD/sum_hs_hist;
+
+        //used to generate the initial hsd_hist (used just to get the right data format)
+        cv::calcHist(&roi, 1, hsd_channels, maskroi, hsd_hist, 3, hsd_size, phsd_ranges, true, false);
+
+        cv::Mat roi_depth(Depth, selection);
+
+        //build the mask of the current detected box (rotated rect), named depth_mask: only the pixels inside current_detectedBox are set to 255
+        Point2f vertices[4];
+        current_detectedBox.points(vertices);
+        std::vector< std::vector<Point> > co_ordinates;
+        co_ordinates.push_back(std::vector<Point>());
+        co_ordinates[0].push_back(vertices[0]);
+        co_ordinates[0].push_back(vertices[1]);
+        co_ordinates[0].push_back(vertices[2]);
+        co_ordinates[0].push_back(vertices[3]);
+        depth_mask=Mat::zeros(Color.size(),CV_8UC1);
+        drawContours( depth_mask,co_ordinates,0, Scalar(255),CV_FILLED, 8 );
+        depth_mask&=hsv_mask;
+        cv::Mat depth_mask_roi(depth_mask, detectWindow);
+
+        cv::calcHist(&roi_depth, 1, 0, depth_mask_roi, tmp_depth_hist_pdf, 1, &d_bins, &pd_ranges);
+        double sum_tmp_depth_hist_pdf=sum(tmp_depth_hist_pdf)[0];
+        tmp_depth_hist_pdf=tmp_depth_hist_pdf/sum_tmp_depth_hist_pdf;
+
+        for (int i=0; i<h_bins; i++)
+        {
+            for (int j=0; j<s_bins; j++)
+            {
+                for (int k=0; k<d_bins; k++)
+                {
+                    hsd_hist.at<float>(i,j,k)=255*hs_hist_pdf.at<float>(i,j)*tmp_depth_hist_pdf.at<float>(k);
+                }
+            }
+        }
+        //normalize hsd_hist
+        double hMin,hMax;
+        minMaxIdx(hsd_hist, &hMin, &hMax);
+        hsd_hist = 255 * hsd_hist / hMax;
+
+        detectWindow = selection;
+        firstRun = false;
+    }
+
+    else
+    {
+        if(!occluded&&!half_occluded)
+        {
+            cv::Mat roi_depth(Depth, detectWindow);
+
+            //build the mask of the current detected box (rotated rect), named depth_mask: only the pixels inside current_detectedBox are set to 255
+            Point2f vertices[4];
+            current_detectedBox.points(vertices);
+            std::vector< std::vector<Point> > co_ordinates;
+            co_ordinates.push_back(std::vector<Point>());
+            co_ordinates[0].push_back(vertices[0]);
+            co_ordinates[0].push_back(vertices[1]);
+            co_ordinates[0].push_back(vertices[2]);
+            co_ordinates[0].push_back(vertices[3]);
+            depth_mask=Mat::zeros(Color.size(),CV_8UC1);
+            drawContours( depth_mask,co_ordinates,0, Scalar(255),CV_FILLED, 8 );
+            depth_mask&=hsv_mask;
+
+            cv::Mat depth_mask_roi(depth_mask, detectWindow);
+
+            cv::calcHist(&roi_depth, 1, 0, depth_mask_roi, tmp_depth_hist_pdf, 1, &d_bins, &pd_ranges);
+            double sum_tmp_depth_hist_pdf=sum(tmp_depth_hist_pdf)[0];
+            tmp_depth_hist_pdf=tmp_depth_hist_pdf/sum_tmp_depth_hist_pdf;
+
+            //////////////////////costs a lot of time//////////////////////
+            for (int i=0; i<h_bins; i++)
+            {
+                for (int j=0; j<s_bins; j++)
+                {
+                    for (int k=0; k<d_bins; k++)
+                    {
+                        hsd_hist.at<float>(i,j,k)=255*hs_hist_pdf.at<float>(i,j)*tmp_depth_hist_pdf.at<float>(k);
+                    }
+                }
+            }
+            //////////////////////~costs a lot of time//////////////////////
+
+            double hMin,hMax;
+            minMaxIdx(hsd_hist, &hMin, &hMax);
+            hsd_hist = 255 * hsd_hist / hMax;
+        }
+    }
+
+
+    if(!occluded&&!half_occluded)
+    {
+        cv::calcBackProject( &hsv, 1, hsd_channels, hsd_hist, backproj, phsd_ranges, 1, true );
+    }
+    else{
+        cv::calcBackProject( &hsv, 1, hs_channels, hs_hist_for_HSD, backproj, phs_ranges, 1, true );
+    }
+
+}
+
 cv::RotatedRect Object_Detector::object_shift(InputArray _probColor,Rect& window, TermCriteria criteria)
 {
     // CV_INSTRUMENT_REGION();
@@ -53,9 +222,8 @@ cv::RotatedRect Object_Detector::object_shift(InputArray _probColor,Rect& window
     Size size;
     Mat mat;
     mat = _probColor.getMat(), size = mat.size();
-
     cv::meanShift( _probColor, window, criteria );
-
+    // std::cout<<"real QUALITY_TOLERANCE"<1)
     {
@@ -279,23 +403,6 @@ cv::RotatedRect Object_Detector::detectCurrentRect(int id)
     int detectWindow_YT=detectWindow.y-AREA_TOLERANCE,detectWindow_YB=detectWindow.y+detectWindow.height+AREA_TOLERANCE;
     Rect
search_window=Rect(detectWindow_XL,detectWindow_YT,detectWindow_XR-detectWindow_XL,detectWindow_YB-detectWindow_YT)&Rect(0,0,Color.size().width,Color.size().height); position_mask(search_window)=255; - -// // calculate the other_object_mask -// other_object_mask=Mat::ones(Color.size(), CV_8U)*255; -// for (int i=0; i1) - tmp_detectedBox = object_shift(backproj, detectWindow, - cv::TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 )); -// cv::rectangle(backproj, detectWindow, cv::Scalar(255, 0, 0), 2, CV_AA); -// imshow("backproj final",backproj); - return tmp_detectedBox; + current_detectedBox = object_shift(backproj, detectWindow, + cv::TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 )); + // cv::rectangle(backproj, detectWindow, cv::Scalar(255, 0, 0), 2, CV_AA); + // imshow("backproj final",backproj); + + return current_detectedBox; } + From 716a1e82e23d3535419e61184614460d5af4904f Mon Sep 17 00:00:00 2001 From: yongheng1991 Date: Wed, 15 Mar 2017 16:40:41 +0100 Subject: [PATCH 3/9] A better visualization of both objects and people tracking --- tracking/conf/multicamera_tracking.rviz | 296 +++++++++++++++++++++--- 1 file changed, 265 insertions(+), 31 deletions(-) diff --git a/tracking/conf/multicamera_tracking.rviz b/tracking/conf/multicamera_tracking.rviz index 868f31a..6004b5d 100644 --- a/tracking/conf/multicamera_tracking.rviz +++ b/tracking/conf/multicamera_tracking.rviz @@ -6,9 +6,8 @@ Panels: Expanded: - /Global Options1 - /Status1 - - /PointCloud21 - - /MarkerArray1 - Splitter Ratio: 0.526846 + - /TF1/Frames1 + Splitter Ratio: 0.355837 Tree Height: 775 - Class: rviz/Selection Name: Selection @@ -28,7 +27,7 @@ Panels: Experimental: false Name: Time SyncMode: 0 - SyncSource: "" + SyncSource: PointCloud2 Visualization Manager: Class: "" Displays: @@ -62,7 +61,7 @@ Visualization Manager: Color: 255; 255; 255 Color Transformer: RGB8 Decay Time: 0 - Enabled: false + Enabled: true Invert Rainbow: false Max Color: 255; 255; 255 Max Intensity: 4096 @@ -73,15 +72,16 @@ Visualization Manager: Queue Size: 10 Selectable: true Size (Pixels): 5 - Size (m): 0.01 - Style: Points - Topic: /tracker/history_smoothed + Size (m): 0.03 + Style: Spheres + Topic: /tracker/object_history + Unreliable: false Use Fixed Frame: true Use rainbow: true - Value: false + Value: true - Class: rviz/MarkerArray Enabled: true - Marker Topic: /tracker/markers_array_smoothed + Marker Topic: /tracker/object_markers_array Name: MarkerArray Namespaces: numbers: true @@ -93,18 +93,42 @@ Visualization Manager: Frame Timeout: 15 Frames: All Enabled: false - Kinect: - Value: true - Kinect_depth_frame: + kinect2_far: Value: true - Kinect_depth_optical_frame: + kinect2_far_ir_frame: + Value: false + kinect2_far_ir_optical_frame: + Value: false + kinect2_far_link: + Value: false + kinect2_far_rgb_frame: + Value: false + kinect2_far_rgb_optical_frame: + Value: false + kinect2_head: Value: true - Kinect_link: - Value: true - Kinect_rgb_frame: - Value: true - Kinect_rgb_optical_frame: + kinect2_head_ir_frame: + Value: false + kinect2_head_ir_optical_frame: + Value: false + kinect2_head_link: + Value: false + kinect2_head_rgb_frame: + Value: false + kinect2_head_rgb_optical_frame: + Value: false + kinect2_lenovo: Value: true + kinect2_lenovo_ir_frame: + Value: false + kinect2_lenovo_ir_optical_frame: + Value: false + kinect2_lenovo_link: + Value: false + kinect2_lenovo_rgb_frame: + Value: false + kinect2_lenovo_rgb_optical_frame: + Value: false world: Value: true Marker Scale: 1 @@ -114,16 +138,221 @@ Visualization 
Manager: Show Names: true Tree: world: - Kinect: - Kinect_link: - Kinect_depth_frame: - Kinect_depth_optical_frame: + kinect2_far: + kinect2_far_link: + kinect2_far_ir_frame: + kinect2_far_ir_optical_frame: + {} + kinect2_far_rgb_frame: + kinect2_far_rgb_optical_frame: + {} + kinect2_head: + kinect2_head_link: + kinect2_head_ir_frame: + kinect2_head_ir_optical_frame: + {} + kinect2_head_rgb_frame: + kinect2_head_rgb_optical_frame: {} - Kinect_rgb_frame: - Kinect_rgb_optical_frame: + kinect2_lenovo: + kinect2_lenovo_link: + kinect2_lenovo_ir_frame: + kinect2_lenovo_ir_optical_frame: + {} + kinect2_lenovo_rgb_frame: + kinect2_lenovo_rgb_optical_frame: {} Update Interval: 0 Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 9 + Selectable: true + Size (Pixels): 3 + Size (m): 0.03 + Style: Boxes + Topic: /tracker/people_history_smoothed + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /tracker/people_markers_array + Name: MarkerArray + Namespaces: + numbers: true + people: true + Queue Size: 100 + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_head/depth_ir/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_head/depth_lowres/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_far/depth_lowres/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + 
Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_far/depth_ir/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: false + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: false + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_lenovo/depth_ir/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: false Enabled: true Global Options: Background Color: 48; 48; 48 @@ -148,17 +377,22 @@ Visualization Manager: Views: Current: Class: rviz/Orbit - Distance: 7.62407 + Distance: 14.7331 + Enable Stereo Rendering: + Stereo Eye Separation: 0.06 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false Focal Point: X: -0.326729 Y: 0.467386 Z: 1.2193 Name: Current View Near Clip Distance: 0.01 - Pitch: 1.5698 + Pitch: 0.815205 Target Frame: Value: Orbit (rviz) - Yaw: 1.5704 + Yaw: 5.82858 Saved: ~ Window Geometry: Displays: @@ -166,7 +400,7 @@ Window Geometry: Height: 1056 Hide Left Dock: false Hide Right Dock: true - QMainWindow State: 000000ff00000000fd00000004000000000000013c00000396fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000002800000396000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000007800000003efc0100000002fb0000000800540069006d0065010000000000000780000002f600fffffffb0000000800540069006d006501000000000000045000000000000000000000063e0000039600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + QMainWindow State: 
000000ff00000000fd0000000400000000000002c900000396fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000002800000396000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073f0000003efc0100000002fb0000000800540069006d006501000000000000073f000002f600fffffffb0000000800540069006d00650100000000000004500000000000000000000004700000039600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: @@ -175,6 +409,6 @@ Window Geometry: collapsed: false Views: collapsed: true - Width: 1920 - X: 0 + Width: 1855 + X: 65 Y: 24 From 4afdd5b0a741e367a06dd1edf6d8470730a5b427 Mon Sep 17 00:00:00 2001 From: yongheng1991 Date: Fri, 31 Mar 2017 20:58:04 +0200 Subject: [PATCH 4/9] with gui and roi projection --- detection/CMakeLists.txt | 1 - .../apps/multiple_objects_detection_node.cpp | 7 +- .../ground_based_people_detector_kinect2.yaml | 4 +- .../conf/multiple_objects_detection.yaml | 13 +- .../multiple_objects_detection.h | 308 ++++++---- detection/launch/camera_poses.txt | 3 + .../launch/detection_node_kinect2_head.launch | 16 + detection/launch/detector_kinect2.launch | 3 + .../launch/opt_calibration_master.launch | 68 +++ .../launch/opt_calibration_results.launch | 28 + .../launch/opt_define_reference_frame.launch | 43 ++ .../launch/sensor_kinect2_head.launch | 17 + opt_gui | 1 + opt_msgs/CMakeLists.txt | 3 +- opt_msgs/CMakeLists.txt.user | 371 ++++++++++++ opt_msgs/CMakeLists.txt~ | 23 + opt_msgs/msg/Image2D_roi.msg | 6 + opt_msgs/msg/Image2D_roi.msg~ | 6 + opt_msgs/msg/Image2D_roi_array.msg | 2 + opt_msgs/msg/World3D_roi.msg | 7 + opt_msgs/msg/World3D_roi.msg~ | 7 + opt_msgs/msg/World3D_roi_arra.msg~ | 2 + opt_msgs/msg/World3D_roi_array.msg | 2 + opt_msgs/msg/image2D_roi_array.msg~ | 2 + .../swissranger_camera/SwissRangerConfig.h | 531 ++++++++++++++++++ .../src/swissranger_camera/__init__.py | 0 .../cfg/SwissRangerConfig.py | 38 ++ .../src/swissranger_camera/cfg/__init__.py | 0 tracking/conf/multicamera_tracking.rviz | 91 ++- tracking/conf/tracker_multicamera.yaml | 2 +- tracking/launch/tracker_network.launch | 4 + tracking/launch/tracker_network.launch~ | 69 +++ 32 files changed, 1539 insertions(+), 139 deletions(-) create mode 100644 detection/launch/camera_poses.txt create mode 100644 detection/launch/detection_node_kinect2_head.launch create mode 100644 opt_calibration/launch/opt_calibration_master.launch create mode 100644 opt_calibration/launch/opt_calibration_results.launch create mode 100644 
opt_calibration/launch/opt_define_reference_frame.launch create mode 100644 opt_calibration/launch/sensor_kinect2_head.launch create mode 160000 opt_gui create mode 100644 opt_msgs/CMakeLists.txt.user create mode 100644 opt_msgs/CMakeLists.txt~ create mode 100644 opt_msgs/msg/Image2D_roi.msg create mode 100644 opt_msgs/msg/Image2D_roi.msg~ create mode 100644 opt_msgs/msg/Image2D_roi_array.msg create mode 100644 opt_msgs/msg/World3D_roi.msg create mode 100644 opt_msgs/msg/World3D_roi.msg~ create mode 100644 opt_msgs/msg/World3D_roi_arra.msg~ create mode 100644 opt_msgs/msg/World3D_roi_array.msg create mode 100644 opt_msgs/msg/image2D_roi_array.msg~ create mode 100644 swissranger_camera/cfg/cpp/swissranger_camera/SwissRangerConfig.h create mode 100644 swissranger_camera/src/swissranger_camera/__init__.py create mode 100644 swissranger_camera/src/swissranger_camera/cfg/SwissRangerConfig.py create mode 100644 swissranger_camera/src/swissranger_camera/cfg/__init__.py create mode 100644 tracking/launch/tracker_network.launch~ diff --git a/detection/CMakeLists.txt b/detection/CMakeLists.txt index 28745ab..f63134c 100644 --- a/detection/CMakeLists.txt +++ b/detection/CMakeLists.txt @@ -62,7 +62,6 @@ add_dependencies(${PROJECT_NAME} ${PROJECT_NAME}_gencfg) target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES} ${OpenCV_LIBS}) - add_executable(multiple_objects_detection_node apps/multiple_objects_detection_node.cpp) target_link_libraries(multiple_objects_detection_node ${PROJECT_NAME} ${OpenCV_LIBRARIES} ${PCL_LIBRARIES} ${catkin_LIBRARIES}) diff --git a/detection/apps/multiple_objects_detection_node.cpp b/detection/apps/multiple_objects_detection_node.cpp index 6bbc995..d9aad38 100644 --- a/detection/apps/multiple_objects_detection_node.cpp +++ b/detection/apps/multiple_objects_detection_node.cpp @@ -24,7 +24,7 @@ bool use_background_removal; int background_calculate_frames; int threshold_4_detecting_foreground;//threshold of depth difference,use the difference to ditinguish the background and foreground bool show_2D_tracks; - +bool publish_world3D_rois; void configCb(detection::multiple_objects_detectionConfig &config, uint32_t level) { @@ -61,6 +61,7 @@ int main(int argc, char** argv){ nh.param("show_2D_track",show_2D_tracks,false); nh.param("output_detection_topic",output_detection_topic,std::string("/objects_detector/detections")); + nh.param("publish_world3D_rois",publish_world3D_rois,true); //set the stastic varables of object_detector class,the instruction of the varable can be found in the header of the class nh.param("HMin",Object_Detector::HMin,0); @@ -79,8 +80,6 @@ int main(int argc, char** argv){ nh.param("Backprojection_Mode",Object_Detector::Backprojection_Mode,std::string("HSD")); - - // std::cout << "output detection topic: " << output_detection_topic << std::endl; std::cout << "HSV range:"<< std::endl @@ -107,7 +106,7 @@ int main(int argc, char** argv){ Multiple_Objects_Detection _multiple_objects_detection(output_detection_topic,useExact, useCompressed, - use_background_removal,background_calculate_frames,threshold_4_detecting_foreground,show_2D_tracks); + use_background_removal,background_calculate_frames,threshold_4_detecting_foreground,show_2D_tracks,publish_world3D_rois); std::cout << "start detecting..." 
<< std::endl;
    _multiple_objects_detection.run_detection();
diff --git a/detection/conf/ground_based_people_detector_kinect2.yaml b/detection/conf/ground_based_people_detector_kinect2.yaml
index af5daf2..12be92e 100644
--- a/detection/conf/ground_based_people_detector_kinect2.yaml
+++ b/detection/conf/ground_based_people_detector_kinect2.yaml
@@ -14,7 +14,7 @@ valid_points_threshold: 0.3
 ## Background subtraction ##
 ############################
 # Flag enabling/disabling background subtraction:
-background_subtraction: false
+background_subtraction: true
 # Resolution of the octree representing the background:
 background_resolution: 0.3
 # Seconds to use to learn the background:
@@ -44,7 +44,7 @@ heads_minimum_distance: 0.3
 # Voxel size used to downsample the point cloud (lower: detection slower but more precise; higher: detection faster but less precise):
 voxel_size: 0.06
 # Denoising flag. If true, a statistical filter is applied to the point cloud to remove noise:
-apply_denoising: false
+apply_denoising: true
 # MeanK for denoising (the higher it is, the stronger is the filtering):
 mean_k_denoising: 5
 # Standard deviation for denoising (the lower it is, the stronger is the filtering):
diff --git a/detection/conf/multiple_objects_detection.yaml b/detection/conf/multiple_objects_detection.yaml
index 99f65a3..ff43b31 100644
--- a/detection/conf/multiple_objects_detection.yaml
+++ b/detection/conf/multiple_objects_detection.yaml
@@ -7,6 +7,17 @@ output_detection_topic: /objects_detector/detections
 #if you want to see the tracking result on the image, set show_2D_track to true
 show_2D_track: false
+
+
+#set publish_world3D_rois to true if this camera is used to mark the ROIs and publish them; otherwise set it to false
+#Two options:
+#1. Mark in one camera and project the ROIs to the others: set publish_world3D_rois to true in the marking camera and to false in all the other cameras.
+#2. Mark in all cameras: set publish_world3D_rois to true in every camera.
+publish_world3D_rois: true
+
+
+
 ############################
 ##   Background removal   ##
 ############################
@@ -49,7 +60,7 @@ d_bins: 100
 # Three options: H, HS, HSD:
 # if the objects have clearly different colors, choose HS, which is faster; otherwise choose HSD (see the dispatch sketch below).
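As the comment above notes, the Backprojection_Mode string selects one of three histogram strategies; the detector declares H_backprojection(), HS_backprojection() and HSD_backprojection() for exactly this purpose (see object_detector.h earlier in the series). A hypothetical dispatcher sketching the trade-off (the enum and free function here are illustrative, not the patch's code):

#include <string>

enum class BackprojectionMode { H, HS, HSD };

// H:   hue-only histogram; cheapest, but needs objects with distinct hues.
// HS:  hue + saturation; more discriminative, still fast.
// HSD: hue + saturation plus a per-frame depth histogram; most robust when
//      objects share similar colors, but the slowest of the three.
BackprojectionMode parse_backprojection_mode(const std::string& m)
{
    if (m == "H")  return BackprojectionMode::H;
    if (m == "HS") return BackprojectionMode::HS;
    return BackprojectionMode::HSD;  // the default this config selects
}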
-Backprojection_Mode: HS +Backprojection_Mode: HSD ###########For camshift######### diff --git a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h index 5871634..2700d89 100644 --- a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h +++ b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h @@ -34,10 +34,17 @@ #include + #include #include #include #include +#include +#include +#include +#include +#include + #include @@ -61,6 +68,11 @@ #include #include +#include +#include +#include +#include + using namespace opt_msgs; using namespace sensor_msgs; @@ -91,14 +103,24 @@ class Multiple_Objects_Detection image_transport::SubscriberFilter *subImageColor, *subImageDepth; message_filters::Subscriber *subCameraInfoColor, *subCameraInfoDepth; + message_filters::Synchronizer *syncExact; message_filters::Synchronizer *syncApproximate; ////////////For receiving color and depth////////////// - ////////////For roi selection//////////// - bool objects_selected,paused; + bool objects_selected; + // std::vector objects; + //for roi projection + tf::TransformListener tf_listener; + ros::Subscriber sub_image2D_rois; + ros::Subscriber sub_world3D_rois; + ros::Publisher world3D_rois_pub; + World3D_roi current_World3D_roi_msg; + World3D_roi_array World3D_rois_msg; + bool publish_world3D_rois; + int numberofrois=0; ///////////For roi selection///////////// @@ -110,13 +132,11 @@ class Multiple_Objects_Detection ///////////For background removal/////////// - ///////////For camshift recover from occlusion/////////// std::vector occludes; ///////////For camshift recover from occlusion/////////// - ///////////For generating 3d position/////////// float fx ,fy,cx,cy; ///////////For generating 3d position/////////// @@ -129,7 +149,6 @@ class Multiple_Objects_Detection ///////////For publish detection topic/////////// - ///////////For main detection/////////// // cv::Mat color, depth; std::vector Object_Detectors; @@ -143,16 +162,17 @@ class Multiple_Objects_Detection std::vector> tracks_2D; ///////////For display/////////// - //test time cost - // std::chrono::time_point start, now; + std::chrono::time_point start, now; + + public: Multiple_Objects_Detection(const std::string &output_detection_topic,const bool useExact, const bool useCompressed, - const bool use_background_removal, const int background_calculate_frames,const int threshold_4_detecting_foreground,const bool show_2D_tracks) + const bool use_background_removal, const int background_calculate_frames,const int threshold_4_detecting_foreground,const bool show_2D_tracks, const bool publish_world3D_rois) : output_detection_topic(output_detection_topic),useExact(useExact), useCompressed(useCompressed),updateImage(false), running(false), - use_background_removal(use_background_removal), objects_selected(false),paused(false),background_calculate_frames(background_calculate_frames),threshold_4_detecting_foreground(threshold_4_detecting_foreground), queueSize(5), - nh(), spinner(0), it(nh) ,show_2D_tracks(show_2D_tracks) + use_background_removal(use_background_removal), objects_selected(false),background_calculate_frames(background_calculate_frames),threshold_4_detecting_foreground(threshold_4_detecting_foreground), queueSize(5), + nh(), spinner(0), it(nh) ,show_2D_tracks(show_2D_tracks) ,publish_world3D_rois(publish_world3D_rois) { std::string cameraName = "kinect2_head"; 
topicColor = "/" + cameraName + "/" + K2_TOPIC_LORES_COLOR K2_TOPIC_RAW;
@@ -161,13 +181,15 @@
         cameraMatrixColor = cv::Mat::zeros(3, 3, CV_64F);
         cameraMatrixDepth = cv::Mat::zeros(3, 3, CV_64F);
         detection_pub=nh.advertise(output_detection_topic,3);
+
+        std::string output_World3D_roi_topic = "/World3D_rois";
+        world3D_rois_pub=nh.advertise(output_World3D_roi_topic,3);
     }

     ~Multiple_Objects_Detection()
     {
     }

-
     void run_detection(){
         start_reciver();
@@ -192,28 +214,26 @@
             }
             else//after getting the background (depth_max), use it to do background removal and generate the new color image
             {
+                // start = std::chrono::high_resolution_clock::now();
                 background_removal();
+                // now = std::chrono::high_resolution_clock::now();
+                // double elapsed = std::chrono::duration_cast(now - start).count();
+                // std::cout<(now - start).count() / 1000.0;
-                // std::cout<(now - start).count();
+                // std::cout<(ExactSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth);
-            syncExact->registerCallback(boost::bind(&Multiple_Objects_Detection::callback, this, _1, _2, _3, _4));
+            syncExact->registerCallback(boost::bind(&Multiple_Objects_Detection::image_callback, this, _1, _2, _3, _4));
         }
         else
         {
             syncApproximate = new message_filters::Synchronizer(ApproximateSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth);
-            syncApproximate->registerCallback(boost::bind(&Multiple_Objects_Detection::callback, this, _1, _2, _3, _4));
+            syncApproximate->registerCallback(boost::bind(&Multiple_Objects_Detection::image_callback, this, _1, _2, _3, _4));
         }
+
         spinner.start();

         std::chrono::milliseconds duration(1);
@@ -280,10 +288,8 @@
             }
             std::this_thread::sleep_for(duration);
         }
-
     }
-
     void background_calculation()
     {
         Mat test_backgroundfile=imread("/tmp/depth_background.png",CV_LOAD_IMAGE_UNCHANGED);
@@ -293,7 +299,6 @@
             background_calculate_frames=0;
             printf("use background removal and background is successfully read from file......\n");
             printf("please press P to select ROIs......\n");
-
         }
         else//background can't be read from file, so it is being calculated
         {
@@ -309,7 +314,6 @@
             compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION);
             compression_params.push_back(0);
             // pcl::io::savePCDFileASCII ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud);
-
             cv::imwrite("/tmp/depth_background.png", depth_max, compression_params);
             printf("background is successfully calculated and saved......\n");
             printf("please press P to select ROIs......\n");
@@ -352,53 +356,9 @@
         }
     }

-
-    void multiple_objects_detector_initialization()
-    {
-        printf("please press P to select ROIs......\n");
-        std::vector objects;
-        roi_zz select_roi;
-        select_roi.select("selectroi",main_color,objects,false);
-        cv::destroyWindow("selectroi");
-
-        //quit when the tracked object(s) is not provided
-        if(objects.size()<1)
-            return;
-
-
-        for(std::vector::iterator it =objects.begin();it!=objects.end();it++)
-        {
-
-            Object_Detector newDetector;
-            cv::Rect selection=*it;
-            selection=selection&Rect(0,0,main_color.size().width,main_color.size().height);
-
-            newDetector.setCurrentRect(selection);
-            Object_Detectors.push_back(newDetector);
-
-            current_detected_boxes.push_back(selection);
-
-            std::list tracks_2D_(10);
-            tracks_2D.push_back(tracks_2D_);
-
-            bool occlude_=false;
-            occludes.push_back(occlude_);
-
-        }
-
-        Object_Detector::current_detected_boxes=current_detected_boxes;
-
-        objects_selected=true;
-        paused=false;
-        std::cout<image_type = std::string("rgb");   //detection_array_msg msg!!!!!!!!!!!!!!!!!!!!!!!
-
-        //detect one by one
        for(size_t i=0; i current_track=tracks_2D[i];
-            for( std::list ::iterator it = current_track.begin(); it!=current_track.end(); it++)
            {
                Rect test_Rect((*it).x+((*it).width)/2,(*it).y+((*it).height)/2,1,1);
@@ -519,13 +472,11 @@
            }
        }
        cv::imshow( "show_2D_tracks", main_color );
+        cv::waitKey(10);
    }
-
-
-
-    void callback(const sensor_msgs::Image::ConstPtr imageColor, const sensor_msgs::Image::ConstPtr imageDepth,
-                  const sensor_msgs::CameraInfo::ConstPtr cameraInfoColor, const sensor_msgs::CameraInfo::ConstPtr cameraInfoDepth)
+    void image_callback(const sensor_msgs::Image::ConstPtr imageColor, const sensor_msgs::Image::ConstPtr imageDepth,
+                        const sensor_msgs::CameraInfo::ConstPtr cameraInfoColor, const sensor_msgs::CameraInfo::ConstPtr cameraInfoDepth)
    {
        cv::Mat _color, _depth;
@@ -559,6 +510,159 @@
        lock.unlock();
    }

+    void image2D_rois_Callback(const opt_msgs::Image2D_roi_array::ConstPtr& image2D_roi_msg)
+    {
+
+        for( std::vector<Image2D_roi>::const_iterator it = image2D_roi_msg->Rois.begin();
+             it != image2D_roi_msg->Rois.end(); it++)
+        {
+            cv::Rect selection((*it).x,(*it).y,(*it).width,(*it).height);
+            selection=selection&Rect(0,0,main_color.size().width,main_color.size().height);
+
+            //////////////initialize one detector with one selection//////////////
+            if(selection.area()>1)
+            {
+                Object_Detector newDetector;
+                newDetector.setCurrentRect(selection);
+                Object_Detectors.push_back(newDetector);
+                current_detected_boxes.push_back(selection);
+                std::list<cv::Rect> tracks_2D_(10);
+                tracks_2D.push_back(tracks_2D_);
+                bool occlude_=false;
+                occludes.push_back(occlude_);
+                Object_Detector::current_detected_boxes=current_detected_boxes;
+                //////////////~~initialize one detector with one selection//////////////
+
+
+                //////////////project the selection from the current frame to the other cameras (see the projection sketch below)//////////////
+                //1.
project from currentframe to world + std::string world_frame_id="/world"; + std::string current_frame_id=color_header_frameId; + tf::StampedTransform transform_current2world; + try{ + tf_listener.lookupTransform(world_frame_id, current_frame_id, ros::Time(0), transform_current2world); + } + catch(tf::TransformException ex){ + ROS_ERROR("%s",ex.what()); + ros::Duration(1.0).sleep(); + } + Point2d current_frame_image2D_1(cvFloor(selection.x),cvFloor(selection.y)); + Point2d current_frame_image2D_2(cvFloor(selection.x+selection.width),cvFloor(selection.y+selection.height)); + std::cout<<"current_frame_image2D_11111111111111111: "<(current_frame_image2D_1.y,current_frame_image2D_1.x)/1000.0;//meter ,not mm + double current_frame_depth_2=main_depth_16.at(current_frame_image2D_2.y,current_frame_image2D_2.x)/1000.0;//meter ,not mm + + tf::Vector3 world3D_1=image2D_to_world3D(current_frame_image2D_1,current_frame_depth_1,transform_current2world); + tf::Vector3 world3D_2=image2D_to_world3D(current_frame_image2D_2,current_frame_depth_2,transform_current2world); + + //publish + current_World3D_roi_msg.no=numberofrois++; + current_World3D_roi_msg.x1=world3D_1.getX(); + current_World3D_roi_msg.y1=world3D_1.getY(); + current_World3D_roi_msg.z1=world3D_1.getZ(); + current_World3D_roi_msg.x2=world3D_2.getX(); + current_World3D_roi_msg.y2=world3D_2.getY(); + current_World3D_roi_msg.z2=world3D_2.getZ(); + World3D_rois_msg.Rois.push_back(current_World3D_roi_msg); + + // cv::rectangle(main_color, selection, cv::Scalar(255, 0, 0), 2, CV_AA); + } + } + cv::imwrite("/tmp/show_the_set_recs.png",main_color); + objects_selected=true; + std::cout<::const_iterator it = world3D_rois_msg->Rois.begin(); + it != world3D_rois_msg->Rois.end(); it++) + { + tf::Vector3 world3D_1,world3D_2; + world3D_1.setX((*it).x1); + world3D_1.setY((*it).y1); + world3D_1.setZ((*it).z1); + world3D_2.setX((*it).x2); + world3D_2.setY((*it).y2); + world3D_2.setZ((*it).z2); + + tf::StampedTransform transform_world2projectedframe; + std::string projected_frame_id=color_header_frameId; + std::string world_frame_id= "/world"; + try{ + tf_listener.lookupTransform(projected_frame_id, world_frame_id, ros::Time(0), transform_world2projectedframe); + } + catch(tf::TransformException ex){ + ROS_ERROR("%s",ex.what()); + ros::Duration(1.0).sleep(); + } + Point2d projected_frame_image2D_1=world3D_to_image2D(world3D_1,transform_world2projectedframe); + Point2d projected_frame_image2D_2=world3D_to_image2D(world3D_2,transform_world2projectedframe); + std::cout<<"projected_frame_image2D_11111111111111111: "<1) + { + Object_Detector newDetector; + newDetector.setCurrentRect(selection); + Object_Detectors.push_back(newDetector); + current_detected_boxes.push_back(selection); + std::list tracks_2D_(10); + tracks_2D.push_back(tracks_2D_); + bool occlude_=false; + occludes.push_back(occlude_); + Object_Detector::current_detected_boxes=current_detected_boxes; + cv::rectangle(main_color, selection, cv::Scalar(255, 0, 0), 2, CV_AA); + } + + } + cv::imwrite("/tmp/show_the_project_recs.png",main_color); + objects_selected=true; + std::cout<K[i]; } } - - }; diff --git a/detection/launch/camera_poses.txt b/detection/launch/camera_poses.txt new file mode 100644 index 0000000..a17bc97 --- /dev/null +++ b/detection/launch/camera_poses.txt @@ -0,0 +1,3 @@ +# Auto-generated file. 
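The two ROI callbacks above rest on the pinhole camera model: a marked pixel plus its depth is back-projected into the camera's optical frame, moved into /world with the tf transform, published, and then re-projected into each receiving camera with the inverse transform. The bodies of image2D_to_world3D and world3D_to_image2D are not shown in this patch; below is a sketch of the forward step under those assumptions (illustrative names; fx, fy, cx, cy come from the CameraInfo intrinsic matrix K, and z is in metres, hence the division by 1000 above):

#include <opencv2/core.hpp>
#include <tf/transform_datatypes.h>

// Back-project pixel (u,v) with depth z through the pinhole model, then
// transform the resulting 3D point from the camera optical frame into /world.
tf::Vector3 pixel_to_world(const cv::Point2d& px, double z,
                           double fx, double fy, double cx, double cy,
                           const tf::Transform& camera_to_world)
{
    tf::Vector3 p_camera((px.x - cx) * z / fx,   // X: right in the image
                         (px.y - cy) * z / fy,   // Y: down in the image
                         z);                     // Z: along the optical axis
    return camera_to_world * p_camera;           // rotate + translate into /world
}

The inverse helper presumably applies the world-to-camera transform and then projects with u = fx*x/z + cx, v = fy*y/z + cy.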
+# CALIBRATION ID: 1490893680 +kinect2_head: 2.62509 -0.111163 5.146 0.875696 -0.136273 0.0710624 0.457752 diff --git a/detection/launch/detection_node_kinect2_head.launch b/detection/launch/detection_node_kinect2_head.launch new file mode 100644 index 0000000..26b5351 --- /dev/null +++ b/detection/launch/detection_node_kinect2_head.launch @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/detection/launch/detector_kinect2.launch b/detection/launch/detector_kinect2.launch index d850420..b41f491 100644 --- a/detection/launch/detector_kinect2.launch +++ b/detection/launch/detector_kinect2.launch @@ -39,6 +39,9 @@ + + + diff --git a/opt_calibration/launch/opt_calibration_master.launch b/opt_calibration/launch/opt_calibration_master.launch new file mode 100644 index 0000000..c8e22a9 --- /dev/null +++ b/opt_calibration/launch/opt_calibration_master.launch @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/opt_calibration/launch/opt_calibration_results.launch b/opt_calibration/launch/opt_calibration_results.launch new file mode 100644 index 0000000..2570aeb --- /dev/null +++ b/opt_calibration/launch/opt_calibration_results.launch @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/opt_calibration/launch/opt_define_reference_frame.launch b/opt_calibration/launch/opt_define_reference_frame.launch new file mode 100644 index 0000000..2fac742 --- /dev/null +++ b/opt_calibration/launch/opt_define_reference_frame.launch @@ -0,0 +1,43 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/opt_calibration/launch/sensor_kinect2_head.launch b/opt_calibration/launch/sensor_kinect2_head.launch new file mode 100644 index 0000000..2dfe94d --- /dev/null +++ b/opt_calibration/launch/sensor_kinect2_head.launch @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + diff --git a/opt_gui b/opt_gui new file mode 160000 index 0000000..9ce876f --- /dev/null +++ b/opt_gui @@ -0,0 +1 @@ +Subproject commit 9ce876ff04a3bb58fed7b7b82c9ca58b68c0c65c diff --git a/opt_msgs/CMakeLists.txt b/opt_msgs/CMakeLists.txt index f259354..5ab7922 100644 --- a/opt_msgs/CMakeLists.txt +++ b/opt_msgs/CMakeLists.txt @@ -2,7 +2,8 @@ cmake_minimum_required(VERSION 2.8.3) project(opt_msgs) find_package(catkin REQUIRED COMPONENTS roscpp message_generation geometry_msgs sensor_msgs) -add_message_files(FILES Rois.msg RoiRect.msg HumanEntry.msg HumanEntries.msg BoundingBox2D.msg BoundingBox3D.msg Detection.msg DetectionArray.msg Track.msg TrackArray.msg CalibrationStatus.msg IDArray.msg) +add_message_files(FILES Rois.msg RoiRect.msg HumanEntry.msg HumanEntries.msg BoundingBox2D.msg BoundingBox3D.msg Detection.msg DetectionArray.msg Track.msg TrackArray.msg CalibrationStatus.msg IDArray.msg + Image2D_roi.msg Image2D_roi_array.msg World3D_roi.msg World3D_roi_array.msg) add_service_files(FILES OPTSensor.srv OPTTransform.srv) diff --git a/opt_msgs/CMakeLists.txt.user b/opt_msgs/CMakeLists.txt.user new file mode 100644 index 0000000..a0de05f --- /dev/null +++ b/opt_msgs/CMakeLists.txt.user @@ -0,0 +1,371 @@ + + + + + + EnvironmentId + {825f57e6-b4e3-4534-8383-4eb78c75979b} + + + ProjectExplorer.Project.ActiveTarget + 0 + + + ProjectExplorer.Project.EditorSettings + + true + false + true + + Cpp + + CppGlobal + + + + QmlJS + + QmlJSGlobal + + + 2 + UTF-8 + false + 4 + false + 80 + true + true + 1 + true + false + 0 + true + true + 0 + 8 + true + 1 + true + true + 
true + false + + + + ProjectExplorer.Project.PluginSettings + + + + ProjectExplorer.Project.Target.0 + + Desktop + Desktop + {b16e7ffb-e671-4440-99f4-da1004cae089} + 0 + 0 + 0 + + + /home/zhao/workspace/ros/catkin/src/open_ptrack/build-opt_msgs-Desktop-Default + + + + + all + + true + Make + + CMakeProjectManager.MakeStep + + 1 + Build + + ProjectExplorer.BuildSteps.Build + + + + + + clean + + true + Make + + CMakeProjectManager.MakeStep + + 1 + Clean + + ProjectExplorer.BuildSteps.Clean + + 2 + false + + Default + Default + CMakeProjectManager.CMakeBuildConfiguration + + + + CMAKE_BUILD_TYPE:STRING=Debug + + /home/zhao/workspace/ros/catkin/src/open_ptrack/build-opt_msgs-Desktop-Debug + + + + + + + true + Make + + CMakeProjectManager.MakeStep + + 1 + Build + + ProjectExplorer.BuildSteps.Build + + + + + + clean + + true + Make + + CMakeProjectManager.MakeStep + + 1 + Clean + + ProjectExplorer.BuildSteps.Clean + + 2 + false + + Debug + Debug + CMakeProjectManager.CMakeBuildConfiguration + + + + CMAKE_BUILD_TYPE:STRING=Release + + /home/zhao/workspace/ros/catkin/src/open_ptrack/build-opt_msgs-Desktop-Release + + + + + + + true + Make + + CMakeProjectManager.MakeStep + + 1 + Build + + ProjectExplorer.BuildSteps.Build + + + + + + clean + + true + Make + + CMakeProjectManager.MakeStep + + 1 + Clean + + ProjectExplorer.BuildSteps.Clean + + 2 + false + + Release + Release + CMakeProjectManager.CMakeBuildConfiguration + + + + CMAKE_BUILD_TYPE:STRING=RelWithDebInfo + + /home/zhao/workspace/ros/catkin/src/open_ptrack/build-opt_msgs-Desktop-Release with Debug Information + + + + + + + true + Make + + CMakeProjectManager.MakeStep + + 1 + Build + + ProjectExplorer.BuildSteps.Build + + + + + + clean + + true + Make + + CMakeProjectManager.MakeStep + + 1 + Clean + + ProjectExplorer.BuildSteps.Clean + + 2 + false + + Release with Debug Information + Release with Debug Information + CMakeProjectManager.CMakeBuildConfiguration + + + + CMAKE_BUILD_TYPE:STRING=MinSizeRel + + /home/zhao/workspace/ros/catkin/src/open_ptrack/build-opt_msgs-Desktop-Minimum Size Release + + + + + + + true + Make + + CMakeProjectManager.MakeStep + + 1 + Build + + ProjectExplorer.BuildSteps.Build + + + + + + clean + + true + Make + + CMakeProjectManager.MakeStep + + 1 + Clean + + ProjectExplorer.BuildSteps.Clean + + 2 + false + + Minimum Size Release + Minimum Size Release + CMakeProjectManager.CMakeBuildConfiguration + + 5 + + + 0 + Deploy + + ProjectExplorer.BuildSteps.Deploy + + 1 + Deploy locally + + ProjectExplorer.DefaultDeployConfiguration + + 1 + + + false + false + 1000 + + true + + false + false + false + false + true + 0.01 + 10 + true + 1 + 25 + + 1 + true + false + true + valgrind + + 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12 + 13 + 14 + + 2 + + + + %{buildDir} + Custom Executable + + ProjectExplorer.CustomExecutableRunConfiguration + 3768 + false + true + false + false + true + + 1 + + + + ProjectExplorer.Project.TargetCount + 1 + + + ProjectExplorer.Project.Updater.FileVersion + 18 + + + Version + 18 + + diff --git a/opt_msgs/CMakeLists.txt~ b/opt_msgs/CMakeLists.txt~ new file mode 100644 index 0000000..10704f1 --- /dev/null +++ b/opt_msgs/CMakeLists.txt~ @@ -0,0 +1,23 @@ +cmake_minimum_required(VERSION 2.8.3) +project(opt_msgs) +find_package(catkin REQUIRED COMPONENTS roscpp message_generation geometry_msgs sensor_msgs) + +add_message_files(FILES Rois.msg RoiRect.msg HumanEntry.msg HumanEntries.msg BoundingBox2D.msg BoundingBox3D.msg Detection.msg DetectionArray.msg Track.msg TrackArray.msg 
CalibrationStatus.msg IDArray.msg + Image2D_roi.msg Image2D_roi_array.msg World3D_roi.msg World3D_roi_arra.msg) + +add_service_files(FILES OPTSensor.srv OPTTransform.srv) + +generate_messages(DEPENDENCIES std_msgs geometry_msgs sensor_msgs) + + +catkin_package( + INCLUDE_DIRS + LIBRARIES + CATKIN_DEPENDS roscpp std_msgs sensor_msgs message_runtime +) + +include_directories(include include/open_ptrack/${PROJECT_NAME}/) + +add_library(roi_msgs src/overlap.cpp ) + +target_link_libraries(roi_msgs ${catkin_LIBRARIES}) diff --git a/opt_msgs/msg/Image2D_roi.msg b/opt_msgs/msg/Image2D_roi.msg new file mode 100644 index 0000000..a5640d4 --- /dev/null +++ b/opt_msgs/msg/Image2D_roi.msg @@ -0,0 +1,6 @@ +int32 no +string name +int32 x +int32 y +int32 width +int32 height diff --git a/opt_msgs/msg/Image2D_roi.msg~ b/opt_msgs/msg/Image2D_roi.msg~ new file mode 100644 index 0000000..a59c871 --- /dev/null +++ b/opt_msgs/msg/Image2D_roi.msg~ @@ -0,0 +1,6 @@ +int32 no +string name +int32 x +int32 y +int32 width +int32 height diff --git a/opt_msgs/msg/Image2D_roi_array.msg b/opt_msgs/msg/Image2D_roi_array.msg new file mode 100644 index 0000000..d6b106f --- /dev/null +++ b/opt_msgs/msg/Image2D_roi_array.msg @@ -0,0 +1,2 @@ +Header header +Image2D_roi[] Rois diff --git a/opt_msgs/msg/World3D_roi.msg b/opt_msgs/msg/World3D_roi.msg new file mode 100644 index 0000000..f30091a --- /dev/null +++ b/opt_msgs/msg/World3D_roi.msg @@ -0,0 +1,7 @@ +int32 no +float64 x1 +float64 y1 +float64 z1 +float64 x2 +float64 y2 +float64 z2 diff --git a/opt_msgs/msg/World3D_roi.msg~ b/opt_msgs/msg/World3D_roi.msg~ new file mode 100644 index 0000000..8481e4a --- /dev/null +++ b/opt_msgs/msg/World3D_roi.msg~ @@ -0,0 +1,7 @@ +int32 no +int32 x1 +int32 y1 +int32 z1 +int32 x2 +int32 y2 +int32 z2 diff --git a/opt_msgs/msg/World3D_roi_arra.msg~ b/opt_msgs/msg/World3D_roi_arra.msg~ new file mode 100644 index 0000000..d6b106f --- /dev/null +++ b/opt_msgs/msg/World3D_roi_arra.msg~ @@ -0,0 +1,2 @@ +Header header +Image2D_roi[] Rois diff --git a/opt_msgs/msg/World3D_roi_array.msg b/opt_msgs/msg/World3D_roi_array.msg new file mode 100644 index 0000000..6f789e5 --- /dev/null +++ b/opt_msgs/msg/World3D_roi_array.msg @@ -0,0 +1,2 @@ +Header header +World3D_roi[] Rois diff --git a/opt_msgs/msg/image2D_roi_array.msg~ b/opt_msgs/msg/image2D_roi_array.msg~ new file mode 100644 index 0000000..9c51bca --- /dev/null +++ b/opt_msgs/msg/image2D_roi_array.msg~ @@ -0,0 +1,2 @@ +Header header +Roi_object[] Rois diff --git a/swissranger_camera/cfg/cpp/swissranger_camera/SwissRangerConfig.h b/swissranger_camera/cfg/cpp/swissranger_camera/SwissRangerConfig.h new file mode 100644 index 0000000..d5cdb4f --- /dev/null +++ b/swissranger_camera/cfg/cpp/swissranger_camera/SwissRangerConfig.h @@ -0,0 +1,531 @@ +//#line 2 "/opt/ros/indigo/share/dynamic_reconfigure/templates/ConfigType.h.template" +// ********************************************************* +// +// File autogenerated for the swissranger_camera package +// by the dynamic_reconfigure package. +// Please do not edit. 
+// +// ********************************************************/ + +#ifndef __swissranger_camera__SWISSRANGERCONFIG_H__ +#define __swissranger_camera__SWISSRANGERCONFIG_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace swissranger_camera +{ + class SwissRangerConfigStatics; + + class SwissRangerConfig + { + public: + class AbstractParamDescription : public dynamic_reconfigure::ParamDescription + { + public: + AbstractParamDescription(std::string n, std::string t, uint32_t l, + std::string d, std::string e) + { + name = n; + type = t; + level = l; + description = d; + edit_method = e; + } + + virtual void clamp(SwissRangerConfig &config, const SwissRangerConfig &max, const SwissRangerConfig &min) const = 0; + virtual void calcLevel(uint32_t &level, const SwissRangerConfig &config1, const SwissRangerConfig &config2) const = 0; + virtual void fromServer(const ros::NodeHandle &nh, SwissRangerConfig &config) const = 0; + virtual void toServer(const ros::NodeHandle &nh, const SwissRangerConfig &config) const = 0; + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, SwissRangerConfig &config) const = 0; + virtual void toMessage(dynamic_reconfigure::Config &msg, const SwissRangerConfig &config) const = 0; + virtual void getValue(const SwissRangerConfig &config, boost::any &val) const = 0; + }; + + typedef boost::shared_ptr AbstractParamDescriptionPtr; + typedef boost::shared_ptr AbstractParamDescriptionConstPtr; + + template + class ParamDescription : public AbstractParamDescription + { + public: + ParamDescription(std::string name, std::string type, uint32_t level, + std::string description, std::string edit_method, T SwissRangerConfig::* f) : + AbstractParamDescription(name, type, level, description, edit_method), + field(f) + {} + + T (SwissRangerConfig::* field); + + virtual void clamp(SwissRangerConfig &config, const SwissRangerConfig &max, const SwissRangerConfig &min) const + { + if (config.*field > max.*field) + config.*field = max.*field; + + if (config.*field < min.*field) + config.*field = min.*field; + } + + virtual void calcLevel(uint32_t &comb_level, const SwissRangerConfig &config1, const SwissRangerConfig &config2) const + { + if (config1.*field != config2.*field) + comb_level |= level; + } + + virtual void fromServer(const ros::NodeHandle &nh, SwissRangerConfig &config) const + { + nh.getParam(name, config.*field); + } + + virtual void toServer(const ros::NodeHandle &nh, const SwissRangerConfig &config) const + { + nh.setParam(name, config.*field); + } + + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, SwissRangerConfig &config) const + { + return dynamic_reconfigure::ConfigTools::getParameter(msg, name, config.*field); + } + + virtual void toMessage(dynamic_reconfigure::Config &msg, const SwissRangerConfig &config) const + { + dynamic_reconfigure::ConfigTools::appendParameter(msg, name, config.*field); + } + + virtual void getValue(const SwissRangerConfig &config, boost::any &val) const + { + val = config.*field; + } + }; + + class AbstractGroupDescription : public dynamic_reconfigure::Group + { + public: + AbstractGroupDescription(std::string n, std::string t, int p, int i, bool s) + { + name = n; + type = t; + parent = p; + state = s; + id = i; + } + + std::vector abstract_parameters; + bool state; + + virtual void toMessage(dynamic_reconfigure::Config &msg, const boost::any &config) const = 0; + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, boost::any &config) const =0; + 
virtual void updateParams(boost::any &cfg, SwissRangerConfig &top) const= 0; + virtual void setInitialState(boost::any &cfg) const = 0; + + + void convertParams() + { + for(std::vector::const_iterator i = abstract_parameters.begin(); i != abstract_parameters.end(); ++i) + { + parameters.push_back(dynamic_reconfigure::ParamDescription(**i)); + } + } + }; + + typedef boost::shared_ptr AbstractGroupDescriptionPtr; + typedef boost::shared_ptr AbstractGroupDescriptionConstPtr; + + template + class GroupDescription : public AbstractGroupDescription + { + public: + GroupDescription(std::string name, std::string type, int parent, int id, bool s, T PT::* f) : AbstractGroupDescription(name, type, parent, id, s), field(f) + { + } + + GroupDescription(const GroupDescription& g): AbstractGroupDescription(g.name, g.type, g.parent, g.id, g.state), field(g.field), groups(g.groups) + { + parameters = g.parameters; + abstract_parameters = g.abstract_parameters; + } + + virtual bool fromMessage(const dynamic_reconfigure::Config &msg, boost::any &cfg) const + { + PT* config = boost::any_cast(cfg); + if(!dynamic_reconfigure::ConfigTools::getGroupState(msg, name, (*config).*field)) + return false; + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = &((*config).*field); + if(!(*i)->fromMessage(msg, n)) + return false; + } + + return true; + } + + virtual void setInitialState(boost::any &cfg) const + { + PT* config = boost::any_cast(cfg); + T* group = &((*config).*field); + group->state = state; + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = boost::any(&((*config).*field)); + (*i)->setInitialState(n); + } + + } + + virtual void updateParams(boost::any &cfg, SwissRangerConfig &top) const + { + PT* config = boost::any_cast(cfg); + + T* f = &((*config).*field); + f->setParams(top, abstract_parameters); + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + boost::any n = &((*config).*field); + (*i)->updateParams(n, top); + } + } + + virtual void toMessage(dynamic_reconfigure::Config &msg, const boost::any &cfg) const + { + const PT config = boost::any_cast(cfg); + dynamic_reconfigure::ConfigTools::appendGroup(msg, name, id, parent, config.*field); + + for(std::vector::const_iterator i = groups.begin(); i != groups.end(); ++i) + { + (*i)->toMessage(msg, config.*field); + } + } + + T (PT::* field); + std::vector groups; + }; + +class DEFAULT +{ + public: + DEFAULT() + { + state = true; + name = "Default"; + } + + void setParams(SwissRangerConfig &config, const std::vector params) + { + for (std::vector::const_iterator _i = params.begin(); _i != params.end(); ++_i) + { + boost::any val; + (*_i)->getValue(config, val); + + if("frame_id"==(*_i)->name){frame_id = boost::any_cast(val);} + if("camera_info_url"==(*_i)->name){camera_info_url = boost::any_cast(val);} + if("auto_exposure"==(*_i)->name){auto_exposure = boost::any_cast(val);} + if("integration_time"==(*_i)->name){integration_time = boost::any_cast(val);} + if("amp_threshold"==(*_i)->name){amp_threshold = boost::any_cast(val);} + } + } + + std::string frame_id; +std::string camera_info_url; +int auto_exposure; +int integration_time; +int amp_threshold; + + bool state; + std::string name; + + +}groups; + + + +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + std::string frame_id; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + 
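+// (These plain members, one per parameter, are the actual storage that the
+// description machinery above reads and writes through member pointers; the
+// "#line" markers are emitted by the dynamic_reconfigure generator.)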
std::string camera_info_url; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + int auto_exposure; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + int integration_time; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + int amp_threshold; +//#line 218 "/opt/ros/indigo/share/dynamic_reconfigure/templates/ConfigType.h.template" + + bool __fromMessage__(dynamic_reconfigure::Config &msg) + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + + int count = 0; + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + if ((*i)->fromMessage(msg, *this)) + count++; + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); i ++) + { + if ((*i)->id == 0) + { + boost::any n = boost::any(this); + (*i)->updateParams(n, *this); + (*i)->fromMessage(msg, n); + } + } + + if (count != dynamic_reconfigure::ConfigTools::size(msg)) + { + ROS_ERROR("SwissRangerConfig::__fromMessage__ called with an unexpected parameter."); + ROS_ERROR("Booleans:"); + for (unsigned int i = 0; i < msg.bools.size(); i++) + ROS_ERROR(" %s", msg.bools[i].name.c_str()); + ROS_ERROR("Integers:"); + for (unsigned int i = 0; i < msg.ints.size(); i++) + ROS_ERROR(" %s", msg.ints[i].name.c_str()); + ROS_ERROR("Doubles:"); + for (unsigned int i = 0; i < msg.doubles.size(); i++) + ROS_ERROR(" %s", msg.doubles[i].name.c_str()); + ROS_ERROR("Strings:"); + for (unsigned int i = 0; i < msg.strs.size(); i++) + ROS_ERROR(" %s", msg.strs[i].name.c_str()); + // @todo Check that there are no duplicates. Make this error more + // explicit. + return false; + } + return true; + } + + // This version of __toMessage__ is used during initialization of + // statics when __getParamDescriptions__ can't be called yet. 
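+  // (SwissRangerConfigStatics, defined further below, calls this overload
+  // from its constructor while building __description_message__, before the
+  // singleton accessors can be used safely.)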
+ void __toMessage__(dynamic_reconfigure::Config &msg, const std::vector &__param_descriptions__, const std::vector &__group_descriptions__) const + { + dynamic_reconfigure::ConfigTools::clear(msg); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->toMessage(msg, *this); + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); ++i) + { + if((*i)->id == 0) + { + (*i)->toMessage(msg, *this); + } + } + } + + void __toMessage__(dynamic_reconfigure::Config &msg) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + __toMessage__(msg, __param_descriptions__, __group_descriptions__); + } + + void __toServer__(const ros::NodeHandle &nh) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->toServer(nh, *this); + } + + void __fromServer__(const ros::NodeHandle &nh) + { + static bool setup=false; + + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->fromServer(nh, *this); + + const std::vector &__group_descriptions__ = __getGroupDescriptions__(); + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); i++){ + if (!setup && (*i)->id == 0) { + setup = true; + boost::any n = boost::any(this); + (*i)->setInitialState(n); + } + } + } + + void __clamp__() + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + const SwissRangerConfig &__max__ = __getMax__(); + const SwissRangerConfig &__min__ = __getMin__(); + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->clamp(*this, __max__, __min__); + } + + uint32_t __level__(const SwissRangerConfig &config) const + { + const std::vector &__param_descriptions__ = __getParamDescriptions__(); + uint32_t level = 0; + for (std::vector::const_iterator i = __param_descriptions__.begin(); i != __param_descriptions__.end(); ++i) + (*i)->calcLevel(level, config, *this); + return level; + } + + static const dynamic_reconfigure::ConfigDescription &__getDescriptionMessage__(); + static const SwissRangerConfig &__getDefault__(); + static const SwissRangerConfig &__getMax__(); + static const SwissRangerConfig &__getMin__(); + static const std::vector &__getParamDescriptions__(); + static const std::vector &__getGroupDescriptions__(); + + private: + static const SwissRangerConfigStatics *__get_statics__(); + }; + + template <> // Max and min are ignored for strings. 
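+  // String parameters carry empty min/max sentinels, so the specialization
+  // below turns clamp() into a no-op instead of comparing against them.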
+ inline void SwissRangerConfig::ParamDescription::clamp(SwissRangerConfig &config, const SwissRangerConfig &max, const SwissRangerConfig &min) const + { + return; + } + + class SwissRangerConfigStatics + { + friend class SwissRangerConfig; + + SwissRangerConfigStatics() + { +SwissRangerConfig::GroupDescription Default("Default", "", 0, 0, true, &SwissRangerConfig::groups); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.frame_id = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.frame_id = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.frame_id = "camera"; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("frame_id", "str", 3, "ROS tf frame of reference, resolved with tf_prefix unless absolute.", "", &SwissRangerConfig::frame_id))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("frame_id", "str", 3, "ROS tf frame of reference, resolved with tf_prefix unless absolute.", "", &SwissRangerConfig::frame_id))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.camera_info_url = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.camera_info_url = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.camera_info_url = ""; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("camera_info_url", "str", 0, "Camera calibration URL for this video_mode (uncalibrated if null).", "", &SwissRangerConfig::camera_info_url))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("camera_info_url", "str", 0, "Camera calibration URL for this video_mode (uncalibrated if null).", "", &SwissRangerConfig::camera_info_url))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.auto_exposure = 0; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.auto_exposure = 1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.auto_exposure = 1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("auto_exposure", "int", 0, "Auto exposure control state.", "{'enum_description': 'Feature control states', 'enum': [{'srcline': 54, 'description': 'Use fixed value', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 
'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Off'}, {'srcline': 55, 'description': 'Camera sets continuously', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Auto'}]}", &SwissRangerConfig::auto_exposure))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("auto_exposure", "int", 0, "Auto exposure control state.", "{'enum_description': 'Feature control states', 'enum': [{'srcline': 54, 'description': 'Use fixed value', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Off'}, {'srcline': 55, 'description': 'Camera sets continuously', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Auto'}]}", &SwissRangerConfig::auto_exposure))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.integration_time = 0; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.integration_time = 255; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.integration_time = 1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("integration_time", "int", 0, "integration_time control.", "", &SwissRangerConfig::integration_time))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("integration_time", "int", 0, "integration_time control.", "", &SwissRangerConfig::integration_time))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __min__.amp_threshold = -1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __max__.amp_threshold = 65535; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __default__.amp_threshold = -1; +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.abstract_parameters.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("amp_threshold", "int", 0, "amplitude_threshold control.", "", &SwissRangerConfig::amp_threshold))); +//#line 259 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __param_descriptions__.push_back(SwissRangerConfig::AbstractParamDescriptionConstPtr(new SwissRangerConfig::ParamDescription("amp_threshold", "int", 0, "amplitude_threshold control.", "", &SwissRangerConfig::amp_threshold))); +//#line 233 "/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + Default.convertParams(); +//#line 233 
"/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py" + __group_descriptions__.push_back(SwissRangerConfig::AbstractGroupDescriptionConstPtr(new SwissRangerConfig::GroupDescription(Default))); +//#line 353 "/opt/ros/indigo/share/dynamic_reconfigure/templates/ConfigType.h.template" + + for (std::vector::const_iterator i = __group_descriptions__.begin(); i != __group_descriptions__.end(); ++i) + { + __description_message__.groups.push_back(**i); + } + __max__.__toMessage__(__description_message__.max, __param_descriptions__, __group_descriptions__); + __min__.__toMessage__(__description_message__.min, __param_descriptions__, __group_descriptions__); + __default__.__toMessage__(__description_message__.dflt, __param_descriptions__, __group_descriptions__); + } + std::vector __param_descriptions__; + std::vector __group_descriptions__; + SwissRangerConfig __max__; + SwissRangerConfig __min__; + SwissRangerConfig __default__; + dynamic_reconfigure::ConfigDescription __description_message__; + + static const SwissRangerConfigStatics *get_instance() + { + // Split this off in a separate function because I know that + // instance will get initialized the first time get_instance is + // called, and I am guaranteeing that get_instance gets called at + // most once. + static SwissRangerConfigStatics instance; + return &instance; + } + }; + + inline const dynamic_reconfigure::ConfigDescription &SwissRangerConfig::__getDescriptionMessage__() + { + return __get_statics__()->__description_message__; + } + + inline const SwissRangerConfig &SwissRangerConfig::__getDefault__() + { + return __get_statics__()->__default__; + } + + inline const SwissRangerConfig &SwissRangerConfig::__getMax__() + { + return __get_statics__()->__max__; + } + + inline const SwissRangerConfig &SwissRangerConfig::__getMin__() + { + return __get_statics__()->__min__; + } + + inline const std::vector &SwissRangerConfig::__getParamDescriptions__() + { + return __get_statics__()->__param_descriptions__; + } + + inline const std::vector &SwissRangerConfig::__getGroupDescriptions__() + { + return __get_statics__()->__group_descriptions__; + } + + inline const SwissRangerConfigStatics *SwissRangerConfig::__get_statics__() + { + const static SwissRangerConfigStatics *statics; + + if (statics) // Common case + return statics; + + boost::mutex::scoped_lock lock(dynamic_reconfigure::__init_mutex__); + + if (statics) // In case we lost a race. + return statics; + + statics = SwissRangerConfigStatics::get_instance(); + + return statics; + } + +//#line 54 "/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg" + const int SwissRanger_Off = 0; +//#line 55 "/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg" + const int SwissRanger_Auto = 1; +} + +#endif // __SWISSRANGERRECONFIGURATOR_H__ diff --git a/swissranger_camera/src/swissranger_camera/__init__.py b/swissranger_camera/src/swissranger_camera/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/swissranger_camera/src/swissranger_camera/cfg/SwissRangerConfig.py b/swissranger_camera/src/swissranger_camera/cfg/SwissRangerConfig.py new file mode 100644 index 0000000..20e2b76 --- /dev/null +++ b/swissranger_camera/src/swissranger_camera/cfg/SwissRangerConfig.py @@ -0,0 +1,38 @@ +## ********************************************************* +## +## File autogenerated for the swissranger_camera package +## by the dynamic_reconfigure package. +## Please do not edit. 
+## +## ********************************************************/ + +from dynamic_reconfigure.encoding import extract_params + +inf = float('inf') + +config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 233, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 259, 'description': 'ROS tf frame of reference, resolved with tf_prefix unless absolute.', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'frame_id', 'edit_method': '', 'default': 'camera', 'level': 3, 'min': '', 'type': 'str'}, {'srcline': 259, 'description': 'Camera calibration URL for this video_mode (uncalibrated if null).', 'max': '', 'cconsttype': 'const char * const', 'ctype': 'std::string', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'camera_info_url', 'edit_method': '', 'default': '', 'level': 0, 'min': '', 'type': 'str'}, {'srcline': 259, 'description': 'Auto exposure control state.', 'max': 1, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'auto_exposure', 'edit_method': "{'enum_description': 'Feature control states', 'enum': [{'srcline': 54, 'description': 'Use fixed value', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 0, 'ctype': 'int', 'type': 'int', 'name': 'Off'}, {'srcline': 55, 'description': 'Camera sets continuously', 'srcfile': '/home/zhao/workspace/ros/catkin/src/open_ptrack/swissranger_camera/cfg/SwissRanger.cfg', 'cconsttype': 'const int', 'value': 1, 'ctype': 'int', 'type': 'int', 'name': 'Auto'}]}", 'default': 1, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 259, 'description': 'integration_time control.', 'max': 255, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'integration_time', 'edit_method': '', 'default': 1, 'level': 0, 'min': 0, 'type': 'int'}, {'srcline': 259, 'description': 'amplitude_threshold control.', 'max': 65535, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator.py', 'name': 'amp_threshold', 'edit_method': '', 'default': -1, 'level': 0, 'min': -1, 'type': 'int'}], 'type': '', 'id': 0} + +min = {} +max = {} +defaults = {} +level = {} +type = {} +all_level = 0 + +#def extract_params(config): +# params = [] +# params.extend(config['parameters']) +# for group in config['groups']: +# params.extend(extract_params(group)) +# return params + +for param in extract_params(config_description): + min[param['name']] = param['min'] + max[param['name']] = param['max'] + defaults[param['name']] = param['default'] + level[param['name']] = param['level'] + type[param['name']] = param['type'] + all_level = all_level | param['level'] + +SwissRanger_Off = 0 +SwissRanger_Auto = 1 diff --git a/swissranger_camera/src/swissranger_camera/cfg/__init__.py b/swissranger_camera/src/swissranger_camera/cfg/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/tracking/conf/multicamera_tracking.rviz b/tracking/conf/multicamera_tracking.rviz index 6004b5d..08b1f50 100644 --- a/tracking/conf/multicamera_tracking.rviz +++ b/tracking/conf/multicamera_tracking.rviz @@ -8,7 +8,7 @@ Panels: - /Status1 - /TF1/Frames1 Splitter Ratio: 0.355837 - Tree Height: 775 + Tree Height: 719 - Class: rviz/Selection Name: Selection - Class: rviz/Tool Properties @@ -96,39 +96,39 @@ Visualization Manager: kinect2_far: Value: true kinect2_far_ir_frame: - Value: false + Value: true kinect2_far_ir_optical_frame: - Value: false + Value: true kinect2_far_link: Value: false kinect2_far_rgb_frame: - Value: false + Value: true kinect2_far_rgb_optical_frame: - Value: false + Value: true kinect2_head: Value: true kinect2_head_ir_frame: - Value: false + Value: true kinect2_head_ir_optical_frame: - Value: false + Value: true kinect2_head_link: Value: false kinect2_head_rgb_frame: - Value: false + Value: true kinect2_head_rgb_optical_frame: - Value: false + Value: true kinect2_lenovo: Value: true kinect2_lenovo_ir_frame: - Value: false + Value: true kinect2_lenovo_ir_optical_frame: - Value: false + Value: true kinect2_lenovo_link: Value: false kinect2_lenovo_rgb_frame: - Value: false + Value: true kinect2_lenovo_rgb_optical_frame: - Value: false + Value: true world: Value: true Marker Scale: 1 @@ -199,8 +199,7 @@ Visualization Manager: Marker Topic: /tracker/people_markers_array Name: MarkerArray Namespaces: - numbers: true - people: true + {} Queue Size: 100 Value: true - Alpha: 1 @@ -353,6 +352,46 @@ Visualization Manager: Use Fixed Frame: true Use rainbow: true Value: false + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_lenovo/depth_lowres/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /detector/object_markers_array + Name: MarkerArray + Namespaces: + /kinect2_far_rgb_optical_frame: true + /kinect2_head_rgb_optical_frame: true + /kinect2_lenovo_rgb_optical_frame: true + Queue Size: 100 + Value: true Enabled: true Global Options: Background Color: 48; 48; 48 @@ -377,30 +416,30 @@ Visualization Manager: Views: Current: Class: rviz/Orbit - Distance: 14.7331 + Distance: 10.4249 Enable Stereo Rendering: Stereo Eye Separation: 0.06 Stereo Focal Distance: 1 Swap Stereo Eyes: false Value: false Focal Point: - X: -0.326729 - Y: 0.467386 - Z: 1.2193 + X: 0.0471745 + Y: 1.9946 + Z: 0.00524044 Name: Current View Near Clip Distance: 0.01 - Pitch: 0.815205 + Pitch: 0.48246 Target Frame: Value: Orbit (rviz) - Yaw: 5.82858 + Yaw: 4.27367 Saved: ~ Window Geometry: Displays: collapsed: false - Height: 1056 + Height: 1000 Hide Left Dock: false Hide Right Dock: true - QMainWindow State: 
000000ff00000000fd0000000400000000000002c900000396fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000002800000396000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e100000197000000030000073f0000003efc0100000002fb0000000800540069006d006501000000000000073f000002f600fffffffb0000000800540069006d00650100000000000004500000000000000000000004700000039600000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + QMainWindow State: 000000ff00000000fd0000000400000000000002c90000035efc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c00610079007301000000280000035e000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004bf0000003efc0100000002fb0000000800540069006d00650100000000000004bf000002f600fffffffb0000000800540069006d00650100000000000004500000000000000000000001f00000035e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: @@ -409,6 +448,6 @@ Window Geometry: collapsed: false Views: collapsed: true - Width: 1855 - X: 65 - Y: 24 + Width: 1215 + X: 55 + Y: 42 diff --git a/tracking/conf/tracker_multicamera.yaml b/tracking/conf/tracker_multicamera.yaml index 2cb5a99..276a278 100644 --- a/tracking/conf/tracker_multicamera.yaml +++ b/tracking/conf/tracker_multicamera.yaml @@ -14,7 +14,7 @@ extrinsic_calibration: true # Maximum delay that a detection message can have in order to be considered for tracking: max_detection_delay: 2.0 # Flag stating if the results of a calibration refinement procedure should be used to correct 
detection positions:
-calibration_refinement: true
+calibration_refinement: false

########################
## Sensor orientation ##
diff --git a/tracking/launch/tracker_network.launch b/tracking/launch/tracker_network.launch
index 35a1f3c..751c180 100644
--- a/tracking/launch/tracker_network.launch
+++ b/tracking/launch/tracker_network.launch
@@ -62,4 +62,8 @@
+
+
+
+

From 4298591c6ee2c55bded988a21ca55a6ec3321f6f Mon Sep 17 00:00:00 2001
From: yongheng1991
Date: Mon, 3 Apr 2017 15:46:38 +0200
Subject: [PATCH 5/9] fix show_2D_tracks; use launch file to run opt_gui in
 tracker launch

---
 .../multiple_objects_detection/multiple_objects_detection.h | 1 +
 tracking/launch/tracker_network.launch                      | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h
index 2700d89..19ff4b5 100644
--- a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h
+++ b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h
@@ -471,6 +471,7 @@ class Multiple_Objects_Detection
         cv::rectangle(main_color, test_Rect, cv::Scalar(250*(i), 250*(i-1), 250*(i-2)), 2, CV_AA);
       }
     }
+    cv::namedWindow("show_2D_tracks");
     cv::imshow( "show_2D_tracks", main_color );
     cv::waitKey(10);
   }
diff --git a/tracking/launch/tracker_network.launch b/tracking/launch/tracker_network.launch
index 751c180..10af930 100644
--- a/tracking/launch/tracker_network.launch
+++ b/tracking/launch/tracker_network.launch
@@ -63,7 +63,7 @@
-
+

From eaadc43f1813f2d047d4f16be48d02ce4fc2257f Mon Sep 17 00:00:00 2001
From: yongheng1991
Date: Mon, 3 Apr 2017 15:52:01 +0200
Subject: [PATCH 6/9] add the missing folder: opt_gui

---
 opt_gui                                       |   1 -
 opt_gui/CMakeLists.txt                        |  71 +++
 opt_gui/acknowledge                           |   5 +
 opt_gui/conf/opt_gui.yaml                     |  11 +
 .../component/opt_streaming_component.hpp     |  67 +++
 opt_gui/include/opt_gui/main_application.hpp  |  30 ++
 opt_gui/launch/opt_gui.launch                 |  12 +
 opt_gui/package.xml                           |  62 +++
 opt_gui/resources/Rois_listview_delegate.qml  | 138 +++++
 opt_gui/resources/images.qrc                  |   5 +
 opt_gui/resources/main_window.qml             | 484 ++++++++++++++++++
 opt_gui/resources/openptrack-logo.png         | Bin 0 -> 22359 bytes
 opt_gui/resources/qml.qrc                     |   6 +
 .../src/component/opt_streaming_component.cpp | 182 +++++++
 opt_gui/src/main_application.cpp              |  63 +++
 opt_gui/src/opt_gui_main.cpp                  |  14 +
 16 files changed, 1150 insertions(+), 1 deletion(-)
 delete mode 160000 opt_gui
 create mode 100644 opt_gui/CMakeLists.txt
 create mode 100644 opt_gui/acknowledge
 create mode 100644 opt_gui/conf/opt_gui.yaml
 create mode 100644 opt_gui/include/component/opt_streaming_component.hpp
 create mode 100644 opt_gui/include/opt_gui/main_application.hpp
 create mode 100644 opt_gui/launch/opt_gui.launch
 create mode 100644 opt_gui/package.xml
 create mode 100644 opt_gui/resources/Rois_listview_delegate.qml
 create mode 100644 opt_gui/resources/images.qrc
 create mode 100644 opt_gui/resources/main_window.qml
 create mode 100644 opt_gui/resources/openptrack-logo.png
 create mode 100644 opt_gui/resources/qml.qrc
 create mode 100644
opt_gui/src/component/opt_streaming_component.cpp create mode 100644 opt_gui/src/main_application.cpp create mode 100644 opt_gui/src/opt_gui_main.cpp diff --git a/opt_gui b/opt_gui deleted file mode 160000 index 9ce876f..0000000 --- a/opt_gui +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 9ce876ff04a3bb58fed7b7b82c9ca58b68c0c65c diff --git a/opt_gui/CMakeLists.txt b/opt_gui/CMakeLists.txt new file mode 100644 index 0000000..3195bf3 --- /dev/null +++ b/opt_gui/CMakeLists.txt @@ -0,0 +1,71 @@ +############################################################################## +# CMake +############################################################################## +INCLUDE(CheckCXXCompilerFlag) +CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11) +CHECK_CXX_COMPILER_FLAG("-std=c++0x" COMPILER_SUPPORTS_CXX0X) +IF(COMPILER_SUPPORTS_CXX11) +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") +ELSEIF(COMPILER_SUPPORTS_CXX0X) +SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") +ELSE() + MESSAGE(ERROR "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.") +ENDIF() +cmake_minimum_required(VERSION 2.8.0) +project(opt_gui) + +############################################################################## +# Catkin +############################################################################## + +# qt_build provides the qt cmake glue, roscpp the comms for a default talker +find_package(catkin REQUIRED COMPONENTS qt_build roscpp sensor_msgs image_transport cv_bridge) + + +set(QML_IMPORT_PATH "${QML_IMPORT_PATH};${CATKIN_GLOBAL_LIB_DESTINATION}" ) +set(QML_IMPORT_PATH2 "${QML_IMPORT_PATH};${CATKIN_GLOBAL_LIB_DESTINATION}" ) +include_directories(${catkin_INCLUDE_DIRS} include) +# Use this to define what the package will export (e.g. libs, headers). +# Since the default here is to produce only a binary, we don't worry about +# exporting anything. 
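# (Overview of the Qt sections below: QT5_ADD_RESOURCES compiles the .qrc
# bundles into C++ sources, QT5_WRAP_CPP runs moc over the globbed headers
# that declare Q_OBJECT, and both results are compiled into the single
# opt_gui executable together with the globbed src/*.cpp sources.)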
+#add_message_files(FILES Roi_object.msg Roi_object_array.msg) +#generate_messages(DEPENDENCIES std_msgs geometry_msgs sensor_msgs) + +catkin_package() + +#generate_messages() +############################################################################## +# Qt Environment +############################################################################## + +# this comes from qt_build's qt-ros.cmake which is automatically +# included via the dependency ca ll in package.xml +#rosbuild_prepare_qt4(QtCore Qtgui QtQml QtQuick) # Add the appropriate components to the component list here +find_package(Qt5 COMPONENTS Core Gui Qml Quick REQUIRED) + +############################################################################## +# Sections +############################################################################## + +file(GLOB QT_RESOURCES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} resources/*.qrc) +file(GLOB_RECURSE QT_MOC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} FOLLOW_SYMLINKS include/opt_gui/*.hpp include/component/*.hpp) + +QT5_ADD_RESOURCES(QT_RESOURCES_CPP ${QT_RESOURCES}) +QT5_WRAP_CPP(QT_MOC_HPP ${QT_MOC}) + +############################################################################## +# Sources +############################################################################## + +file(GLOB_RECURSE QT_SOURCES RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} FOLLOW_SYMLINKS src/*.cpp src/component/*.cpp) + +############################################################################## +# Binaries +############################################################################## + +add_executable(opt_gui ${QT_SOURCES} ${QT_RESOURCES_CPP} ${QT_FORMS_HPP} ${QT_MOC_HPP}) +qt5_use_modules(opt_gui Quick Core) +target_link_libraries(opt_gui ${QT_LIBRARIES} ${catkin_LIBRARIES}) +target_include_directories(opt_gui PUBLIC include ${catkin_INCLUDE_DIRS}) +add_dependencies(opt_gui ${catkin_EXPORTED_TARGETS}) +install(TARGETS opt_gui RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}) diff --git a/opt_gui/acknowledge b/opt_gui/acknowledge new file mode 100644 index 0000000..10eb154 --- /dev/null +++ b/opt_gui/acknowledge @@ -0,0 +1,5 @@ +rqt_image_view +bluesat/owr-qtgui +kinect2_bridge +iai_kinect + diff --git a/opt_gui/conf/opt_gui.yaml b/opt_gui/conf/opt_gui.yaml new file mode 100644 index 0000000..6671399 --- /dev/null +++ b/opt_gui/conf/opt_gui.yaml @@ -0,0 +1,11 @@ +view_topic_1: /kinect2_head/rgb_lowres/image + +view_topic_2: /kinect2_far/rgb_lowres/image + +view_topic_3: /kinect2_lenovo/rgb_lowres/image + +view_topic_4: /kinect2_head/rgb_lowres/image + +view_topic_5: /kinect2_head/rgb_lowres/image + +view_topic_6: /kinect2_head/rgb_lowres/image diff --git a/opt_gui/include/component/opt_streaming_component.hpp b/opt_gui/include/component/opt_streaming_component.hpp new file mode 100644 index 0000000..7493e1f --- /dev/null +++ b/opt_gui/include/component/opt_streaming_component.hpp @@ -0,0 +1,67 @@ +#ifndef OPT_Streaming_Component_H +#define OPT_Streaming_Component_H + +//QT +#include +#include +#include + +//ROS +#include +#include +#include + +#include +#include +#include "opencv2/highgui/highgui.hpp" + +#include +#include +class OPT_Streaming_Component : public QQuickPaintedItem { + //make this a Qt Widget + Q_OBJECT + // defines a qml value for the topic + Q_PROPERTY(QString topic READ get_topic WRITE set_topic NOTIFY topic_changed) + + public: + // Constructor, takes parent widget, which defaults to null + OPT_Streaming_Component(QQuickItem * parent = 0); + + void paint(QPainter *painter); + void 
setup(ros::NodeHandle * nh); + + //getters and setters + void set_topic(const QString &new_value); + QString get_topic() const; + + Q_INVOKABLE void save_ROI(QString sensor_name,int no_ROI,QString roi_name, int rect_X,int rect_Y, int rect_width, int rect_height ); + Q_INVOKABLE void set_roi_name(int index, QString roi_name); + Q_INVOKABLE void publish_rois(); +signals: + void topic_changed(); + + private: + void receive_image(const sensor_msgs::Image::ConstPtr & msg); + + // ROS + ros::NodeHandle * nh; + image_transport::ImageTransport * img_trans; + image_transport::Subscriber image_sub; + QString topic_value; + bool ros_ready; + // Used for buffering the image +// QImage * current_image; + QImage current_image; + cv::Mat conversion_mat_; + uchar * current_buffer; + + //for publish msgs + int number_of_rois; + opt_msgs::Image2D_roi current_image2D_roi_msg; + opt_msgs::Image2D_roi_array image2D_rois_msg; + ros::Publisher image2D_rois_pub; +// gui::Roi_object_array::Ptr rois(new Roi_object_array); + +}; + +#endif // OPT_Streaming_Component_H diff --git a/opt_gui/include/opt_gui/main_application.hpp b/opt_gui/include/opt_gui/main_application.hpp new file mode 100644 index 0000000..71e6996 --- /dev/null +++ b/opt_gui/include/opt_gui/main_application.hpp @@ -0,0 +1,30 @@ +#ifndef MAIN_APPLICATION_H +#define MAIN_APPLICATION_H + +#include +#include + +class Main_Application : public QQmlApplicationEngine { + Q_OBJECT + public: + Main_Application(); + // this method is used to setup all the ros functionality we need + // before the application starts runnings + void run(); + + // this defines a slot that will be called when the application is idle + public slots: + void main_loop(); + + private: + ros::NodeHandle nh; + std::string view_topic1; + std::string view_topic2; + std::string view_topic3; + std::string view_topic4; + std::string view_topic5; + std::string view_topic6; + +}; + +#endif // MAIN_APPLICATION_H diff --git a/opt_gui/launch/opt_gui.launch b/opt_gui/launch/opt_gui.launch new file mode 100644 index 0000000..0669527 --- /dev/null +++ b/opt_gui/launch/opt_gui.launch @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/opt_gui/package.xml b/opt_gui/package.xml new file mode 100644 index 0000000..d2a47d1 --- /dev/null +++ b/opt_gui/package.xml @@ -0,0 +1,62 @@ + + + opt_gui + 0.1.0 + The opt_gui package + + + + + OpenPTrack + + + + + + Apache 2.0 + + + + + + + + + + + + + Yongheng Zhao + + + + + + + + + + + + + + catkin + qt_build + roscpp + image_transport + sensor_msgs + libqt4-dev + cv_bridge + qt_build + image_transport + sensor_msgs + roscpp + libqt4-dev + cv_bridge + + + + + + + diff --git a/opt_gui/resources/Rois_listview_delegate.qml b/opt_gui/resources/Rois_listview_delegate.qml new file mode 100644 index 0000000..bd8ddef --- /dev/null +++ b/opt_gui/resources/Rois_listview_delegate.qml @@ -0,0 +1,138 @@ +import QtQuick 2.0 +import QtQuick.Controls 1.1 + +Component { + id: imageDelegate + Item { + id: imagewraper + width: show_rois.width/3 + height: show_rois.width/3+20 + + Rectangle { + id: image + color: "black" +// anchors.left: parent.left + anchors.top: parent.top + width: show_rois.width/3 + height:show_rois.width/3+20 + Image { + id:image_ + anchors.top: parent.top + width: show_rois.width/3 + height:show_rois.width/3 + z:1 + fillMode: Image.Stretch + source: roiSource + } + TextInput { + id:roi_name +// anchors.left: parent.left + anchors.top: image_.bottom + anchors.left: image_.left + height: 20 + width: show_rois.width/3-50 + color: "lime" + text:roiName + 
font.pixelSize: 16 + } + Button{ + id:confirm_button + anchors.top: image_.bottom + anchors.right: image_.right + height: 20 + width: 50 + z:1 + text: qsTr("Confirm") + onClicked: + { + roi_name.focus=false + main_view.focus=true + roiblock.model.get(index).roiName=roi_name.text + switch(main_view.current_camera) + { + case 1: + videoStream1.set_roi_name(index,roi_name.text) + confirm_button.visible=false + break; + case 2: + videoStream2.set_roi_name(index,roi_name.text) + confirm_button.visible=false + break; + case 3: + videoStream3.set_roi_name(index,roi_name.text) + confirm_button.visible=false + break; + case 4: + videoStream4.set_roi_name(index,roi_name.text) + confirm_button.visible=false + break; + case 5: + videoStream5.set_roi_name(index,roi_name.text) + confirm_button.visible=false + break; + case 6: + videoStream6.set_roi_name(index,roi_name.text) + confirm_button.visible=false + break; + } + } + } + } + + MouseArea { + id:imagearea + z:0 + anchors.top: imagewraper.top + anchors.bottom: imagewraper.bottom + anchors.bottomMargin: 20 + anchors.left: imagewraper.left + anchors.right: imagewraper.right + + // anchors.fill: imagewraper + // hoverEnabled: true + // drag.target: imagewraper + onClicked: { + switch(imagewraper.state) + { + case "":imagewraper.state="expanded";break; + case "expanded":imagewraper.state="";break; + // case "doubleexpanded":imagewraper.state="doubleexpanded";break; + default:imagewraper.state="";break; + } + } + // onDoubleClicked: { + // switch(imagewraper.state) + // { + // case "expanded":imagewraper.state="expanded";break; + // case "":imagewraper.state="doubleexpanded";break + // case "doubleexpanded":imagewraper.state="expanded";break; + // default:imagewraper.state="expanded";break; + // } + // } + } + states: [ + State { + name: "expanded" + PropertyChanges { target: imagewraper; height: roiblock.height/2 ;width:roiblock.width;z:100} + PropertyChanges { target: image_; width: roiblock.width; height: roiblock.height/2} + PropertyChanges { target: imagewraper.ListView.view; contentX: imagewraper.x;contentY: imagewraper.y; interactive: true } + } + // ,State { + // name: "doubleexpanded" + // PropertyChanges { target: imagewraper; height: roiblock.height*2 ;width:imageblock.width*2;z:100} + // PropertyChanges { target: image; width: imageblock.width*2; height: imageblock.height*2} + // PropertyChanges { target: imagewraper.GridView.view; contentX: imagewraper.x;contentY: imagewraper.y.mouseY; interactive: true } + // } + ] + + + transitions: [ + Transition { + NumberAnimation { + duration: 100; + properties: "height,width,anchors.rightMargin,anchors.topMargin,opacity,contentX,contentY" + } + } + ] + } +} diff --git a/opt_gui/resources/images.qrc b/opt_gui/resources/images.qrc new file mode 100644 index 0000000..78b0ff2 --- /dev/null +++ b/opt_gui/resources/images.qrc @@ -0,0 +1,5 @@ + + + openptrack-logo.png + + diff --git a/opt_gui/resources/main_window.qml b/opt_gui/resources/main_window.qml new file mode 100644 index 0000000..b4cb761 --- /dev/null +++ b/opt_gui/resources/main_window.qml @@ -0,0 +1,484 @@ +import QtQuick 2.0 +import QtQuick.Window 2.1 +import OPT_Streaming_Component 1.0 +import QtQuick.Controls 1.1 +import QtQuick.Layouts 1.1 + +Window { + id: main_window + visibility: Window.FullScreen + width: Screen.width + height: Screen.height + title: "MOT" + visible: true + +// property string sensor_topic1:"/kinect2_head/rgb_lowres/image" +// property string sensor_topic2:"/kinect2_far/rgb_lowres/image" +// property string 
sensor_topic3:"/kinect2_lenovo/rgb_lowres/image" +// property string sensor_topic4:"/kinect2_head/rgb_lowres/image" +// property string sensor_topic5:"/kinect2_head/rgb_lowres/image" +// property string sensor_topic6:"/kinect2_head/rgb_lowres/image" + + RowLayout{ + spacing: 10 + ColumnLayout { + spacing: 10 + Rectangle{ + id:sensor_01 + property int number_rois : 0 + width: (Screen.height-40)*16/54 + height: (Screen.height-40)/6 + color: "#c92c2c" + OptStreamingComponent { + id: videoStream1 + anchors.fill: parent + objectName: "videoStream1" +// topic: topic1.text + } + TextInput { + id: topic1 + anchors.left: parent.left + anchors.top: parent.top + color: "lime" + text: videoStream1.topic + font.pixelSize: 16 + font.bold: true + } + MouseArea{ + anchors.top: parent.top + anchors.topMargin: 20 + anchors.bottom: parent.bottom + anchors.left: parent.left + anchors.right: parent.right + onClicked: + { + main_videoStream.topic= videoStream1.topic + main_view.current_camera=1 + main_view.no_roi=sensor_01.number_rois + roiblock.model=roiModel1 + topic1.font.bold=true + topic2.font.bold=false + topic3.font.bold=false + topic4.font.bold=false + topic5.font.bold=false + topic6.font.bold=false + } + } + } + Rectangle{ + id:sensor_02 + property int number_rois : 0 + width: sensor_01.width + height: sensor_01.height + color: "#c92c2c" + OptStreamingComponent { + anchors.fill: parent + objectName: "videoStream2" + id: videoStream2 +// topic: topic2.text + } + TextInput { + id: topic2 + anchors.left: parent.left + anchors.top: parent.top + color: "lime" + text: videoStream2.topic + font.pixelSize: 16 + font.bold: false + } + MouseArea{ + anchors.top: parent.top + anchors.topMargin: 20 + anchors.bottom: parent.bottom + anchors.left: parent.left + anchors.right: parent.right + onClicked: + { + main_videoStream.topic= videoStream2.topic + main_view.current_camera=2 + main_view.no_roi=sensor_02.number_rois + roiblock.model=roiModel2 + topic1.font.bold=false + topic2.font.bold=true + topic3.font.bold=false + topic4.font.bold=false + topic5.font.bold=false + topic6.font.bold=false + } + } + } + Rectangle{ + id:sensor_03 + property int number_rois : 0 + + width: sensor_01.width + height: sensor_01.height + color: "#c92c2c" + OptStreamingComponent { + anchors.fill: parent + objectName: "videoStream3" + id: videoStream3 +// topic: topic3.text + } + TextInput { + id: topic3 + anchors.left: parent.left + anchors.top: parent.top + color: "lime" + text: videoStream3.topic + font.pixelSize: 16 + font.bold: false + } + MouseArea{ + anchors.top: parent.top + anchors.topMargin: 20 + anchors.bottom: parent.bottom + anchors.left: parent.left + anchors.right: parent.right + onClicked: + { + main_videoStream.topic= videoStream3.topic + main_view.current_camera=3 + main_view.no_roi=sensor_03.number_rois + roiblock.model=roiModel3 + topic1.font.bold=false + topic2.font.bold=false + topic3.font.bold=true + topic4.font.bold=false + topic5.font.bold=false + topic6.font.bold=false + + } + } + } + Rectangle{ + id:sensor_04 + property int number_rois : 0 + width: sensor_01.width + height: sensor_01.height + color: "#c92c2c" + OptStreamingComponent { + anchors.fill: parent + objectName: "videoStream4" + id: videoStream4 +// topic: topic4.text + } + TextInput { + id: topic4 + anchors.left: parent.left + anchors.top: parent.top + color: "lime" + text: videoStream4.topic + font.pixelSize: 16 + font.bold: false + } + MouseArea{ + anchors.top: parent.top + anchors.topMargin: 20 + anchors.bottom: parent.bottom + anchors.left: 
parent.left + anchors.right: parent.right + onClicked: + { + main_videoStream.topic= videoStream4.topic + main_view.current_camera=4 + main_view.no_roi=sensor_04.number_rois + roiblock.model=roiModel4 + topic1.font.bold=false + topic2.font.bold=false + topic3.font.bold=false + topic4.font.bold=true + topic5.font.bold=false + topic6.font.bold=false + + } + } + } + Rectangle{ + id:sensor_05 + property int number_rois : 0 + width: sensor_01.width + height: sensor_01.height + color: "#c92c2c" + OptStreamingComponent { + anchors.fill: parent + objectName: "videoStream5" + id: videoStream5 +// topic: topic5.text + } + TextInput { + id: topic5 + anchors.left: parent.left + anchors.top: parent.top + color: "lime" + text: videoStream5.topic + font.pixelSize: 16 + font.bold: false + } + MouseArea{ + anchors.top: parent.top + anchors.topMargin: 20 + anchors.bottom: parent.bottom + anchors.left: parent.left + anchors.right: parent.right + onClicked: + { + main_videoStream.topic= videoStream5.topic + main_view.current_camera=5 + main_view.no_roi=sensor_05.number_rois + roiblock.model=roiModel5 + topic1.font.bold=false + topic2.font.bold=false + topic3.font.bold=false + topic4.font.bold=false + topic5.font.bold=true + topic6.font.bold=false + + } + } + } + Rectangle{ + id:sensor_06 + property int number_rois : 0 + width: sensor_01.width + height: sensor_01.height + color: "#c92c2c" + OptStreamingComponent { + anchors.fill: parent + objectName: "videoStream6" + id: videoStream6 +// topic: topic6.text + } + TextInput { + id: topic6 + anchors.left: parent.left + anchors.top: parent.top + color: "lime" + text: videoStream6.topic + font.pixelSize: 16 + font.bold: false + } + MouseArea{ + anchors.top: parent.top + anchors.topMargin: 20 + anchors.bottom: parent.bottom + anchors.left: parent.left + anchors.right: parent.right + onClicked: + { + main_videoStream.topic= videoStream6.topic + main_view.current_camera=6 + main_view.no_roi=sensor_06.number_rois + roiblock.model=roiModel6 + topic1.font.bold=false + topic2.font.bold=false + topic3.font.bold=false + topic4.font.bold=false + topic5.font.bold=false + topic6.font.bold=true + } + } + } + } + ColumnLayout { + spacing: 50 + Image { + id: openptrack_logo + source: "/images/openptrack-logo.png" + width: 244 + height: 116 + anchors.horizontalCenter: parent.horizontalCenter + fillMode: Image.PreserveAspectFit + } + Rectangle{ + width: 960 + height:540 + color: "#c92c2c" + focus: true + id: main_view + property int current_camera : 1 + property int no_roi : 0 + property int rect_X + property int rect_Y + property int rect_width + property int rect_height + + // property string roi_name_s + OptStreamingComponent { + id:main_videoStream + anchors.fill: parent + objectName: "main_videoStream" +// topic: main_topic.text + } + TextInput { + id: main_topic + anchors.left: parent.left + anchors.top: parent.top + z:1 + color: "lime" + text: main_videoStream.topic + font.pixelSize: 20 + font.bold: true + font.italic: true + } + MouseArea { + id: selectArea; + property bool ready2add : false + z:0 + anchors.top: parent.top + anchors.bottom: parent.bottom + anchors.left: parent.left + anchors.right: parent.right + onPressed: { + if (highlightItem !== null) + { + // if there is already a selection, delete it + highlightItem.destroy (); + } + // create a new rectangle at the wanted position + highlightItem = roiComponent.createObject (selectArea, {"x" : mouse.x, "y" : mouse.y ,"width" : 0, "height" :0}); + } + onPositionChanged: { + highlightItem.width = (Math.abs (mouse.x - 
highlightItem.x)); + highlightItem.height = (Math.abs (mouse.y - highlightItem.y)); + } + onReleased: { + main_view.rect_X=highlightItem.x; + main_view.rect_Y=highlightItem.y; + main_view.rect_width=highlightItem.width; + main_view.rect_height=highlightItem.height; + ready2add=true + } + property Rectangle highlightItem : null; + Component { + id: roiComponent; + Rectangle { + id:roi_rec + border.color: "red" + border.width: 2 + color: "transparent" + x: selectArea.x + y: selectArea.y + width: selectArea.width + height:selectArea.height + } + } + } + Keys.onSpacePressed:{ + if(selectArea.ready2add) + { + no_roi++ + var topic_name_list = main_topic.text.split('/'); + var sensor_name=topic_name_list[1]; + var roi_name=sensor_name+"_roi_"+no_roi.toString(); + var roiSourceUrl="file:/tmp/"+sensor_name+"_roi_"+no_roi.toString()+".png" + + switch(current_camera) + { + case 1: + videoStream1.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height) + roiModel1.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl }); + (sensor_01.number_rois)++;//use the the number of the models + selectArea.highlightItem.destroy (); + break; + case 2: + videoStream2.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height) + roiModel2.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl }); + (sensor_02.number_rois)++; + selectArea.highlightItem.destroy (); + break; + case 3: + videoStream3.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height) + roiModel3.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl }); + (sensor_03.number_rois)++; + selectArea.highlightItem.destroy (); + break; + case 4: + videoStream4.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height) + roiModel4.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl }); + (sensor_04.number_rois)++; + selectArea.highlightItem.destroy (); + break; + case 5: + videoStream5.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height) + roiModel5.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl }); + (sensor_05.number_rois)++; + selectArea.highlightItem.destroy (); + break; + case 6: + videoStream6.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height) + roiModel6.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl }); + (sensor_06.number_rois)++; + selectArea.highlightItem.destroy (); + break; + } + } + selectArea.ready2add=false + } + } + } + Rectangle{ + id: show_rois + width:Screen.width-(Screen.height-40)*16/54-980 + height: Screen.height + ListView { + id: roiblock + anchors.top: parent.top + width: parent.width + height: parent.height-50 + spacing: 20 + clip: true + model: roiModel1 + delegate: Rois_listview_delegate{} + ListModel{ + id: roiModel1 + } + ListModel{ + id: roiModel2 + } + ListModel{ + id: roiModel3 + } + ListModel{ + id: roiModel4 + } + ListModel{ + id: roiModel5 + } + ListModel{ + id: roiModel6 + } + } + Button{ + id: publish_rois + anchors.top: roiblock.bottom + anchors.topMargin: 20 + anchors.left: roiblock.left + anchors.leftMargin: 40 + height: 20 + width: 100 + text: qsTr("Start Detection") + + onClicked: + { + switch(main_view.current_camera) + { + case 1: videoStream1.publish_rois(); + break; + case 2: videoStream2.publish_rois(); + break; + case 3: videoStream3.publish_rois(); + break; + case 4: videoStream4.publish_rois(); + break; + case 5: videoStream5.publish_rois(); + break; + case 6: 
videoStream6.publish_rois();
+                    break;
+                }
+            }
+        }
+    }
+    }
+}
+
diff --git a/opt_gui/resources/openptrack-logo.png b/opt_gui/resources/openptrack-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..c536f01fdf1d45b8dfc52f8ce3ea1c36e43c83ec
GIT binary patch
literal 22359
[base85-encoded PNG data omitted]
literal 0
HcmV?d00001

diff --git a/opt_gui/resources/qml.qrc b/opt_gui/resources/qml.qrc
new file mode 100644
index 0000000..552d54b
--- /dev/null
+++ b/opt_gui/resources/qml.qrc
@@ -0,0 +1,6 @@
+<RCC>
+    <qresource prefix="/">
+        <file>main_window.qml</file>
+        <file>Rois_listview_delegate.qml</file>
+    </qresource>
+</RCC>
diff --git a/opt_gui/src/component/opt_streaming_component.cpp b/opt_gui/src/component/opt_streaming_component.cpp
new file mode 100644
index 0000000..07ae0e8
--- /dev/null
+++ b/opt_gui/src/component/opt_streaming_component.cpp
@@ -0,0 +1,182 @@
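+// OPT_Streaming_Component is the QQuickPaintedItem behind each video view in
+// the GUI: it subscribes to a ROS image topic through image_transport,
+// converts every incoming frame to RGB888 with cv_bridge, repaints itself,
+// and collects the 2D ROIs that the user marks on the stream.
+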
+#include "../../include/component/opt_streaming_component.hpp"
+
+OPT_Streaming_Component::OPT_Streaming_Component(QQuickItem * parent) :
+  QQuickPaintedItem(parent),
+  current_image(NULL),
+  current_buffer(NULL),
+  topic_value("/cam0"),
+  ros_ready(false),
+  img_trans(NULL),
+  number_of_rois(0) {
+}
+
+void OPT_Streaming_Component::setup(ros::NodeHandle * nh) {
+  img_trans = new image_transport::ImageTransport(*nh);
+  //TODO: make these parameters of the component
+  image_sub = img_trans->subscribe(
+        topic_value.toStdString(),
+        1,
+        &OPT_Streaming_Component::receive_image,
+        this,
+        image_transport::TransportHints("raw")
+        );
+
+  std::string image2D_rois_pub_name = topic_value.toStdString().substr(0, topic_value.toStdString().rfind('/')) + "/image2D_rois";
+  image2D_rois_pub = nh->advertise<opt_msgs::Image2D_roi_array>(image2D_rois_pub_name, 3);
+  ros_ready = true;
+  ROS_INFO("Setup of video component complete");
+}
+
+/*
+// first version, kept for reference: copies the raw message bytes into a
+// manually managed buffer
+void OPT_Streaming_Component::receive_image(const sensor_msgs::Image::ConstPtr &msg) {
+  // check to see if we already have an image frame; if we do, we need to
+  // delete it to avoid memory leaks
+  if( current_image ) {
+    delete current_image;
+  }
+  // allocate a buffer of sufficient size to contain our video frame
+  uchar * temp_buffer = (uchar *) malloc(sizeof(uchar) * msg->data.size());
+
+  // and copy the message into the buffer
+  // we need to do this because the QImage api requires the buffer we pass in to
+  // continue to exist whilst the image is in use, but the msg and its data will
+  // be lost once we leave this context
+  current_image = new QImage(
+        temp_buffer,
+        msg->width,
+        msg->height,
+        QImage::Format_RGB888 // TODO: detect the type from the message
+        );
+
+  ROS_INFO("Received Message");
+
+  // we then swap out our buffer to avoid memory leaks
+  if(current_buffer) {
+    delete current_buffer;
+    current_buffer = temp_buffer;
+  }
+
+  // finally we need to re-render the component to display the new image
+  update();
+}
+*/
+
+void OPT_Streaming_Component::receive_image(const sensor_msgs::Image::ConstPtr &msg) {
+  image2D_rois_msg.header=msg->header;
+
+  //-----------------------------This part comes from rqt_image_view----------------------------------
+  try
+  {
+    // First let cv_bridge do its magic
+    cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::RGB8);
+    conversion_mat_ = cv_ptr->image;
+  }
+  catch (cv_bridge::Exception& e)
+  {
+    try
+    {
+      // If we're here, there is no conversion that makes sense, but let's try to imagine a few first
+      cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg);
+      if (msg->encoding == "CV_8UC3")
+      {
+        // assuming it is rgb
+        conversion_mat_ = cv_ptr->image;
+      } else if (msg->encoding == "8UC1") {
+        // convert gray to rgb
+        cv::cvtColor(cv_ptr->image, conversion_mat_, CV_GRAY2RGB);
+      } else if (msg->encoding == "16UC1" || msg->encoding == "32FC1") {
+        // scale / quantify
+        double min = 0;
+        double max = 10;
+        if (msg->encoding == "16UC1") max *= 1000;
+        // dynamically adjust range based on min/max in image
+        cv::minMaxLoc(cv_ptr->image, &min, &max);
+        if (min == max) {
+          // completely homogeneous images are displayed in gray
+          min = 0;
+          max = 2;
+        }
+        cv::Mat img_scaled_8u;
+        cv::Mat(cv_ptr->image-min).convertTo(img_scaled_8u, CV_8UC1, 255. / (max - min));
+        cv::cvtColor(img_scaled_8u, conversion_mat_, CV_GRAY2RGB);
+      } else {
+        qWarning("ImageView.callback_image() could not convert image from '%s' to 'rgb8' (%s)", msg->encoding.c_str(), e.what());
+        return;
+      }
+    }
+    catch (cv_bridge::Exception& e)
+    {
+      qWarning("ImageView.callback_image() while trying to convert image from '%s' to 'rgb8' an exception was thrown (%s)", msg->encoding.c_str(), e.what());
+      return;
+    }
+  }
+  //-----------------------------This part comes from rqt_image_view----------------------------------
+
+  // the image must be copied since it uses conversion_mat_ for storage, which is asynchronously overwritten in the next callback invocation
+  current_image=QImage(conversion_mat_.data,conversion_mat_.cols,conversion_mat_.rows,QImage::Format_RGB888);
+  // finally we need to re-render the component to display the new image
+  update();
+}
+
+void OPT_Streaming_Component::paint(QPainter * painter) {
+  painter->drawImage(boundingRect().adjusted(1, 1, -1, -1), current_image);
+}
+
+void OPT_Streaming_Component::set_topic(const QString & new_value) {
+  if(topic_value != new_value) {
+    topic_value = new_value;
+    if(ros_ready) {
+      image_sub.shutdown();
+      image_sub = img_trans->subscribe(
+            topic_value.toStdString(),
+            1,
+            &OPT_Streaming_Component::receive_image,
+            this,
+            image_transport::TransportHints("raw")
+            );
+    }
+    emit topic_changed();
+  }
+}
+
+QString OPT_Streaming_Component::get_topic() const {
+  return topic_value;
+}
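+
+// save_ROI: clip the rectangle drawn in the QML main_view against the current
+// frame, write the crop to /tmp/<sensor>_roi_<n>.png (the thumbnail shown in
+// the ROI list), and queue an Image2D_roi entry; nothing is published until
+// publish_rois() is triggered by the "Start Detection" button.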
+void OPT_Streaming_Component::save_ROI(QString sensor_name,int no_ROI,QString roi_name, int rect_X,int rect_Y, int rect_width, int rect_height )
+{
+  //get the roi from the bounding box you created in the qml main_view and save it as a file
+  cv::Rect roi_rec=cv::Rect(rect_X,rect_Y,rect_width,rect_height) & cv::Rect(0,0,conversion_mat_.size().width,conversion_mat_.size().height);
+  cv::Mat roi(conversion_mat_,roi_rec);
+  cv::cvtColor(roi, roi, CV_BGR2RGB);
+  QString roi_file_name="/tmp/"+sensor_name+"_roi_"+ QString::number(no_ROI)+".png";
+  cv::imwrite(roi_file_name.toStdString(),roi);
+
+  //create an image2D_roi_msg for the roi and push it into the image2D_roi_array msg called image2D_rois_msg
+  current_image2D_roi_msg.no=no_ROI;
+  current_image2D_roi_msg.name=roi_name.toStdString();
+  current_image2D_roi_msg.x=rect_X;
+  current_image2D_roi_msg.y=rect_Y;
+  current_image2D_roi_msg.width=rect_width;
+  current_image2D_roi_msg.height=rect_height;
+  image2D_rois_msg.Rois.push_back(current_image2D_roi_msg);
+}
+
+void OPT_Streaming_Component::set_roi_name(int index, QString roi_name)
+{
+  image2D_rois_msg.Rois[index-number_of_rois].name=roi_name.toStdString();
+}
+
+void OPT_Streaming_Component::publish_rois()
+{
+  image2D_rois_pub.publish(image2D_rois_msg);
+  number_of_rois=number_of_rois+image2D_rois_msg.Rois.size();
+  std::cout << number_of_rois << std::endl;
+}
diff --git a/opt_gui/src/main_application.cpp b/opt_gui/src/main_application.cpp
new file mode 100644
--- /dev/null
+++ b/opt_gui/src/main_application.cpp
@@ -0,0 +1,63 @@
+#include <QTimer>
+#include "../include/component/opt_streaming_component.hpp"
+#include "../../opt_gui/include/opt_gui/main_application.hpp"
+
+Main_Application::Main_Application() {
+
+}
+
+void Main_Application::run() {
+
+
+  nh.param("view_topic_1",view_topic1,std::string("/kinect2_head/rgb_lowres/image"));
+  nh.param("view_topic_2",view_topic2,std::string("/kinect2_far/rgb_lowres/image"));
+  nh.param("view_topic_3",view_topic3,std::string("/kinect2_lenovo/rgb_lowres/image"));
+  nh.param("view_topic_4",view_topic4,std::string("/kinect2_head/rgb_lowres/image"));
+  nh.param("view_topic_5",view_topic5,std::string("/kinect2_head/rgb_lowres/image"));
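+  // (the kinect2_* topics above and below are only defaults; each
+  // view_topic_N parameter is normally overridden from the opt_gui launch
+  // file to match the sensors actually present)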
nh.param("view_topic_6",view_topic6,std::string("/kinect2_head/rgb_lowres/image")); + + + qmlRegisterType("OPT_Streaming_Component", 1, 0, "OptStreamingComponent"); + // this loads the qml file we are about to create + this->load(QUrl(QStringLiteral("qrc:/main_window.qml"))); + + // Setup a timer to get the application's idle loop + QTimer * timer = new QTimer(this); + connect(timer, SIGNAL(timeout()), this, SLOT(main_loop())); + timer->start(0); + + // setup the video component + OPT_Streaming_Component * video1 = this->rootObjects()[0]->findChild(QString("videoStream1")); + video1->set_topic(QString::fromStdString(view_topic1)); + video1->setup(&nh); + + OPT_Streaming_Component * video2 = this->rootObjects()[0]->findChild(QString("videoStream2")); + video2->set_topic(QString::fromStdString(view_topic2)); + video2->setup(&nh); + + OPT_Streaming_Component * video3 = this->rootObjects()[0]->findChild(QString("videoStream3")); + video3->set_topic(QString::fromStdString(view_topic3)); + video3->setup(&nh); + + OPT_Streaming_Component * video4 = this->rootObjects()[0]->findChild(QString("videoStream4")); + video4->set_topic(QString::fromStdString(view_topic4)); + video4->setup(&nh); + + OPT_Streaming_Component * video5 = this->rootObjects()[0]->findChild(QString("videoStream5")); + video5->set_topic(QString::fromStdString(view_topic5)); + video5->setup(&nh); + + OPT_Streaming_Component * video6 = this->rootObjects()[0]->findChild(QString("videoStream6")); + video6->set_topic(QString::fromStdString(view_topic6)); + video6->setup(&nh); + + OPT_Streaming_Component * video_main = this->rootObjects()[0]->findChild(QString("main_videoStream")); + video_main->set_topic(QString::fromStdString(view_topic1)); + video_main->setup(&nh); +} + +void Main_Application::main_loop() { + + ros::spinOnce(); + +} diff --git a/opt_gui/src/opt_gui_main.cpp b/opt_gui/src/opt_gui_main.cpp new file mode 100644 index 0000000..98a7c6f --- /dev/null +++ b/opt_gui/src/opt_gui_main.cpp @@ -0,0 +1,14 @@ +#include +#include +#include"../include/opt_gui/main_application.hpp" +int main(int argc, char **argv) { + // Set up ROS. 
+  ros::init(argc, argv, "gui_main");
+
+
+  QGuiApplication app(argc, argv);
+  Main_Application engine;
+  engine.run();
+
+  return app.exec();
+}

From 6a42a5e7e2d385e5f5003b11ca10bf9a9cbd05da Mon Sep 17 00:00:00 2001
From: yongheng1991
Date: Sat, 24 Jun 2017 08:37:54 +0200
Subject: [PATCH 7/9] use two initializations; make the opt_gui multiple
 camera views dynamic

---
 .../apps/multiple_objects_detection_node.cpp  |   19 +-
 .../conf/multiple_objects_detection.yaml      |   19 +-
 .../include/open_ptrack/detection/detection.h |    7 +
 .../multiple_objects_detection.h              | 1042 +++++++++--------
 .../object_detector.h                         |   52 +-
 detection/launch/camera_poses.txt             |    3 -
 .../launch/detection_node_kinect2_head.launch |   16 -
 detection/launch/detector_kinect2.launch      |    3 +-
 .../launch/object_detector_kinect2.launch     |   30 +
 .../launch/people_detector_kinect2.launch     |   33 +
 detection/src/detection.cpp                   |    7 +
 .../object_detector.cpp                       |  198 ++--
 opt_gui/CMakeLists.txt                        |    8 +-
 opt_gui/conf/opt_gui.yaml                     |   11 -
 opt_gui/data/readme                           |    6 +
 .../component/opt_streaming_component.hpp     |  107 +-
 opt_gui/include/opt_gui/main_application.hpp  |   15 +-
 opt_gui/launch/opt_gui.launch                 |    2 +-
 opt_gui/resources/DeleteRed.png               |  Bin 0 -> 22716 bytes
 opt_gui/resources/Rois_listview_delegate.qml  |   98 +-
 .../resources/Streaming_listview_delegate.qml |   58 +
 opt_gui/resources/images.qrc                  |    1 +
 opt_gui/resources/main_window.qml             |  547 +++------
 opt_gui/resources/qml.qrc                     |    1 +
 .../src/component/opt_streaming_component.cpp |  342 +++---
 opt_gui/src/main_application.cpp              |   88 +-
 opt_gui/src/opt_gui_main.cpp                  |    3 +-
 opt_msgs/CMakeLists.txt                       |    2 +-
 opt_msgs/CMakeLists.txt.user                  |  371 ------
 opt_msgs/msg/Detection.msg                    |    2 +
 opt_msgs/msg/Image2D_roi_file.msg             |    2 +
 opt_msgs/msg/ObjectName.msg                   |    2 +
 opt_msgs/msg/ObjectNameArray.msg              |    2 +
 opt_msgs/msg/Track.msg                        |    2 +-
 opt_msgs/msg/Track3D.msg                      |   18 +
 opt_msgs/msg/Track3DArray.msg                 |    3 +
 opt_utils/CMakeLists.txt                      |    3 +
 opt_utils/apps/ros2udp_converter.cpp          |    2 +-
 opt_utils/apps/ros2udp_converter_object.cpp   |  248 ++++
 opt_utils/launch/ros2udp_converter.launch     |   11 +-
 .../launch/single_camera_visualization.launch |   16 +
 tracking/CMakeLists.txt                       |    7 +-
 tracking/apps/tracker_node.cpp                |    9 +
 tracking/apps/tracker_object_node.cpp         |  834 +++++++++++++
 tracking/conf/multicamera_tracking.rviz       |   13 +-
 tracking/conf/object_tracker_multicamera.yaml |    2 +-
 tracking/conf/single_camera_tracking.rviz     |  238 ++++
 .../open_ptrack/tracking/track_object.h       |  409 +++++++
 .../include/open_ptrack/tracking/tracker.h    |  287 ++---
 .../open_ptrack/tracking/tracker_object.h     |  347 ++++++
 tracking/launch/detection_and_tracking.launch |   21 +-
 .../object_detection_and_tracking.launch      |   28 +
 .../people_detection_and_tracking.launch      |   15 +
 ...ople_detection_and_tracking_kinect1.launch |   15 +
 tracking/launch/tracker_network.launch        |   15 +-
 tracking/launch/tracker_single_camera.launch  |   64 +
 tracking/launch/tracking_node.launch          |    7 +-
 tracking/src/track_object.cpp                 |  648 ++++++++++
 tracking/src/tracker.cpp                      |    5 +
 tracking/src/tracker_object.cpp               |  597 ++++++++++
 60 files changed, 5110 insertions(+), 1851 deletions(-)
 delete mode 100644 detection/launch/camera_poses.txt
 delete mode 100644 detection/launch/detection_node_kinect2_head.launch
 create mode 100644 detection/launch/object_detector_kinect2.launch
 create mode 100644 detection/launch/people_detector_kinect2.launch
 delete mode 100644 opt_gui/conf/opt_gui.yaml
 create mode 100644 opt_gui/data/readme
 create mode 100644 opt_gui/resources/DeleteRed.png
 create mode 100644 opt_gui/resources/Streaming_listview_delegate.qml
 delete mode 100644 opt_msgs/CMakeLists.txt.user
 create mode 100644 opt_msgs/msg/Image2D_roi_file.msg
 create mode 100644 opt_msgs/msg/ObjectName.msg
 create mode 100644 opt_msgs/msg/ObjectNameArray.msg
 create mode 100644 opt_msgs/msg/Track3D.msg
 create mode 100644 opt_msgs/msg/Track3DArray.msg
 create mode 100644 opt_utils/apps/ros2udp_converter_object.cpp
 create mode 100644 opt_utils/launch/single_camera_visualization.launch
 create mode 100644 tracking/apps/tracker_object_node.cpp
 create mode 100644 tracking/conf/single_camera_tracking.rviz
 create mode 100644 tracking/include/open_ptrack/tracking/track_object.h
 create mode 100644 tracking/include/open_ptrack/tracking/tracker_object.h
 create mode 100644 tracking/launch/object_detection_and_tracking.launch
 create mode 100644 tracking/launch/people_detection_and_tracking.launch
 create mode 100644 tracking/launch/people_detection_and_tracking_kinect1.launch
 create mode 100644 tracking/launch/tracker_single_camera.launch
 create mode 100644 tracking/src/track_object.cpp
 create mode 100644 tracking/src/tracker_object.cpp

diff --git a/detection/apps/multiple_objects_detection_node.cpp b/detection/apps/multiple_objects_detection_node.cpp
index d9aad38..3adfd08 100644
--- a/detection/apps/multiple_objects_detection_node.cpp
+++ b/detection/apps/multiple_objects_detection_node.cpp
@@ -12,19 +12,23 @@
 using namespace std;
 using namespace cv;
 
-//typedef dynamic_reconfigure::Server ReconfigureServer;
 
+//Params for receiving the color and depth images
 std::string cameraName;
 bool useExact = false;
 bool useCompressed = false;
 
+
 std::string output_detection_topic;
+bool set_object_names;//whether this detector is used to set the object names
+
+//Params for background removal
 bool use_background_removal;
 int background_calculate_frames;
 int threshold_4_detecting_foreground;//threshold on the depth difference used to distinguish background from foreground
-bool show_2D_tracks;
-bool publish_world3D_rois;
+
+bool show_2D_tracks;//show the 2D tracks or not
 
 void configCb(detection::multiple_objects_detectionConfig &config, uint32_t level)
 {
@@ -54,14 +58,13 @@ int main(int argc, char** argv){
     return 0;
   }
 
-//  nh.param("sensor_name", cameraName, std::string("kinect2_head"));
   nh.param("use_background_removal",use_background_removal,false);
   nh.param("background_calculate_frames",background_calculate_frames,100);
   nh.param("threshold_4_detecting_foreground",threshold_4_detecting_foreground,100);
   nh.param("show_2D_track",show_2D_tracks,false);
   nh.param("output_detection_topic",output_detection_topic,std::string("/objects_detector/detections"));
+  nh.param("set_object_names",set_object_names,false);
 
-  nh.param("publish_world3D_rois",publish_world3D_rois,true);
 
   //set the static variables of the Object_Detector class; each variable is described in the class header
   nh.param("HMin",Object_Detector::HMin,0);
@@ -80,6 +83,7 @@ int main(int argc, char** argv){
 
   nh.param("Backprojection_Mode",Object_Detector::Backprojection_Mode,std::string("HSD"));
 
+
 //  std::cout << "output detection topic: " << output_detection_topic << std::endl;
 
   std::cout << "HSV range:"<< std::endl
@@ -105,8 +109,9 @@ int main(int argc, char** argv){
   server.setCallback(f);
 
 
-  Multiple_Objects_Detection _multiple_objects_detection(output_detection_topic,useExact, useCompressed,
-                                                         use_background_removal,background_calculate_frames,threshold_4_detecting_foreground,show_2D_tracks,publish_world3D_rois);
+  // main class for detection
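+  // (set_object_names chooses whether this instance assigns the object
+  // names; the remaining arguments configure background removal and the
+  // 2D track display)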
+  Multiple_Objects_Detection _multiple_objects_detection(output_detection_topic,set_object_names,useExact, useCompressed,
+                                                         use_background_removal,background_calculate_frames,threshold_4_detecting_foreground,show_2D_tracks);
 
   std::cout << "start detecting..." << std::endl;
   _multiple_objects_detection.run_detection();
diff --git a/detection/conf/multiple_objects_detection.yaml b/detection/conf/multiple_objects_detection.yaml
index ff43b31..422e152 100644
--- a/detection/conf/multiple_objects_detection.yaml
+++ b/detection/conf/multiple_objects_detection.yaml
@@ -5,17 +5,12 @@
 output_detection_topic: /objects_detector/detections
 
 
 #if you want to see the tracking result on the image, set show_2D_track to true
-show_2D_track: false
-
-
-
-#if this camera is used to mark the ROIs and publish them, set it to true, or set it into false
-#Two options:
-#1, Mark in one camera, and project the rois to others: then set publish_world3D_rois to true in the marking camera, other cameras should be set into false.
-#2, Mark in all cameras, then set publish_world3D_rois in every camera to true
-publish_world3D_rois: true
+show_2D_track: true
 
+###########Choose on which camera to set the object names###########
+set_object_names: false
+###########Choose on which camera to set the object names###########
 
 ############################
@@ -60,7 +55,7 @@ d_bins: 100
 
 # Three options : H HS HSD :
 # if the objects differ in color, choose HS, which is faster; otherwise choose HSD.
-Backprojection_Mode: HSD
+Backprojection_Mode: HS
 
 
 ###########For camshift#########
@@ -80,3 +75,7 @@ QUALITY_TOLERANCE: 20000
 DENSITY_TOLORENCE: 2.0
 
 ###########For camshift recover from occlusion###########
+
+
+
+
diff --git a/detection/include/open_ptrack/detection/detection.h b/detection/include/open_ptrack/detection/detection.h
index 3434bfa..f23bcf6 100644
--- a/detection/include/open_ptrack/detection/detection.h
+++ b/detection/include/open_ptrack/detection/detection.h
@@ -153,6 +153,13 @@ namespace open_ptrack
       double
       getDistance();
 
+
+      //get the object name from the detection message
+      std::string
+      getObjectName();
+
+
+
       /**
        * \brief Returns if the detection corresponds to an occluded person or not.
* diff --git a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h index 19ff4b5..e23a409 100644 --- a/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h +++ b/detection/include/open_ptrack/multiple_objects_detection/multiple_objects_detection.h @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -66,16 +67,21 @@ #include #include #include +#include + #include #include #include -#include -#include + +#include +#include + +#include using namespace opt_msgs; using namespace sensor_msgs; - +using namespace std; using namespace cv; @@ -85,601 +91,633 @@ class Multiple_Objects_Detection private: - ////////////For receiving color and depth//////////// - std::mutex lock; - // std::string cameraName; - std::string topicColor, topicDepth; - const bool useExact, useCompressed; - bool updateImage; - bool running; - const size_t queueSize; - cv::Mat color,depth; - cv::Mat cameraMatrixColor, cameraMatrixDepth; - typedef message_filters::sync_policies::ExactTime ExactSyncPolicy; - typedef message_filters::sync_policies::ApproximateTime ApproximateSyncPolicy; - ros::NodeHandle nh; - ros::AsyncSpinner spinner; - image_transport::ImageTransport it; - image_transport::SubscriberFilter *subImageColor, *subImageDepth; - message_filters::Subscriber *subCameraInfoColor, *subCameraInfoDepth; + ////////////////////////For receiving color and depth//////////////////////// + std::mutex lock; + std::string topicColor, topicDepth; + const bool useExact, useCompressed; + bool updateImage; + bool running; + const size_t queueSize; + cv::Mat color,depth; + cv::Mat cameraMatrixColor, cameraMatrixDepth; + typedef message_filters::sync_policies::ExactTime ExactSyncPolicy; + typedef message_filters::sync_policies::ApproximateTime ApproximateSyncPolicy; + ros::NodeHandle nh; + ros::AsyncSpinner spinner; + image_transport::ImageTransport it; + // image_transport::Subscriber roi_image_sub; + ros::Subscriber roi_image_sub; + image_transport::SubscriberFilter *subImageColor, *subImageDepth; + message_filters::Subscriber *subCameraInfoColor, *subCameraInfoDepth; + message_filters::Synchronizer *syncExact; + message_filters::Synchronizer *syncApproximate; + ////////////////////////For receiving color and depth////////////////////////// + + + ////////////For roi selection//////////// + bool objects_selected; + bool finished_select_rois_from_file; + // std::vector rois_from_gui; + // + + std::vector> rois_from_gui; + + std::vector rois_from_file; + std::vector rois_from_file_namelist; - message_filters::Synchronizer *syncExact; - message_filters::Synchronizer *syncApproximate; - ////////////For receiving color and depth////////////// + //for roi projection + tf::TransformListener tf_listener; + tf::StampedTransform transform_camera2world; + ros::Subscriber sub_image2D_rois; - ////////////For roi selection//////////// - bool objects_selected; - // std::vector objects; - //for roi projection - tf::TransformListener tf_listener; - ros::Subscriber sub_image2D_rois; - ros::Subscriber sub_world3D_rois; - ros::Publisher world3D_rois_pub; - World3D_roi current_World3D_roi_msg; - World3D_roi_array World3D_rois_msg; - bool publish_world3D_rois; - int numberofrois=0; - ///////////For roi selection///////////// + ros::Subscriber sub_object_names_from_tracker; + ///////////For roi selection///////////// - ///////////For background removal/////////// - cv::Mat 
depth_max; - bool use_background_removal; - int background_calculate_frames; - int threshold_4_detecting_foreground; - ///////////For background removal/////////// + ///////////For background removal/////////// + cv::Mat depth_max; + bool use_background_removal; + int background_calculate_frames; + int threshold_4_detecting_foreground; + ///////////For background removal/////////// - ///////////For camshift recover from occlusion/////////// - std::vector occludes; - ///////////For camshift recover from occlusion/////////// + ///////////For camshift recover from occlusion/////////// + std::vector occludes; + ///////////For camshift recover from occlusion/////////// - ///////////For generating 3d position/////////// - float fx ,fy,cx,cy; - ///////////For generating 3d position/////////// + ///////////For generating 3d position/////////// + float fx ,fy,cx,cy; + ///////////For generating 3d position/////////// - ///////////For publish detection topic/////////// - std::string output_detection_topic; - ros::Publisher detection_pub; - std::string color_header_frameId; - ///////////For publish detection topic/////////// + ///////////For publish detection topic/////////// + std::string output_detection_topic; + ros::Publisher detection_pub; + std::string color_header_frameId; + ///////////For publish detection topic/////////// - ///////////For main detection/////////// - // cv::Mat color, depth; - std::vector Object_Detectors; - std::vector current_detected_boxes; - cv::Mat main_color,main_depth_16,main_depth_8; - ///////////For main detection/////////// + bool set_object_names;//if to use this detector to set the object names - ///////////For display/////////// - bool show_2D_tracks; - std::vector> tracks_2D; - ///////////For display/////////// + ///////////For main detection/////////// + std::vector Object_Detectors; + std::vector current_detected_boxes; + cv::Mat main_color,main_depth_16,main_depth_8; + ///////////For main detection/////////// - //test time cost - std::chrono::time_point start, now; + + ///////////For display/////////// + bool show_2D_tracks; + std::vector> tracks_2D; + ///////////For display/////////// + + //test time cost + std::chrono::time_point start, now; public: - Multiple_Objects_Detection(const std::string &output_detection_topic,const bool useExact, const bool useCompressed, - const bool use_background_removal, const int background_calculate_frames,const int threshold_4_detecting_foreground,const bool show_2D_tracks, const bool publish_world3D_rois) - : output_detection_topic(output_detection_topic),useExact(useExact), useCompressed(useCompressed),updateImage(false), running(false), - use_background_removal(use_background_removal), objects_selected(false),background_calculate_frames(background_calculate_frames),threshold_4_detecting_foreground(threshold_4_detecting_foreground), queueSize(5), - nh(), spinner(0), it(nh) ,show_2D_tracks(show_2D_tracks) ,publish_world3D_rois(publish_world3D_rois) + Multiple_Objects_Detection(const std::string &output_detection_topic,const bool set_object_names,const bool useExact, const bool useCompressed, + const bool use_background_removal, const int background_calculate_frames,const int threshold_4_detecting_foreground,const bool show_2D_tracks) + : output_detection_topic(output_detection_topic),set_object_names(set_object_names),useExact(useExact), useCompressed(useCompressed),updateImage(false), running(false), + use_background_removal(use_background_removal),objects_selected(false), 
finished_select_rois_from_file(false),background_calculate_frames(background_calculate_frames),threshold_4_detecting_foreground(threshold_4_detecting_foreground), queueSize(5), + nh(), spinner(0), it(nh) ,show_2D_tracks(show_2D_tracks) + { + std::string cameraName = "kinect2_head"; + topicColor = "/" + cameraName + "/" + K2_TOPIC_LORES_COLOR K2_TOPIC_RAW; + topicDepth = "/" + cameraName + "/" + K2_TOPIC_LORES_DEPTH K2_TOPIC_RAW; + + cameraMatrixColor = cv::Mat::zeros(3, 3, CV_64F); + cameraMatrixDepth = cv::Mat::zeros(3, 3, CV_64F); + detection_pub=nh.advertise(output_detection_topic,3); + } + + ~Multiple_Objects_Detection() + { + } + + void run_detection(){ + + start_reciver();// define some subscribers + + for(; running && ros::ok();) { - std::string cameraName = "kinect2_head"; - topicColor = "/" + cameraName + "/" + K2_TOPIC_LORES_COLOR K2_TOPIC_RAW; - topicDepth = "/" + cameraName + "/" + K2_TOPIC_LORES_DEPTH K2_TOPIC_RAW; + if(updateImage)//if recieved color and depth msg + { + lock.lock(); + main_color = this->color; + main_depth_16 = this->depth; + updateImage = false; + lock.unlock(); - cameraMatrixColor = cv::Mat::zeros(3, 3, CV_64F); - cameraMatrixDepth = cv::Mat::zeros(3, 3, CV_64F); - detection_pub=nh.advertise(output_detection_topic,3); + // if accept background_removal ,generate the new color image without background + if(use_background_removal) + { + if ((background_calculate_frames>0))// if the background has not been got + { + background_calculation(); + } + else//after getting the bakground (depth_max), use it to do background removal,generate new color + { + background_removal(); + } + } - std::string output_World3D_roi_topic = "/World3D_rois"; - world3D_rois_pub=nh.advertise(output_World3D_roi_topic,3); - } + // if don't accept background_removal or background is already removed + if(!use_background_removal||(use_background_removal&&background_calculate_frames==0)) + { + //!!!!!!!!!!!!!!!!!!!!!!!main loop!!!!!!!!!!!!!!!!!!!!!!! 
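+            // the loop body does three things: 1) pick up any ROIs newly
+            // received from file or from the GUI, 2) run one CamShift
+            // detection pass per selected object, 3) publish the resulting
+            // DetectionArray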
- ~Multiple_Objects_Detection() - { - } + if(finished_select_rois_from_file)//keep checkin gif there are new rois + select_rois_from_file(); - void run_detection(){ + if(!rois_from_gui.empty())//keep checkin gif there are new rois + select_rois_from_gui(); - start_reciver(); + multiple_objects_detection_main();// main detection - for(; running && ros::ok();) - { - if(updateImage) - { - - lock.lock(); - main_color = this->color; - main_depth_16 = this->depth; - updateImage = false; - lock.unlock(); - - // if accept background_removal ,generate the new color image without background - if(use_background_removal) - { - if ((background_calculate_frames>0))// if the back ground has not been got - { - background_calculation(); - } - else//after get the bakground (depth_max),using it to do background removal,generate new color - { - // start = std::chrono::high_resolution_clock::now(); - background_removal(); - // now = std::chrono::high_resolution_clock::now(); - // double elapsed = std::chrono::duration_cast(now - start).count(); - // std::cout<(now - start).count(); - // std::cout<(nh, topicCameraInfoColor, queueSize); - subCameraInfoDepth = new message_filters::Subscriber(nh, topicCameraInfoDepth, queueSize); - if(useExact) - { - syncExact = new message_filters::Synchronizer(ExactSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth); - syncExact->registerCallback(boost::bind(&Multiple_Objects_Detection::image_callback, this, _1, _2, _3, _4)); - } - else - { - syncApproximate = new message_filters::Synchronizer(ApproximateSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth); - syncApproximate->registerCallback(boost::bind(&Multiple_Objects_Detection::image_callback, this, _1, _2, _3, _4)); - } + running = true; + //////////////////////////subscribe the color and depth data////////////////////////// + std::string topicCameraInfoColor = topicColor.substr(0, topicColor.rfind('/')) + "/camera_info"; + std::string topicCameraInfoDepth = topicDepth.substr(0, topicDepth.rfind('/')) + "/camera_info"; - spinner.start(); + image_transport::TransportHints hints(useCompressed ? "compressed" : "raw"); + image_transport::TransportHints hintsDepth(useCompressed ? 
"compressedDepth" : "raw"); + subImageColor = new image_transport::SubscriberFilter(it, topicColor, queueSize, hints); + subImageDepth = new image_transport::SubscriberFilter(it, topicDepth, queueSize, hintsDepth); + subCameraInfoColor = new message_filters::Subscriber(nh, topicCameraInfoColor, queueSize); + subCameraInfoDepth = new message_filters::Subscriber(nh, topicCameraInfoDepth, queueSize); - std::chrono::milliseconds duration(1); - while(!updateImage ) - { - if(!ros::ok()) - { - return; - } - std::this_thread::sleep_for(duration); - } + if(useExact) + { + syncExact = new message_filters::Synchronizer(ExactSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth); + syncExact->registerCallback(boost::bind(&Multiple_Objects_Detection::image_callback, this, _1, _2, _3, _4)); } - - void background_calculation() + else { - Mat test_backgroundfile=imread("/tmp/depth_background.png",CV_LOAD_IMAGE_UNCHANGED); - if(!test_backgroundfile.empty())// if the background successfully read from file - { - depth_max = test_backgroundfile; - background_calculate_frames=0; - printf("use background removal and background is successfully read from file......\n"); - printf("please press P to select ROIs......\n"); - } - else//background can't be read from file, being calculated - { - printf("use background removal but background can't be read from file, being calculated......\n"); - if(depth_max.empty()) - depth_max=Mat::zeros(main_depth_16.size(),CV_16UC1); - else - depth_max=cv::max(depth_max,main_depth_16); - background_calculate_frames--; - if(background_calculate_frames==0)//save background file - { - std::vector compression_params; - compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); - compression_params.push_back(0); - // pcl::io::savePCDFileASCII ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud); - cv::imwrite("/tmp/depth_background.png", depth_max, compression_params); - printf("background is sucessfully calculated and saved......\n"); - printf("please press P to select ROIs......\n"); - } - } + syncApproximate = new message_filters::Synchronizer(ApproximateSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth); + syncApproximate->registerCallback(boost::bind(&Multiple_Objects_Detection::image_callback, this, _1, _2, _3, _4)); } + //////////////////////////subscribe the color and depth data////////////////////////// - void background_removal() - { - Mat depth_diff=depth_max-main_depth_16; - int nr=depth_diff.rows; - int nc=depth_diff.cols; - if(depth_diff.isContinuous()&&main_color.isContinuous()) - { - nr=1; - nc=nc*depth_diff.rows*depth_diff.channels(); - } - for(int i=0;i(i); - uchar* colorData=main_color.ptr(i*3); - for(int j=0;j compression_params; + compression_params.push_back(CV_IMWRITE_PNG_COMPRESSION); + compression_params.push_back(0); + // pcl::io::savePCDFileASCII ("/tmp/background_" + frame_id.substr(1, frame_id.length()-1) + ".pcd", *background_cloud); + cv::imwrite("/tmp/depth_background.png", depth_max, compression_params); + printf("background is sucessfully calculated and saved......\n"); + printf("please press P to select ROIs......\n"); + } + } + } - if(Object_Detector::Backprojection_Mode=="HSD") + void background_removal() + { + Mat depth_diff=depth_max-main_depth_16; + + int nr=depth_diff.rows; + int nc=depth_diff.cols; + if(depth_diff.isContinuous()&&main_color.isContinuous()) + { + nr=1; + nc=nc*depth_diff.rows*depth_diff.channels(); + } + 
for(int i=0;i(i); + uchar* colorData=main_color.ptr(i*3); + for(int j=0;jheader.frame_id = color_header_frameId; - detection_array_msg->header.stamp = ros::Time::now(); + void select_rois_from_file() + { - for(int i = 0; i < 3; i++) - for(int j = 0; j < 3; j++) - detection_array_msg->intrinsic_matrix.push_back(cameraMatrixColor.at(i, j)); - //set the parms so that we can use the openptrack's tracking node - detection_array_msg->confidence_type = std::string("hog+svm"); - detection_array_msg->image_type = std::string("rgb"); - //detection_array_msg msg!!!!!!!!!!!!!!!!!!!!!!! + if(rois_from_file.empty())//when the "image2D_rois_from_file_Callback" is activted, the "rois_from_file" will be filled with cv::Rect + return; + for(std::vector::iterator it =rois_from_file_namelist.begin();it!=rois_from_file_namelist.end();it++) + { + size_t lastindex = (*it).find_last_of("."); + *it = (*it).substr(0, lastindex); + } - //detect one by one - for(size_t i=0; i tracks_2D_(10); + tracks_2D.push_back(tracks_2D_); + bool occlude_=true; + occludes.push_back(occlude_); + Object_Detector::current_detected_boxes=current_detected_boxes; + } + objects_selected=true; + finished_select_rois_from_file=false; + std::cout<(current_center2D_d.y,current_center2D_d.x)/1000.0;//meter ,not mm - - detection_msg.box_3D.p1.z=current_center_depth; - detection_msg.box_3D.p1.x=(current_center2D_d.x-cx)*fx*current_center_depth; - detection_msg.box_3D.p1.y=(current_center2D_d.y-cy)*fy*current_center_depth; - - detection_msg.box_3D.p2.z=detection_msg.box_3D.p1.z; - detection_msg.box_3D.p2.x=detection_msg.box_3D.p1.x; - detection_msg.box_3D.p2.y=detection_msg.box_3D.p1.y; - - detection_msg.box_2D.x = current_detected_boxes[i].x; - detection_msg.box_2D.y = current_detected_boxes[i].y; - detection_msg.box_2D.width = current_detected_boxes[i].width; - detection_msg.box_2D.height = current_detected_boxes[i].height; - detection_msg.height = 0.002; - detection_msg.confidence = 10; - - detection_msg.centroid.x=detection_msg.box_3D.p1.x; - detection_msg.centroid.y=detection_msg.box_3D.p1.y; - detection_msg.centroid.z=detection_msg.box_3D.p1.z; - - detection_msg.distance = std::sqrt(detection_msg.centroid.x * detection_msg.centroid.x + detection_msg.centroid.z * detection_msg.centroid.z); - - detection_msg.top.x=detection_msg.box_3D.p1.x; - detection_msg.top.y=detection_msg.box_3D.p1.y; - detection_msg.top.z=detection_msg.box_3D.p1.z; - - detection_msg.bottom.x=detection_msg.box_3D.p1.x; - detection_msg.bottom.y=detection_msg.box_3D.p1.y; - detection_msg.bottom.z=detection_msg.box_3D.p1.z; - if(current_center_depth>0.01) - detection_array_msg->detections.push_back(detection_msg); - } - else{//if occluded ,use a empty_rect because of the tracker will use this to generate the other_objects_mask - // std::cout<>::const_iterator roi_it= rois_from_gui.begin();roi_it!=rois_from_gui.end();roi_it++) + { + Object_Detector newDetector; + if(set_object_names) + newDetector.setObjectName((*roi_it).first); + else + newDetector.setObjectName("default"); + newDetector.setCurrentRect((*roi_it).second); + + Object_Detectors.push_back(newDetector); + current_detected_boxes.push_back((*roi_it).second); + std::list tracks_2D_(10); + tracks_2D.push_back(tracks_2D_); + bool occlude_=false; + occludes.push_back(occlude_); + Object_Detector::current_detected_boxes=current_detected_boxes; } + objects_selected=true; + std::cout< current_track=tracks_2D[i]; - for( std::list ::iterator it = current_track.begin(); it!=current_track.end(); it++) - { - Rect 
test_Rect((*it).x+((*it).width)/2,(*it).y+((*it).height)/2,1,1); - test_Rect=test_Rect&Rect(0,0,main_color.size().width,main_color.size().height); - cv::rectangle(main_color, test_Rect, cv::Scalar(250*(i), 250*(i-1), 250*(i-2)), 2, CV_AA); - } - } - cv::namedWindow("show_2D_tracks"); - cv::imshow( "show_2D_tracks", main_color ); - cv::waitKey(10); + //devide (1000mm~9000mm) into 255 parts + ushort Max=9000,Min=1000; + main_depth_16.convertTo(main_depth_8, CV_8U,255.0/(Max-Min),-255.0*Min/(Max-Min)); + Object_Detector::setMainDepth(main_depth_8); } + else + { + Object_Detector::setMainDepth(main_depth_16); + } + ////////////////////////////////////////set the input(color+depth) of every detector//////////////////////////////////////// + + - void image_callback(const sensor_msgs::Image::ConstPtr imageColor, const sensor_msgs::Image::ConstPtr imageDepth, - const sensor_msgs::CameraInfo::ConstPtr cameraInfoColor, const sensor_msgs::CameraInfo::ConstPtr cameraInfoDepth) + //!!!!!!!!!!!!!!!!!!!!!!!create detection_array_msg!!!!!!!!!!!!!!!!!!!!!!! + /// Write detection message: + DetectionArray::Ptr detection_array_msg(new DetectionArray); + // Set camera-specific fields: + detection_array_msg->header.frame_id = color_header_frameId; + detection_array_msg->header.stamp = ros::Time::now(); + + for(int i = 0; i < 3; i++) + for(int j = 0; j < 3; j++) + detection_array_msg->intrinsic_matrix.push_back(cameraMatrixColor.at(i, j)); + + //set the parms so that we can use the openptrack's tracking node + detection_array_msg->confidence_type = std::string("hog+svm"); + detection_array_msg->image_type = std::string("rgb"); + //!!!!!!!!!!!!!!!!!!!!!!!detection_array_msg msg!!!!!!!!!!!!!!!!!!!!!!! + + //detect objects one by one + for(size_t i=0; i(0, 0); - fy = 1.0f / cameraMatrixColor.at(1, 1); - cx = cameraMatrixColor.at(0, 2); - cy = cameraMatrixColor.at(1, 2); - } + occludes[i]=Object_Detectors[i].occluded;//update the occlusion flag - readImage(imageColor, _color); - readImage(imageDepth, _depth); + if(occludes[i]==false)// if no occlusion + { - color_header_frameId=imageColor->header.frame_id; + ////////////////////for display the detection ellipse and track points//////////////////// + current_detected_boxes[i]=current_trackBox.boundingRect(); + cv::ellipse(main_color, current_trackBox, cv::Scalar(250*(i), 250*(i-1), 250*(i-2)), 2, CV_AA); + //cv::rectangle(color, current_trackBox.boundingRect(), cv::Scalar(255, 0, 0), 2, CV_AA); + tracks_2D[i].push_back(current_detected_boxes[i]); + tracks_2D[i].pop_front(); + ////////////////////for display the detection ellipse and track points//////////////////// - if(_color.type() == CV_16U) - { - cv::Mat tmp; - _color.convertTo(tmp, CV_8U, 0.02); - cv::cvtColor(tmp, _color, CV_GRAY2BGR); - } - lock.lock(); - this->color = _color; - this->depth = _depth; - updateImage = true; - lock.unlock(); + Object_Detector::current_detected_boxes=current_detected_boxes;// update the detected_boxes, it is used to generate the other objects mask for the following detection , in the following detection ,this Rect will be blocked in the image + + + //////////////////////// genearate detection msg!!!!!!!!!!!!!!!!!!!!!!!!!! 
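+          // the CamShift box center (u,v) is back-projected with the stored
+          // inverse intrinsics (fx and fy hold 1/f), so for depth z at (u,v):
+          //   x = (u - cx) * fx * z,   y = (v - cy) * fy * z
+          // every corner of box_3D is then collapsed onto this single centroid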
+ Detection detection_msg; + Point2d current_center2D_d(cvFloor(current_trackBox.center.x),cvFloor(current_trackBox.center.y));//center point in image2D coordinate + double current_center_depth=main_depth_16.at(current_center2D_d.y,current_center2D_d.x)/1000.0;//meter ,not mm + + tf::Vector3 camera_3D;//center point in camera3D coordinate + camera_3D.setX((current_center2D_d.x-cx)*fx*current_center_depth); + camera_3D.setY((current_center2D_d.y-cy)*fy*current_center_depth); + camera_3D.setZ(current_center_depth); + + // set items of the detection msg + /////set every 3D point of the detection box with the center point in camera3D coordinate///// + detection_msg.box_3D.p1.z=camera_3D.z(); + detection_msg.box_3D.p1.x=camera_3D.x(); + detection_msg.box_3D.p1.y=camera_3D.y(); + + detection_msg.box_3D.p2.z=detection_msg.box_3D.p1.z; + detection_msg.box_3D.p2.x=detection_msg.box_3D.p1.x; + detection_msg.box_3D.p2.y=detection_msg.box_3D.p1.y; + + detection_msg.centroid.x=detection_msg.box_3D.p1.x; + detection_msg.centroid.y=detection_msg.box_3D.p1.y; + detection_msg.centroid.z=detection_msg.box_3D.p1.z; + + detection_msg.top.x=detection_msg.box_3D.p1.x; + detection_msg.top.y=detection_msg.box_3D.p1.y; + detection_msg.top.z=detection_msg.box_3D.p1.z; + + detection_msg.bottom.x=detection_msg.box_3D.p1.x; + detection_msg.bottom.y=detection_msg.box_3D.p1.y; + detection_msg.bottom.z=detection_msg.box_3D.p1.z; + /////set every 3D point of the detection box with the center point in camera3D coordinate///// + + + detection_msg.box_2D.x = current_detected_boxes[i].x; + detection_msg.box_2D.y = current_detected_boxes[i].y; + detection_msg.box_2D.width = current_detected_boxes[i].width; + detection_msg.box_2D.height = current_detected_boxes[i].height; + + + detection_msg.confidence = 10;//set a fixed value + + //the distance between the camera and the object + detection_msg.distance = std::sqrt(detection_msg.centroid.x * detection_msg.centroid.x + detection_msg.centroid.z * detection_msg.centroid.z); + + + // set the the height of the detection in world_3D, this will pass to the tracker ,the only reason to do this is it is needed in json message. 
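+        // transform the camera-frame centroid into the /world frame using the
+        // transform cached in image_callback; z*4/3 is only a nominal height,
+        // kept so that the tracker has a value to put in the json message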
+ tf::Vector3 world_3D; + world_3D = transform_camera2world(camera_3D); + detection_msg.height =world_3D.z()*4/3 ; + + detection_msg.object_name=object_name; + + if(current_center_depth>0.01) + detection_array_msg->detections.push_back(detection_msg);//push to the msg array + } + else{//if occluded ,use a empty_rect because of the tracker will use this to generate the other_objects_mask + Rect empty_rect(0,0,1,1); + current_detected_boxes[i]=empty_rect; + Object_Detector::current_detected_boxes=current_detected_boxes; + } } - void image2D_rois_Callback(const opt_msgs::Image2D_roi_array::ConstPtr& image2D_roi_msg) + detection_pub.publish(detection_array_msg); // publish message + } + + void show_2D_tracks_on_image()//show track points on 2D image + { + for(size_t i=0; i current_track=tracks_2D[i]; + for( std::list ::iterator it = current_track.begin(); it!=current_track.end(); it++) + { + Rect test_Rect((*it).x+((*it).width)/2,(*it).y+((*it).height)/2,1,1); + test_Rect=test_Rect&Rect(0,0,main_color.size().width,main_color.size().height); + cv::rectangle(main_color, test_Rect, cv::Scalar(250*(i), 250*(i-1), 250*(i-2)), 2, CV_AA); + } + } + cv::namedWindow("show_2D_tracks"); + cv::imshow( "show_2D_tracks", main_color ); + cv::waitKey(10); + } - for( std::vector::const_iterator it = image2D_roi_msg->Rois.begin(); - it != image2D_roi_msg->Rois.end(); it++) - { - cv::Rect selection((*it).x,(*it).y,(*it).width,(*it).height); - selection=selection&Rect(0,0,main_color.size().width,main_color.size().height); - - //////////////initialize one detector with one selection////////////// - if(selection.area()>1) - { - Object_Detector newDetector; - newDetector.setCurrentRect(selection); - Object_Detectors.push_back(newDetector); - current_detected_boxes.push_back(selection); - std::list tracks_2D_(10); - tracks_2D.push_back(tracks_2D_); - bool occlude_=false; - occludes.push_back(occlude_); - Object_Detector::current_detected_boxes=current_detected_boxes; - //////////////~~initialize one detector with one selection////////////// - - - //////////////project theselection from current_frame to other cameras////////////// - //1. 
-    for( std::vector<opt_msgs::Image2D_roi>::const_iterator it = image2D_roi_msg->Rois.begin();
-         it != image2D_roi_msg->Rois.end(); it++)
-    {
-      cv::Rect selection((*it).x,(*it).y,(*it).width,(*it).height);
-      selection=selection&Rect(0,0,main_color.size().width,main_color.size().height);
-
-      //////////////initialize one detector with one selection//////////////
-      if(selection.area()>1)
-      {
-        Object_Detector newDetector;
-        newDetector.setCurrentRect(selection);
-        Object_Detectors.push_back(newDetector);
-        current_detected_boxes.push_back(selection);
-        std::list<cv::Rect> tracks_2D_(10);
-        tracks_2D.push_back(tracks_2D_);
-        bool occlude_=false;
-        occludes.push_back(occlude_);
-        Object_Detector::current_detected_boxes=current_detected_boxes;
-        //////////////~~initialize one detector with one selection//////////////
-
-        //////////////project theselection from current_frame to other cameras//////////////
-        //1. project from currentframe to world
-        std::string world_frame_id="/world";
-        std::string current_frame_id=color_header_frameId;
-        tf::StampedTransform transform_current2world;
-        try{
-          tf_listener.lookupTransform(world_frame_id, current_frame_id, ros::Time(0), transform_current2world);
-        }
-        catch(tf::TransformException ex){
-          ROS_ERROR("%s",ex.what());
-          ros::Duration(1.0).sleep();
-        }
-        Point2d current_frame_image2D_1(cvFloor(selection.x),cvFloor(selection.y));
-        Point2d current_frame_image2D_2(cvFloor(selection.x+selection.width),cvFloor(selection.y+selection.height));
-        std::cout<<"current_frame_image2D_11111111111111111: "<<current_frame_image2D_1<<std::endl;
-        double current_frame_depth_1=main_depth_16.at<uint16_t>(current_frame_image2D_1.y,current_frame_image2D_1.x)/1000.0; //meter ,not mm
-        double current_frame_depth_2=main_depth_16.at<uint16_t>(current_frame_image2D_2.y,current_frame_image2D_2.x)/1000.0; //meter ,not mm
-
-        tf::Vector3 world3D_1=image2D_to_world3D(current_frame_image2D_1,current_frame_depth_1,transform_current2world);
-        tf::Vector3 world3D_2=image2D_to_world3D(current_frame_image2D_2,current_frame_depth_2,transform_current2world);
-
-        //publish
-        current_World3D_roi_msg.no=numberofrois++;
-        current_World3D_roi_msg.x1=world3D_1.getX();
-        current_World3D_roi_msg.y1=world3D_1.getY();
-        current_World3D_roi_msg.z1=world3D_1.getZ();
-        current_World3D_roi_msg.x2=world3D_2.getX();
-        current_World3D_roi_msg.y2=world3D_2.getY();
-        current_World3D_roi_msg.z2=world3D_2.getZ();
-        World3D_rois_msg.Rois.push_back(current_World3D_roi_msg);
-
-        // cv::rectangle(main_color, selection, cv::Scalar(255, 0, 0), 2, CV_AA);
-      }
-    }
-    cv::imwrite("/tmp/show_the_set_recs.png",main_color);
-    objects_selected=true;
-    std::cout<<"..."<<std::endl;
-  }
+
+  void imageCallback(const sensor_msgs::Image::ConstPtr imageColor, const sensor_msgs::Image::ConstPtr imageDepth,
+                     const sensor_msgs::CameraInfo::ConstPtr cameraInfoColor, const sensor_msgs::CameraInfo::ConstPtr cameraInfoDepth)
+  {
+    cv::Mat _color, _depth;
+    readImage(imageColor, _color);
+    readImage(imageDepth, _depth);
+    color_header_frameId=imageColor->header.frame_id;
+
+    if(!objects_selected) // the camera info only needs to be read once
+    {
+      readCameraInfo(cameraInfoColor, cameraMatrixColor);
+      readCameraInfo(cameraInfoDepth, cameraMatrixDepth);
+
+      fx = 1.0f / cameraMatrixColor.at<double>(0, 0);
+      fy = 1.0f / cameraMatrixColor.at<double>(1, 1);
+      cx = cameraMatrixColor.at<double>(0, 2);
+      cy = cameraMatrixColor.at<double>(1, 2);
+
+      std::string world_frame_id="/world";
+      try{
+        tf_listener.lookupTransform(world_frame_id, color_header_frameId, ros::Time(0), transform_camera2world);
+      }
+      catch(tf::TransformException ex){
+        ROS_ERROR("%s",ex.what());
+        ros::Duration(1.0).sleep();
+      }
+    }
-
-    //project form image2D coordinate of currentframe to world3D coordinate,
-    // the intrisinc martrix comes from the "cameraMatrixColor"
-    //two steps : image2D--camera3D--world3D
-    tf::Vector3 image2D_to_world3D(Point2d i, double depth_, tf::StampedTransform transform_)
+    if(_color.type() == CV_16U)
+    {
-      tf::Vector3 v;
-      v.setX((i.x-cx)*fx*depth_);
-      v.setY((i.y-cy)*fy*depth_);
-      v.setZ(depth_);
-      v = transform_(v);
-      std::cout<<"..."<<std::endl;
+      ...
+    }
+
+    lock.lock();
+    this->color = _color;
+    this->depth = _depth;
+    updateImage = true;
+    lock.unlock();
+  }
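//  [Sketch, not part of the patch: the matching consumer of the lock/updateImage
//   hand-off above. The detection loop would snapshot the frames under the same
//   mutex and clear the flag before processing, so the callback can keep writing:]
//
//   cv::Mat color_snapshot, depth_snapshot;
//   lock.lock();
//   if(updateImage)
//   {
//     color.copyTo(color_snapshot); // work on copies so the callback can
//     depth.copyTo(depth_snapshot); // overwrite color/depth at any time
//     updateImage = false;
//   }
//   lock.unlock();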
-  void world3D_rois_Callback(const opt_msgs::World3D_roi_array::ConstPtr& world3D_rois_msg)
+
+  // receive the roi msg (x,y,width,height) produced by marking in the gui
+  void image2D_rois_from_gui_Callback(const opt_msgs::Image2D_roi_array::ConstPtr& image2D_roi_msg)
+  {
+    for( std::vector<opt_msgs::Image2D_roi>::const_iterator it = image2D_roi_msg->Rois.begin();
+         it != image2D_roi_msg->Rois.end(); it++)
+    {
-      if(publish_world3D_rois)
-      {
-        sub_world3D_rois.shutdown();
-        return;
-      }
-      for( std::vector<opt_msgs::World3D_roi>::const_iterator it = world3D_rois_msg->Rois.begin();
-           it != world3D_rois_msg->Rois.end(); it++)
-      {
-        tf::Vector3 world3D_1,world3D_2;
-        world3D_1.setX((*it).x1);
-        world3D_1.setY((*it).y1);
-        world3D_1.setZ((*it).z1);
-        world3D_2.setX((*it).x2);
-        world3D_2.setY((*it).y2);
-        world3D_2.setZ((*it).z2);
-
-        tf::StampedTransform transform_world2projectedframe;
-        std::string projected_frame_id=color_header_frameId;
-        std::string world_frame_id= "/world";
-        try{
-          tf_listener.lookupTransform(projected_frame_id, world_frame_id, ros::Time(0), transform_world2projectedframe);
-        }
-        catch(tf::TransformException ex){
-          ROS_ERROR("%s",ex.what());
-          ros::Duration(1.0).sleep();
-        }
-        Point2d projected_frame_image2D_1=world3D_to_image2D(world3D_1,transform_world2projectedframe);
-        Point2d projected_frame_image2D_2=world3D_to_image2D(world3D_2,transform_world2projectedframe);
-        std::cout<<"projected_frame_image2D_11111111111111111: "<<projected_frame_image2D_1<<std::endl;
-        cv::Rect selection(projected_frame_image2D_1,projected_frame_image2D_2);
-        selection=selection&Rect(0,0,main_color.size().width,main_color.size().height);
-        if(selection.area()>1)
-        {
-          Object_Detector newDetector;
-          newDetector.setCurrentRect(selection);
-          Object_Detectors.push_back(newDetector);
-          current_detected_boxes.push_back(selection);
-          std::list<cv::Rect> tracks_2D_(10);
-          tracks_2D.push_back(tracks_2D_);
-          bool occlude_=false;
-          occludes.push_back(occlude_);
-          Object_Detector::current_detected_boxes=current_detected_boxes;
-          cv::rectangle(main_color, selection, cv::Scalar(255, 0, 0), 2, CV_AA);
-        }
+      string object_name=(*it).name;
+      cv::Rect selection((*it).x,(*it).y,(*it).width,(*it).height);
+      selection=selection&Rect(0,0,main_color.size().width,main_color.size().height);
-      }
-      cv::imwrite("/tmp/show_the_project_recs.png",main_color);
-      objects_selected=true;
-      std::cout<<"..."<<std::endl;
+      std::pair<string,cv::Rect> new_rois_from_gui(object_name,selection);
+
+      lock.lock();
+      rois_from_gui.push_back(new_rois_from_gui);
+      lock.unlock();
+      std::cout<<"got an image roi msg from the gui"<<std::endl;
+    }
+  }
+
+  // receive the roi images that were selected earlier and saved to file
+  void image2D_rois_from_file_Callback(const opt_msgs::Image2D_roi_file::ConstPtr& msg)
+  {
+    if(msg->name!="end_flag")
+    {
-      v = inverse_transform_(v);
-      // std::cout<<"cam3D"<<std::endl;
+      cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(msg->image_roi, sensor_msgs::image_encodings::BGR8);
+      cv::Mat conversion_mat_;
+      cv_ptr->image.copyTo(conversion_mat_);
+      lock.lock();
+      rois_from_file.push_back(conversion_mat_);
+      lock.unlock();
+
+      string object_name=msg->name;
+      rois_from_file_namelist.push_back(object_name);
-  void readImage(const sensor_msgs::Image::ConstPtr msgImage, cv::Mat &image) const
+      std::cout<<"got an image roi msg from file"<<std::endl;
+    }
+    else
+    {
-    cv_bridge::CvImageConstPtr pCvImage;
-    pCvImage = cv_bridge::toCvShare(msgImage, msgImage->encoding);
-    pCvImage->image.copyTo(image);
+      finished_select_rois_from_file=true;
+      std::cout<<"finished receiving the image rois from file"<<std::endl;
+    }
+  }
+
+  // receive the object names set in the gui and assign them to the detectors
+  void object_names_Callback(const opt_msgs::ObjectNameArray::ConstPtr& object_names_msg)
+  {
+    if((object_names_msg->header.frame_id)==color_header_frameId)
+    {
-      double *itC = cameraMatrix.ptr(0, 0);
-      for(size_t i = 0; i < 9; ++i, ++itC)
+      for( std::vector::const_iterator it = object_names_msg->object_names.begin();
+           it!=object_names_msg->object_names.end();it++)
+      {
+        int number=(*it).no;
+        if(Object_Detectors[number].object_name=="default")
+        {
-        *itC = cameraInfo->K[i];
+          Object_Detectors[number].object_name=(*it).object_name;
+          std::cout<<"set the name of detector "<<number<<std::endl;
+        }
+      }
+    }
+  }
+
+  void readImage(const sensor_msgs::Image::ConstPtr msgImage, cv::Mat &image) const
+  {
+    cv_bridge::CvImageConstPtr pCvImage;
+    pCvImage = cv_bridge::toCvShare(msgImage, msgImage->encoding);
+    pCvImage->image.copyTo(image);
+  }
+
+  void readCameraInfo(const sensor_msgs::CameraInfo::ConstPtr cameraInfo, cv::Mat &cameraMatrix) const
+  {
+    double *itC = cameraMatrix.ptr<double>(0, 0);
+    for(size_t i = 0; i < 9; ++i, ++itC)
+    {
+      *itC = cameraInfo->K[i];
+    }
+  }
+};
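The HSD back-projection described in object_detector.h below combines a hue-saturation histogram, computed once from the initial selection, with a depth histogram rebuilt on every frame. A minimal sketch of that combination step, using the bin names from the header (h_bins, s_bins, d_bins) but otherwise illustrative: it treats the two distributions as independent and takes their outer product, the same triple loop that appears later in object_detector.cpp.

// hs_hist_pdf : h_bins x s_bins, fixed after initialization
// tmp_depth_hist_pdf : d_bins x 1, recomputed from every depth frame
for (int i = 0; i < h_bins; i++)
  for (int j = 0; j < s_bins; j++)
    for (int k = 0; k < d_bins; k++)
      hsd_hist.at<float>(i, j, k) =
          255 * hs_hist_pdf.at<float>(i, j) * tmp_depth_hist_pdf.at<float>(k);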
diff --git a/detection/include/open_ptrack/multiple_objects_detection/object_detector.h b/detection/include/open_ptrack/multiple_objects_detection/object_detector.h
index 7dbbffc..8701e0a 100644
--- a/detection/include/open_ptrack/multiple_objects_detection/object_detector.h
+++ b/detection/include/open_ptrack/multiple_objects_detection/object_detector.h
@@ -10,18 +10,16 @@ using namespace cv;
 class Object_Detector
 {
 public:
-  bool occluded;
-  bool half_occluded;
-  double half_occluded_frames;
+
+
   // ///////////For hsv mask///////////
   static int HMin, SMin, VMin;
   static int HMax,SMax, VMax;
   // ///////////For hsv mask///////////
-  // ///////////For camshift///////////
-
+
+  ////////////////////////// For camshift //////////////////////////
   static std::string Backprojection_Mode;
   //    Backprojection_Mode{
   //        H ,//just use hue of select roi to do calhist for one time, then use the permenet hue hist to do the backproject on every hue frame
   //
   //        //then combine the permenet hs hist pdf (named:hs_hist_pdf) and tmp depth hist(named: tmp_depth_hist_pdf) to do the backproject on every hsd frame,when occluded
   //    };
-  static int h_bins,s_bins;//for the hue backrojection hist ,divide (HMin,HMax) to h_bins parts
-  static int d_bins;
-  // ///////////For camshift///////////
+  static int d_bins; // depth bins
+  ////////////////////////// For camshift //////////////////////////

@@ -52,40 +49,50 @@
   static int AREA_TOLERANCE;//AREA_TOLERANCE is also used to create the position_maks
   static int QUALITY_TOLERANCE;
   static double DENSITY_TOLORENCE;
+
+  bool occluded;
+  bool half_occluded;
+  double half_occluded_frames;
+
   ///////////For camshift recover from occlusion///////////

   static std::vector<cv::Rect> current_detected_boxes;//this one come from class:multiple_objects_detection, every time of every object's detection ,multiple_objects_detection will update this varable ,we use it to generate the other_object_mask
+  cv::Mat roi_from_file; // the roi read from a file; set when "select_rois_from_file" is used
+  std::string object_name;
+
 private:
   static cv::Mat mainColor;
   static cv::Mat mainDepth;
+  cv::Mat Color,Depth; // copies of mainColor and mainDepth
   bool firstRun;
-  cv::Rect currentRect;
-  cv::RotatedRect current_detectedBox;
+  cv::Rect currentRect; // only used during initialization (not important)
+  cv::Rect selection;   // only used during initialization (not important)
+
+  cv::RotatedRect current_detectedBox; // detection result (important)
+  cv::Rect detectWindow; // used as the search window at the beginning of detection (important)
-  cv::Rect detectWindow;
-  cv::Mat hsv, hue, hsv_mask,backproj; //, histimg;
+  ////////// main variables for computing the histograms ///////////
+  cv::Mat hsv, hue, hsv_mask,backproj;
   cv::Mat h_hist;//H mode
   cv::Mat hs_hist_for_HS;//HS mode
   cv::Mat hs_hist_for_HSD,//just used under occlusion,because ,when oclluded ,can't use depth infomation to detect the objects
     hs_hist_pdf,//use hs_hist_for_HSD to get pdf ,use it as the permnet part
     tmp_depth_hist_pdf,//use every depth frame to get this ,use it as the tmp part
     hsd_hist;//combine hs_hist_pdf(perment) and tmp_depth_hist_pdf(tmp,every frame) to get this
+  ////////// main variables for computing the histograms ///////////
-  cv::Rect selection;
-  cv::Mat Color,Depth;
-  Mat other_object_mask,position_mask,depth_mask;
+  Mat other_object_mask,position_mask,depth_mask; // masks used to obtain a good back-projection image
+
 public:
   Object_Detector()
   :firstRun(true),occluded(false),half_occluded(false),half_occluded_frames(10)
-  {
-//    hsd_hist=cv::zeros()
-  }
+  {}

   static void setMainColor(const cv::Mat _mainColor);
   static cv::Mat getMainColor();
@@ -96,18 +103,19 @@
   void setCurrentRect(const cv::Rect _currentRect);
   cv::Rect getCurrentRect();

+  void setObjectName(const std::string object_name);
+  std::string getObjectName();
+
   void setcurrent_detected_boxes(std::vector<cv::Rect> _current_detected_boxes);
   std::vector<cv::Rect> getcurrent_detected_boxes();
-
   void H_backprojection();
   void HS_backprojection();
   void HSD_backprojection();
-  cv::RotatedRect object_shift(InputArray _probColor,Rect& window, TermCriteria criteria);
-
-  cv::RotatedRect detectCurrentRect(int id);
+
cv::RotatedRect object_shift(InputArray _probColor,Rect& window, TermCriteria criteria);//camshift + occlusion handle + cv::RotatedRect detectCurrentRect(int id);//main detection function }; //stastic varable defination diff --git a/detection/launch/camera_poses.txt b/detection/launch/camera_poses.txt deleted file mode 100644 index a17bc97..0000000 --- a/detection/launch/camera_poses.txt +++ /dev/null @@ -1,3 +0,0 @@ -# Auto-generated file. -# CALIBRATION ID: 1490893680 -kinect2_head: 2.62509 -0.111163 5.146 0.875696 -0.136273 0.0710624 0.457752 diff --git a/detection/launch/detection_node_kinect2_head.launch b/detection/launch/detection_node_kinect2_head.launch deleted file mode 100644 index 26b5351..0000000 --- a/detection/launch/detection_node_kinect2_head.launch +++ /dev/null @@ -1,16 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/detection/launch/detector_kinect2.launch b/detection/launch/detector_kinect2.launch index b41f491..aded8ee 100644 --- a/detection/launch/detector_kinect2.launch +++ b/detection/launch/detector_kinect2.launch @@ -39,7 +39,8 @@ - + + diff --git a/detection/launch/object_detector_kinect2.launch b/detection/launch/object_detector_kinect2.launch new file mode 100644 index 0000000..531d410 --- /dev/null +++ b/detection/launch/object_detector_kinect2.launch @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/detection/launch/people_detector_kinect2.launch b/detection/launch/people_detector_kinect2.launch new file mode 100644 index 0000000..b2125d9 --- /dev/null +++ b/detection/launch/people_detector_kinect2.launch @@ -0,0 +1,33 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/detection/src/detection.cpp b/detection/src/detection.cpp index cef1012..71ce350 100644 --- a/detection/src/detection.cpp +++ b/detection/src/detection.cpp @@ -112,6 +112,13 @@ namespace open_ptrack return detection_msg_.distance; } + std::string + Detection::getObjectName() + { + return detection_msg_.object_name; + } + + bool Detection::isOccluded() { diff --git a/detection/src/multiple_objects_detection/object_detector.cpp b/detection/src/multiple_objects_detection/object_detector.cpp index d3550b8..64035a9 100644 --- a/detection/src/multiple_objects_detection/object_detector.cpp +++ b/detection/src/multiple_objects_detection/object_detector.cpp @@ -36,6 +36,17 @@ cv::Rect Object_Detector::getCurrentRect() return currentRect; } + +void Object_Detector::setObjectName(const std::string _object_name) +{ + object_name=_object_name; +} +std::string Object_Detector::getObjectName() +{ + return object_name; +} + + void Object_Detector::setcurrent_detected_boxes(std::vector _current_detected_boxes) { current_detected_boxes=_current_detected_boxes; @@ -56,12 +67,28 @@ void Object_Detector::H_backprojection() if( firstRun ) { + if(!occluded) + { cv::Mat roi(hue, selection), maskroi(hsv_mask, selection); cv::calcHist(&roi, 1, 0, maskroi, h_hist, 1, &h_bins, &ph_ranges); cv::normalize(h_hist, h_hist, 0, 255, CV_MINMAX); detectWindow = selection; firstRun = false; // std::cout<<"H mode"<hsd - if( firstRun ) { - cv::Mat roi(hsv, selection), maskroi(hsv_mask, selection); - cv::calcHist(&roi, 1, hs_channels, maskroi, hs_hist_for_HSD, 2, hs_size, phs_ranges, true, false); - cv::normalize(hs_hist_for_HSD , hs_hist_for_HSD , 0, 255, CV_MINMAX); - - double sum_hs_hist=sum(hs_hist_for_HSD)[0]; - hs_hist_pdf=hs_hist_for_HSD/sum_hs_hist; - - //used to generate the initial hsd_hist(use this just for the right data format ) - 
cv::calcHist(&roi, 1, hsd_channels, maskroi, hsd_hist, 3, hsd_size, phsd_ranges, true, false); - - cv::Mat roi_depth(Depth, selection); - - //calculate the the current_trackBox(rotatedrect) mask,named depth_mask(in this mask ,just the the value in the area :current_detectBox(rotatedrect) is 255) - Point2f vertices[4]; - current_detectedBox.points(vertices); - std::vector< std::vector > co_ordinates; - co_ordinates.push_back(std::vector()); - co_ordinates[0].push_back(vertices[0]); - co_ordinates[0].push_back(vertices[1]); - co_ordinates[0].push_back(vertices[2]); - co_ordinates[0].push_back(vertices[3]); - depth_mask=Mat::zeros(Color.size(),CV_8UC1); - drawContours( depth_mask,co_ordinates,0, Scalar(255),CV_FILLED, 8 ); - depth_mask&=hsv_mask; - cv::Mat depth_mask_roi(depth_mask, detectWindow); - - cv::calcHist(&roi_depth, 1, 0, depth_mask_roi, tmp_depth_hist_pdf, 1, &d_bins, &pd_ranges); - double sum_tmp_depth_hist_pdf=sum(tmp_depth_hist_pdf)[0]; - tmp_depth_hist_pdf=tmp_depth_hist_pdf/sum_tmp_depth_hist_pdf; - - for (int i=0; i(i,j,k)=255*hs_hist_pdf.at(i,j)*tmp_depth_hist_pdf.at(k); + if(!occluded)//if the roi is from the file, the first frame will be set to occluded, in this situation,we just calculate the hs pdf and use it to search the object + { + cv::Mat roi(hsv, selection), maskroi(hsv_mask, selection); + cv::calcHist(&roi, 1, hs_channels, maskroi, hs_hist_for_HSD, 2, hs_size, phs_ranges, true, false); + cv::normalize(hs_hist_for_HSD , hs_hist_for_HSD , 0, 255, CV_MINMAX); + + double sum_hs_hist=sum(hs_hist_for_HSD)[0]; + hs_hist_pdf=hs_hist_for_HSD/sum_hs_hist; + + //used to generate the initial hsd_hist(use this just for the right data format ) + cv::calcHist(&roi, 1, hsd_channels, maskroi, hsd_hist, 3, hsd_size, phsd_ranges, true, false); + + cv::Mat roi_depth(Depth, selection); + + //calculate the the current_trackBox(rotatedrect) mask,named depth_mask(in this mask ,just the the value in the area :current_detectBox(rotatedrect) is 255) + Point2f vertices[4]; + current_detectedBox.points(vertices); + std::vector< std::vector > co_ordinates; + co_ordinates.push_back(std::vector()); + co_ordinates[0].push_back(vertices[0]); + co_ordinates[0].push_back(vertices[1]); + co_ordinates[0].push_back(vertices[2]); + co_ordinates[0].push_back(vertices[3]); + depth_mask=Mat::zeros(Color.size(),CV_8UC1); + drawContours( depth_mask,co_ordinates,0, Scalar(255),CV_FILLED, 8 ); + depth_mask&=hsv_mask; + cv::Mat depth_mask_roi(depth_mask, detectWindow); + cv::calcHist(&roi_depth, 1, 0, depth_mask_roi, tmp_depth_hist_pdf, 1, &d_bins, &pd_ranges); + double sum_tmp_depth_hist_pdf=sum(tmp_depth_hist_pdf)[0]; + tmp_depth_hist_pdf=tmp_depth_hist_pdf/sum_tmp_depth_hist_pdf; + + + //combine the hs and depth + for (int i=0; i(i,j,k)=255*hs_hist_pdf.at(i,j)*tmp_depth_hist_pdf.at(k); + } } } + + //normalize hsd_hist + double hMin,hMax; + minMaxIdx(hsd_hist, &hMin, &hMax); + hsd_hist = 255 * hsd_hist / hMax; + + detectWindow = selection; + firstRun = false; } - //normalize hsd_hist - double hMin,hMax; - minMaxIdx(hsd_hist, &hMin, &hMax); - hsd_hist = 255 * hsd_hist / hMax; + else{ // if we get the roi from file ,we just calculate the hs pdf - detectWindow = selection; - firstRun = false; + Mat roi_color= roi_from_file; + Mat roi_hsv,roi_hsv_mask; + cv::cvtColor(roi_color, roi_hsv, CV_BGR2HSV); + cv::inRange(roi_hsv, cv::Scalar(HMin, SMin, MIN(VMin,VMax)), + cv::Scalar(HMax, SMax, MAX(VMin, VMax)), roi_hsv_mask); + // imshow("roi_hsv_mask",roi_hsv_mask); + // cv::waitKey(10); + + cv::calcHist(&roi_hsv, 1, 
hs_channels, roi_hsv_mask, hs_hist_for_HSD, 2, hs_size, phs_ranges, true, false); + cv::normalize(hs_hist_for_HSD , hs_hist_for_HSD , 0, 255, CV_MINMAX); + + double sum_hs_hist=sum(hs_hist_for_HSD)[0]; + hs_hist_pdf=hs_hist_for_HSD/sum_hs_hist; + + //used to generate the initial hsd_hist(use this just for the right data format ) + cv::calcHist(&roi_hsv, 1, hsd_channels, roi_hsv_mask, hsd_hist, 3, hsd_size, phsd_ranges, true, false); + + detectWindow = selection; + firstRun = false; + } } - else + else//main loop to get the hsd pdf, just update the depth pdf , and combine it with the initial hs pdf { if(!occluded&&!half_occluded) { @@ -173,6 +241,7 @@ void Object_Detector::HSD_backprojection() co_ordinates[0].push_back(vertices[1]); co_ordinates[0].push_back(vertices[2]); co_ordinates[0].push_back(vertices[3]); + depth_mask=Mat::zeros(Color.size(),CV_8UC1); drawContours( depth_mask,co_ordinates,0, Scalar(255),CV_FILLED, 8 ); depth_mask&=hsv_mask; @@ -183,7 +252,6 @@ void Object_Detector::HSD_backprojection() double sum_tmp_depth_hist_pdf=sum(tmp_depth_hist_pdf)[0]; tmp_depth_hist_pdf=tmp_depth_hist_pdf/sum_tmp_depth_hist_pdf; - //////////////////////cost a lot of time ////////////////////// for (int i=0; i1) { - //use x y to generate position_mask + //use x y to generate position_mask,the object can move in the window which is AREA_TOLERANCE size bigger than the last detected window position_mask=Mat::zeros(Color.size(),CV_8UC1); - // creat a bigger search search_window based on detectWindow int detectWindow_XL=detectWindow.x-AREA_TOLERANCE,detectWindow_XR=detectWindow.x+detectWindow.width+AREA_TOLERANCE; int detectWindow_YT=detectWindow.y-AREA_TOLERANCE,detectWindow_YB=detectWindow.y+detectWindow.height+AREA_TOLERANCE; Rect search_window=Rect(detectWindow_XL,detectWindow_YT,detectWindow_XR-detectWindow_XL,detectWindow_YB-detectWindow_YT)&Rect(0,0,Color.size().width,Color.size().height); @@ -415,8 +478,9 @@ cv::RotatedRect Object_Detector::detectCurrentRect(int id) if(detectWindow.area()>1) current_detectedBox = object_shift(backproj, detectWindow, cv::TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 )); - // cv::rectangle(backproj, detectWindow, cv::Scalar(255, 0, 0), 2, CV_AA); - // imshow("backproj final",backproj); + + // cv::rectangle(backproj, detectWindow, cv::Scalar(255, 0, 0), 2, CV_AA); + // imshow("backproj final",backproj); return current_detectedBox; diff --git a/opt_gui/CMakeLists.txt b/opt_gui/CMakeLists.txt index 3195bf3..f3942ae 100644 --- a/opt_gui/CMakeLists.txt +++ b/opt_gui/CMakeLists.txt @@ -19,8 +19,7 @@ project(opt_gui) ############################################################################## # qt_build provides the qt cmake glue, roscpp the comms for a default talker -find_package(catkin REQUIRED COMPONENTS qt_build roscpp sensor_msgs image_transport cv_bridge) - +find_package(catkin REQUIRED COMPONENTS qt_build roscpp sensor_msgs image_transport cv_bridge opt_msgs) set(QML_IMPORT_PATH "${QML_IMPORT_PATH};${CATKIN_GLOBAL_LIB_DESTINATION}" ) set(QML_IMPORT_PATH2 "${QML_IMPORT_PATH};${CATKIN_GLOBAL_LIB_DESTINATION}" ) @@ -31,7 +30,10 @@ include_directories(${catkin_INCLUDE_DIRS} include) #add_message_files(FILES Roi_object.msg Roi_object_array.msg) #generate_messages(DEPENDENCIES std_msgs geometry_msgs sensor_msgs) -catkin_package() +catkin_package( + + CATKIN_DEPENDS qt_build roscpp sensor_msgs image_transport cv_bridge +) #generate_messages() ############################################################################## diff --git 
a/opt_gui/conf/opt_gui.yaml b/opt_gui/conf/opt_gui.yaml
deleted file mode 100644
index 6671399..0000000
--- a/opt_gui/conf/opt_gui.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
-view_topic_1: /kinect2_head/rgb_lowres/image
-
-view_topic_2: /kinect2_far/rgb_lowres/image
-
-view_topic_3: /kinect2_lenovo/rgb_lowres/image
-
-view_topic_4: /kinect2_head/rgb_lowres/image
-
-view_topic_5: /kinect2_head/rgb_lowres/image
-
-view_topic_6: /kinect2_head/rgb_lowres/image
diff --git a/opt_gui/data/readme b/opt_gui/data/readme
new file mode 100644
index 0000000..c2b906f
--- /dev/null
+++ b/opt_gui/data/readme
@@ -0,0 +1,6 @@
+1. Make a directory in this folder for each sensor, named after the sensor
+
+2. Draw bounding boxes in the GUI; the images of the bounding boxes will be saved here
+
+3. Select some good images and put them in the right folder (named after the sensor) for save/restore
+
diff --git a/opt_gui/include/component/opt_streaming_component.hpp b/opt_gui/include/component/opt_streaming_component.hpp
index 7493e1f..0faffad 100644
--- a/opt_gui/include/component/opt_streaming_component.hpp
+++ b/opt_gui/include/component/opt_streaming_component.hpp
@@ -10,58 +10,83 @@
 #include
 #include
 #include
-
+#include
+#include
 #include
-#include
-#include "opencv2/highgui/highgui.hpp"
+#include
 #include
 #include
+
+
+#include
+
+//OPENCV
+#include
+#include
+#include "opencv2/highgui/highgui.hpp"
+
+
 class OPT_Streaming_Component : public QQuickPaintedItem
 {
-  //make this a Qt Widget
-  Q_OBJECT
-  // defines a qml value for the topic
-  Q_PROPERTY(QString topic READ get_topic WRITE set_topic NOTIFY topic_changed)
+  //make this a Qt Widget
+  Q_OBJECT
+  // defines a qml value for the topic
+  Q_PROPERTY(QString topic READ get_topic WRITE set_topic NOTIFY topic_changed)
+  Q_PROPERTY(QString rois_dir_path READ get_rois_dir_path WRITE set_rois_dir_path NOTIFY rois_dir_path_changed)
+  Q_PROPERTY(QString all_rois_dir_path READ get_all_rois_dir_path)
+
+
+public:
+  // Constructor, takes parent widget, which defaults to null
+  OPT_Streaming_Component(QQuickItem * parent = 0);
+
+  void paint(QPainter *painter);
+  Q_INVOKABLE void setup(); // set up the subscriber (image msgs from the sensor) and the advertiser (roi msgs)
+
+  //getters and setters for Q_PROPERTYs
+  void set_topic(const QString &new_value);
+  QString get_topic() const;
-
-  public:
-  // Constructor, takes parent widget, which defaults to null
-  OPT_Streaming_Component(QQuickItem * parent = 0);
+  void set_rois_dir_path(const QString rois_dir_path_); // set the roi directory for the current camera view
+  QString get_rois_dir_path() const;
-  void paint(QPainter *painter);
-  void setup(ros::NodeHandle * nh);
+  QString get_all_rois_dir_path() const; // get the main folder holding the rois of all cameras
-  //getters and setters
-  void set_topic(const QString &new_value);
-  QString get_topic() const;
-  Q_INVOKABLE void save_ROI(QString sensor_name,int no_ROI,QString roi_name, int rect_X,int rect_Y, int rect_width, int rect_height );
-  Q_INVOKABLE void set_roi_name(int index, QString roi_name);
-  Q_INVOKABLE void publish_rois();
+  Q_INVOKABLE void save_ROI(QString sensor_name,int no_ROI, int rect_X,int rect_Y, int rect_width, int rect_height ); // save the selected rois as images
+
+  Q_INVOKABLE void add_roi(int no_ROI, QString roi_name, int rect_X,int rect_Y, int rect_width, int rect_height); // add every selected roi to msg:image2D_rois_msg
+
+  Q_INVOKABLE void publish_rois_from_gui();
+  Q_INVOKABLE void publish_rois_from_file();
+
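//  [Sketch, not part of the patch: what save_ROI() above plausibly does, based
//   on the readme and on the QML side, which later loads
//   all_rois_dir_path + sensor_name + "_roi_" + no_ROI + ".png". It would crop
//   the last received frame to the marked rectangle and write it out; the real
//   implementation lives in the component's .cpp file:]
//
//   cv::Rect roi = cv::Rect(rect_X, rect_Y, rect_width, rect_height)
//                  & cv::Rect(0, 0, conversion_mat_.cols, conversion_mat_.rows);
//   cv::imwrite(all_rois_dir_path_value.toStdString() + sensor_name.toStdString()
//               + "_roi_" + std::to_string(no_ROI) + ".png", conversion_mat_(roi));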
 signals:
-  void topic_changed();
-
-  private:
-  void receive_image(const sensor_msgs::Image::ConstPtr & msg);
-
-  // ROS
-  ros::NodeHandle * nh;
-  image_transport::ImageTransport * img_trans;
-  image_transport::Subscriber image_sub;
-  QString topic_value;
-  bool ros_ready;
-  // Used for buffering the image
-//  QImage * current_image;
-  QImage current_image;
-  cv::Mat conversion_mat_;
-  uchar * current_buffer;
-
-  //for publish msgs
-  int number_of_rois;
-  opt_msgs::Image2D_roi current_image2D_roi_msg;
-  opt_msgs::Image2D_roi_array image2D_rois_msg;
-  ros::Publisher image2D_rois_pub;
-//  gui::Roi_object_array::Ptr rois(new Roi_object_array);
+  void topic_changed();
+  void rois_dir_path_changed();
+
+private:
+  void receive_image(const sensor_msgs::Image::ConstPtr & msg); // callback that receives the images for display
+
+  // ROS
+  ros::NodeHandle nh;
+  image_transport::ImageTransport * img_trans;
+  image_transport::Subscriber image_sub;
+  QString topic_value;            // topic name
+  QString rois_dir_path_value;    // roi file path for the current sensor
+  QString all_rois_dir_path_value; // main roi file path for all sensors
+  bool ros_ready;
+  // Used for buffering the image
+  QImage current_image;
+  cv::Mat conversion_mat_;
+  uchar * current_buffer;
+  //for publishing msgs
+  int number_of_rois;
+  opt_msgs::Image2D_roi current_image2D_roi_msg;
+  opt_msgs::Image2D_roi_array image2D_rois_msg;
+  ros::Publisher image2D_rois_from_gui_pub;
+  // image_transport::Publisher image2D_rois_from_file_pub;
+  ros::Publisher image2D_rois_from_file_pub;
 };
 #endif // OPT_Streaming_Component_H
diff --git a/opt_gui/include/opt_gui/main_application.hpp b/opt_gui/include/opt_gui/main_application.hpp
index 71e6996..2ae76ff 100644
--- a/opt_gui/include/opt_gui/main_application.hpp
+++ b/opt_gui/include/opt_gui/main_application.hpp
@@ -3,7 +3,8 @@
 #include
 #include
-
+#include
+#include
 class Main_Application : public QQmlApplicationEngine {
   Q_OBJECT
   public:
@@ -18,12 +19,12 @@ class Main_Application : public QQmlApplicationEngine {
   private:
   ros::NodeHandle nh;
-  std::string view_topic1;
-  std::string view_topic2;
-  std::string view_topic3;
-  std::string view_topic4;
-  std::string view_topic5;
-  std::string view_topic6;
+  std::string sensor_name1;
+  std::string sensor_name2;
+  std::string sensor_name3;
+  std::string sensor_name4;
+  std::string sensor_name5;
+  std::string sensor_name6;
 };
diff --git a/opt_gui/launch/opt_gui.launch b/opt_gui/launch/opt_gui.launch
index 0669527..21356fc 100644
--- a/opt_gui/launch/opt_gui.launch
+++ b/opt_gui/launch/opt_gui.launch
@@ -5,7 +5,7 @@
-
+
diff --git a/opt_gui/resources/DeleteRed.png b/opt_gui/resources/DeleteRed.png
new file mode 100644
index 0000000000000000000000000000000000000000..495106c72e46513f9e784eb3f55e7deabc2e483f
GIT binary patch
literal 22716
[22716 bytes of binary PNG data omitted]

literal 0
HcmV?d00001
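A hedged sketch of what the receive_image() callback declared above typically does in this kind of component -- convert the ROS image with cv_bridge, buffer it in conversion_mat_/current_image, and schedule a repaint. The encoding and the exact steps are assumptions; the real implementation is in the component's .cpp file, which this patch does not show in this hunk:

void OPT_Streaming_Component::receive_image(const sensor_msgs::Image::ConstPtr & msg)
{
  // convert the ROS image to a cv::Mat (assuming RGB8 frames)
  cv_bridge::CvImageConstPtr cv_ptr =
      cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::RGB8);
  cv_ptr->image.copyTo(conversion_mat_);
  // wrap the buffer in a QImage so paint() can draw it
  current_image = QImage(conversion_mat_.data, conversion_mat_.cols,
                         conversion_mat_.rows, static_cast<int>(conversion_mat_.step),
                         QImage::Format_RGB888);
  update(); // request a repaint of this QQuickPaintedItem
}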
zFY;jW?)%F(c6UC)i6l8xPWXs4E|0VL5rRq1#P~P&OT3=I*K7WOFKrIB0z%}Bjd`=* z@NEg80@$~7QF4jVUv){tQfO28!BWYiU3}i5d(cle`|2|R4fsmrBpOYY6}5Op!dm;3 zuU|f}x-04SekNqezo2MZhM}W1``UDfI?z*#SUMN6_tErMoo(vgc4e`{tW@aG5HBC% zNL3BMEVKRD7%?>yak(&q>o{}>#YgS202N7mY-`TGHj88Z$T8Mei8@8@6+|Y{fHx&k z6j$a6DDZVRmBja}k8~w2Q+GoPAa~YP>8bq!SWvqb9O)q3OpqNP_|1=zE{Cg^{NL)~ zq`TZ#2FA&MTks=BPuFRt@$RuYUWpAYp^NJez97Jk^r$Qoz&GpU zDE~zv<3L-%pU8WPtAouSZbaZQVU-_VKmD!VdY>fb8-M4WSfy=C#*xg#?p8B4_ManN zd~4b#Gi^cQ1)*E2-is}(on+ZKxma>avv7OP_}ApF+((%Qb2Hc*SHt_Zd|MxdmwjyV znxJKdLBS>adrc$$$C~aI?egq<7hZRee-4+@HdXLpXZ1F()HP=KdVXnbv>h0sD{tQ2 zQ1^o!HSwd>8$>g2bmQwfFN?FA=S5cAG)hZ67l;9N@An! z-dy>fuf1{6I2>eLTq=L0XysML?*0fX^CjP3LE-*)0R2-^dDECqjw93i2@B0juWgza z;xjTF_UM6D(INyrRv6@kBwSi$Eu~o&$(=^sM6GdLfh$1iXRjW2NC*wv>IAL|@wA6` z{ijXiAhELDUC8pCI+Zkr?H1}6L-vZT@3*7PDD-%vQd2M26SNxi#MSmk4(L3$UC#*9D{Gx2ja^vGw zX(XAdfD4HU;$@h;(2}Ic`wmk4HAld2l zDYL*p3i?sNNZ9C0ckpI@=ocQcqoGR(`59(IZpMG0mhE6Rjabe8ebM5QIQB7f0>6BXQ|!q%LEMe78tJ~h1WaWW>40qJGQCpG=%3;9_t4@+A(K~Xw~y6WFq=w zO?+i~f)%_TW;uj5ZWed)o$YV;Ef*6hoy?TZyjVtIo9~<_8RSLW)m$En*`|s-tS6Sk zOKfJ6}Ie{fx3wCO=M#4#ls0%*|_C;q07EM*R$z3 z*Pgl^w0Zw^vFWVjCsn{F*}%p`94tX{L^|<9MDS`xWdYY;Gto#ac)&w-@ps=-AcH(( zuC=)xTYMnJ6^}uG$X9yo(A}ZaO^;LsQp;ia+Y1sMpzUuDiD%Y-fo(JC)QU!?6NZ*B ztfU1tX)3BSy8M@SPnE&u=sMy-EJa4tL~G?oBKTusbSNX_?k2t_D{}ke`T!hV0mlP5 z6#qgCeLm4(?m!oxAFX&Ybe^4m)SLA!S(aNe5svHD;qGXobu99w-+k!Yov9e|=~?L+ za;f_4F9+jLb#ygR;!32h1v~AIVFktdQ(Ou*rf1 z?rwU9V2WW7 ztuWQHl2-Nb!(-|e@Y@MD8qp^y7aP+WAIyYa7(Ik$%}K@{vYV46T+kKy8iFs*&;HSI z7XCObEnRA_tE&6@TS~W_y^{2`@=>(^m-oYyk+{g(4BhEsEQwnw;q*XUIqo315O99f zu+t&z((zFFk~fF5E12&{so}y+4{x8jp=?@)vq06e>Ub_{_jw6g3|_SvCF3K1CV=1g zyptvin(Ylus=sr^w8*5ATk{2H7CERznE|05y_8aUVn*>XUrG?cV_96T92-xXg#H52 zRwtf1oImcIT4=L9i6{rdn#T;|;3HcV(ws8C_=&VZeGiB0-Q@~vt})kod_$0%XKp3C zm{VS*=mH~PO!Fk~?)W6^f+4Nx&|{<>%iO#K0C z`6!Bel5_aW{sGT6EZ{CV1dx8H-`~Vq7>usTY2UJ*XnvUUP^K^Xwu-v2H2JSD*$)qR-ppYvgx|3VS*5=Coxpa!=z*mX&IaL5Q7;Vl= znvu7wAKxz96w)Pao5Fd;4GzCq`rgVW;)0k2qnW8i8y_Bg#9(PsB zna+oqK9Bv#FTnCV2hW7`E9=2e9LnVq1k>f74+{FCGEf=)u<_+gK>MrmtvHroN!Z{I>bbD~6UJ3a;sa`dR4xw#_bG4kA4>W`ysKaJMA=V< zo_!`{%)3#4O~2P&y~AH5B&6~N{8Zp z`Rlte5w9%FG(r(wF~3Lcq{F$x>D%Bix);}6-v%UyFHN&H&36|S9o^t}7$7f!Tejc* zSZ-7j?TMD&;Qu}aTDR5Nv3#FQQ8nG>6nsl%dNOTlO2Y82>dqwb%Wrkmd3m6A8jkv+ z7K2){a22pfA#x3$g`RgEx}jkdWz(Qf4R}G2b^W>;`J{h(m><2_lD9bw?z$>mEF(;a?U-zxvE58vv)F_JwgP9=21(MqCzYlY7?{Y;JmHoe$13^}%wg zLsD?^6HPw5Ps}`3ZdB5ru%nt$z8f2##FO~V?yT)xcg|{YvBUEEQZ{5yqr^AeK3HqT zy{1-UxshLIc$+`=y5Kfee(CaoSLl}PyOrWh9w?e8ih(VT@wls^LSorC=C~hgF#)d^ z8P1c~vJ;4v$kZ#|B?Q{A*)jPe2CZ=7t1fK9n>7g{9%@DxlHEGAvCG3`sq8dRaG{>Ru(1xEc8+Q@+~8c?-Fjfj()51=VN* zTrC095|U9ZaQ}v`y+eg}OFZk>`;`STZ&^gtrzU?M2(fC_Nuw4w9{aN``HSk$l6OjO~-EyqL7N1lIhS-c2YJ$M+-U@2m9Jz(=86_8^y zux}t@#jEbyiklKoKsj)k^0Uqk&cy7GFK5I@y+FD8?VXZg0pWrp70_F6DlME#hY3-e+N%+DQ8JGV5=I{UXQTW=*;^fc(& zH_EnJsyc4^!Ur6Oog-^7y|x$#AD4*4W@vv|Ou5p?QOkb6uE8IjXHv%Bf;1vzFWFi) z!m+O>#YfpTt|!CKRyhMoI-`=m?!QIGH{dgtoR$uw5>)xU8B}i$XgbmUYOn2v*2Ep( zB>dv4*Wx`*2c8#d1_k&JS+piEk|z9 zFlZi8i1@4ro4nwt`n|yyOFtt_Q9i0%NtDVC`E;n>RG%Xk`tK${rtUhao=4`npOoIq z3OPUiE1)hE3Bu@?ZGAUNMjkpx;ic?Kb@iNcB(&!G2Z$JtU)VnWm$M!@JE5NYFUg{A z_|t>-fLp=1o#|gR)b?%6GnC$#Bx8KjqnScqEsS+`9xn6*ql;DK-ZxtwBjp901Ec_X zi!1%pomDlvCOPfcb7OS<^>Orbmq~HWH)>FbtniBklK#UDeeQ@;4&wg3r&DR3CS`S1ztM zCDqv!1qOd}ooN1A5^$~;>acrX#JjuNdpq^v8AuuXO>g8U{{{GhExa(25vwu7uX#XJ zj=Xtg|Ng*TZDLp`H_~Z;La}ZcY#Tu|m|?e_4&ngFbQmRO-aqX@+8rgE-?s|tdg;FB zt{yz9^y`aUuL{n2dssI3BGn3)11ZQq4Hz70D(`%EzlS2`o|GsIe7b9Tg?-l;a0;O1 zX8So_(4)}(0bhBx`OSUNO(tax>M}M!O)wYXB`f8Yy{D?vAzq#K@5ZWX>ec5?T->(~ zC=bB;i%qhDrBb(x;yy&ijmDBSVsnsc&;97<%2C4h`YWKR)N9=CEkVDk9lW8I 
zwfB8cAl63N_i5WM^91*q>T2didDGCKdx0Wn?JHKbvT@Sy)p})e{Rl>p6O9{m#FR=B zHVw?3b@3aDXUK9pT4xl5zv-7-uTV4S4PF*y(MixqroN>=ZDzQ8Y#bHQ`wcMt|-PyXuru*xjTg)Sidpi*s{mn)r=clsudG#_Z-S>%|tv})kh&kU~Mc{n)!23=BxUn6L`R&wWkMieNUSiVo{x}s-kJ(LcV zWI!yX!)r{;4xsTp=^3yc8S$HBcS9CMtUq%T7@v=uTBm|e&$F2jSvx8E8iWmh`!wA@ zw)p1Md*H@mVmiB}^RtfXb)!tQK4r1B$FiR~h2Ag`AM*)ETLF)Tq3x=NA+>VX)$fqV zpjZBfD-jN6C9-hMtdtLa%mldjs_s)JUdYvJo&ox06|_yq;?WuXsaYc zWo=p+fh`B}NrZ7qhhqFF*rKP@CE!T0D(WgWPfJj8gXqD62L7{X3OdLj)!j~p=X0k@ z!Fa+Ocw|dwis3N>XkUEKu8kFI)$CYLKlAv@scU&BhXO`;Sv!4xNUpZG)8N9$$fG)% zl0R2};#$iPv#mS3w`&ORqG}ABeKFD8Tog153=LY8*AH`AL!0tPnW{JF)q*$Zo|ci^ zF*{k6RJN}bKf#e>pp-^tz^>hbjz?#gEDiT%$dDFK3qg|BrFQlo&B;HeE?nHtC^_ve z$~^qdyTfGZI;`fTZRMTr3OI1?9~NXx2u&BdJ|&KwX>z3@r9EE_<7;?(+|*D9Rv^8~ z6oz;`jy15(RAyB&>-#zuqmW7cgz3wcvDji$_@8-wLq#lgU`SB0e=wOP>+s8*C#H}0 zYS$0U-(J2kiJ`o-O@l7#zrfeZAe=z-%GzPz1Sk3#9g-P6ikQu4b&|a$>Et2fWe>4} zD^)0LE$qH2`HS%~H6!V1q=JydJI%lawIQmF1kRAZ8$=E~TUJZfMA{)k0(;f{t8SrP81aX#9S zPo*Jn>eFOpt>w#u*H2{r96)Y#srUp_oe*v>%l_r9(#63Zeqm{#~k_@$jsGkJNQ>Qc~@~@J4KY$$&G0ga=bg_-Cban>y?i9 zy#|xmH+Mp_nK$!=dfJ{?wY#71WSSRMS5wTAG>v@rW7Vh*bRU4dVtaaz#pvGWUFZIK zm_F{trNviNpr{;LPLNULXM$k?7#T4+w(kDi8#2&6>;CMiFtBcg=xNA#?MK9C z{pfGEmaXg#iIZs(kokw&zxe=o`I0Cda%yoFa#$>;symuk^Nf`puG3#^Ck{KWugOG! zlLU?K?F@{p;6wXYM2gC5SM{Yk}@Tn`1hKT8OT!l;y}Ep`Ip6RQZd;({J}SSI#uJB zMV1o^i%{_2_JD8f=es*&?mj~?N{?LY>}sPEYPkv%^`_C^e6*LQYPT5TXaxCl;N~>T zBDy&C2ftw-=~1COsIQ4dW#n~7Ydp1jm5X6J`7JgBZbP(5_S~OO6*F~^`LXV|~FHTeQl+8IS2t@fN* z`MsQzpEKJqRkSu+^Wt7jq179BC;_X@w5+YXOO%p8{)Sog_P-A=&jXSaY*&+-KULL4 zEL7Vpq*A?aA|x@T5tIBAa&8~@PE02IQ8E}uF@(oaBj35W4Lqw14tSybWO|7dwrAM3m*AW2&RMM1&s!#bz!2p10f5Pc5Y3B?jH?{i|?B%PdC30_xyHlteLR1 zH6nTOd_Iw=wz1LrUxbwE$sCerZoSZ7@qVQgk>J8($ohjPix%9f6Y34^ow%{d+csM| zm7|}FsBFYH1WXI{9^&3uhBK{!`}88$S=T?$`F_X!&JAXcftS?5x1a4=Oh|xpCLwmn zH1$U>Qww@HxttM!gojZoE<2*S(xvni|Z$kiIG11AfT(x5kBn<7w$#i#zo&VU3 zW@$#;d3nHcI@;;YT1L;p6#m^(yYSs=4l}^Y)$Y-4QM)EV_yo2x6Y@Nml<-2aot%## zQa)EpQI-Hf)J?YoqV6bU6+9qj|70A38dAga6Oi9_~ zpi!Pg@RU&;e}MnzzU}RJf+zl5CNK?lvWs6=N)x3tJKDdcHPO{_ASg~M^BK&zg2+IO z&ZcruyYKTep`>BcHuPZrqRh>A4PKTyxCSV`r@5cE@rEv*uIS!rz21a`a=RP*TR{$^O@#)M`pGbF+<+2QdyadZ`PQSfUutTR`t}Be)&P?XWd`adkd{B z0;Q^zWzePJXDB%#d}wxtww5e9AyP?PU+_q5;7q}q zQRfY2G{b7ZjH0#i>0%~u=-ig~k|E~9Kv!X@udG&`UEaLY;rcD$OErd@kp1qWL#Iso zmYcv&oxa4#zrj1(Uy^p>gb>GXY5Tl|!_!2TzNsaPo9Wd1Gmxj=9?A{q<#+ykUi@QN z>r3-#)N4~pJzE3|)@f`J5cc^C58aiQHa3}=h=_uVL&a0jsHRlnyf`ZoYdsP)kp1Ty zVl?Y*Ra%J(v|CPaHQ*`n6|~N-B~?!D&5vlG{acV_t-%Tv^BLubK_IL7|<$S1*d^`<_I`m%9<=KjkuDWKci%q(fC%IPBQ#`>9x;5 zmevPe=UqW(=aX4EdtDNZ!kTj6ue=5G=Rgh)c2!?Px^P10BamKcM*L6xG@YL zrx4&E%zz3FCtqXJa`2d5paERO@?dv$?TW^-u@=(l1Wp5I)Zm7;|WCqPkU zcw#D)5z)6>_KFxmh}4y}?~*Dt9o?L3Aq8#)G+b)Neirx0$)SP)wQ@#ZR*Q>P0K7BT zJuYdA3!T1GYc65JK||v)X#42a&rGF>W*d7L`v^i-9cRdV?IKxahdc8W_$XJ%S)#j3C9}x zE1FZ9=4rCh*{@eh1Zffsh2BQyPnd69tU`&L+eIQZY$3K@ zXS8&sIfzuj;DzFe=E9M7KE5TMUW&i-tK`eS^b;mA(db`1p^eNu&*}m%jmtAU>eM&q zh0DgJ=4aieu3N?c8eeUqdbNuDr*A~K&sPhnIxnk+*?vh+=|?XS6}F9&qwa`)&?O}`XEGL-xI1u~P7tjwae z@$I9tIa!Ay(Qx6yqBZhm(Ja~;hUjWiDZozs<9@Imt#wrESgKFn=vaoTl^dy`eLf!# z&p|&QTi>U1^XhA!+Nv~=Bl}&+_{iSz0u)CtGC|Pj%`kkYKfs(EeYmdYHpkyD@%Ux8 zKhtm``s-5&qXYfVCGaO1S{AAqK9Z@QA8?+Z!D+v1heZNA56PF0?vUzort1CBGraZX zD%&{fNO0rSOf*$EZDe!-5#RgNG~FxZ7UpYdRFH8Z-l0YlCK$QY1$5GRFqN&lECM*; z(d)mI0M;oy^Kb?@3-tRUmM~%7&EQrZ$7ynwY(ygSFW%&+btfaSJ=!ijq2s6(GwW7y z*Y&TI&9IIiNKz9UZRydaZ7#adVdOF4rWE+c6OqO2d-KyGRV6zDljb_%_6<>Z6kf!Z zj*>1wWv1W@1h(c~@`kO;WOhI!g4HGMDArG&HHaOQL>MvG6)`r7;fJNutGkb|m(Q-F zzDn7RwX2XtX#=$P=mcrRYsrkjX;r(js$Z9Hi-@Jt0%qUJSXz#+n+!bAnn+VZtg@Z2 z3evZ;ow2J^hW=s7EoI*?UaP+)TheI&3bsK88v0#xm@<+;MgV9Ge|*=>9T2x@&b1aJ 
zKb+PCPWf2?;qe4C`H~|7*Nn- zvNspEW?X)}(G7hmVu!RG$*#Qq*k~f1muK?J)uapiJQiF$yBVsD6@0;F3bBksCLU|R z=G?#RqThD8iO2CHY}cLQM1lBQbi|lH+aVx7>v5n7O|FNk>wW07qSAG<$b-U=H)HP1 z5Pf})^B{ii^UV0j$7&PxxNsnJNtigf4R`EXD8Gq0Y`mr0h;|gixt5RVvwEcxdS(H!arDxnJLZejNid>Vduk~sx0YVK zfuJJx;nffa@_!G%V-?(&Ng0b7%oe-uG>Hz$F9FJqWM%v+dr9)igX`RhxdMf)>MGRFUTG3hkK4v_K2J z5Mk}mCCfZ=EHm#s0N=3+K-L@+ls4s|YTQ3^LHeZ4BH8Q7Xn4Qc&#F`M*>DE+<)=w# zuA2%cVje!v2oi@la;vys*th5R2r+}3c&}odCwi%1u20lZ1~5D{jUm(?M{T4$+5l-=n$W~c8fS0W24jy2;$Iac z#*X0{usCuRUXT{K?lO ztDNTsN0$`c0UB+cD>V}A3o!NGQcJJ;|3qUq076dg|DcqWEUemnVc34iynMpasdbxZ zNGu>>@v1aRYJ{moWxCbh6L07VxcR-jgblL8YTI#PVo8afW@7(r7YYK8=Ib|Ruz#2u z7#G(5hKl~coPZl3 z#q{$eS(O*L(4}ZN<-L_ClD<^(lJ`oj#{%hV(1J)N%|QYI27OS^GO5VJ@NZ?v*dFo# z{mNQP{~F8!Eo65JIX%vZNW<^PZgvPtLQvzzdsnVdSVcmZ1VSA^ zs%FfoHG8H1cV=};unh2o=&`b_Wb#$xRLdcHXG3{8p;x}FaeZynf9SV1bs%JD+g0URR z7$yHBZ2^prboG07pFw$w?^ntNkplnrM@I#WmA~okObOm~13tOWz_zFoc;(;E75R?2 zNh0eX(hwT1S?MZ6QB>gh>xRO6@ev1f>9IF?TsUBV(1^q0UQdL?n}3Z2_KDJCFVFo0 zflA7Jp!{e@^vo)yVZp@^m{`f0GrME?8G7;@u zcF}v|8a+CQvK~r3yjVjPd3#QA3i8qbd9^{W0Z)1o?H)W`BOtJ=ucGntKd0SEN=-q9 zT#7rVjY()W#ht!I?XJ+{*&cVtUZo=syo3-qrlYtb|~Ca*%_9S2KUOrFT3V zmbdgxgT&RYsfz!QF2tx$qeSK7Cfan#Ez(*Wbr+x~%}PNa0!Ex9S;2yC{jP8uOO)6o zlLyMLpbA?xlKaN4lj7_xs zlNJ>yM)sxe9VBhRExn>>_5c|PSd)l~_cf6)h0g!P=R@Hevn~r9!Bzvgp|Sr<*SI@HZ1Fa+jU3oi*;%d z6V(EX#F)*SCUC1{$=M*pLYdW-9h$4uDCK z$2bl%(-R|DDFzK0EP>!<;#*`)Tcy*oFiU znquaT#=-MiWLRc>)Cr9p?3kk4mkE zo={r9kDphtlxwU3aWB@=H{o4TSiWr;~ULRS=ka^ zV31kuo{#+eb7&)z!@Bi|Q#Os^_AJH_mY?)Q$g|VOFa0Oqhk*KA4vX^AQ*I8Gk zf{Z_|{h(Hq7Hy*-wqqi;Gwz^;oe_AO1+os)<~%xkqtES z@OdWrkItyzCI~4+#P6@ESeVc}2C@2iK6v>D84{4*Dl_vtqED*UJjh*(3DLSLldO0h zex0dY+=jFfEEw~8xKOnf^}XL5%v$hNFR`ZWnH*Zh&p1>n<9dI<4Wv*$10OHGZ#R*Ph^sM0g0#6ki4-*7oOlo(=_GAbg*aEEYc_>V$7wOEfy3!Rjc z+bk!@5Lk^ZvIZ0K`RX(Hv&u8tsqp!&8mq%8bbScbgGh1|X(3epHx#`6w>!N1lY&}2 zyF~WC!VMAM+riZYBh)2=J9_zoa8pOLwZG2&BVWLp-XHQZ(7s%Tr=Qc%-RWRG9#0rl zS4^nwqem3qWJUesF2{SO(f>$b3#~x?@=vwqVaTnfsL$NKF5*&%wLJ&|yR&PRjm0A_f^qRF^ z7Sh8IP535aT-8@OPGbC;7K}2ZjWbS_67^rv7SDH+Ky#F4v78o8R6?vgM69n3p!RiJ zj8KQd{3J#XY7$!4*)O<8!zwx5;W+^W;n8k4mdIu(K8ebNMBrgzk`wH zLWOo-dB#Eb__FFhOc%s}MpWewKNPe_<_j*r6+k#q;T{5H`k_l0%yU`}STMzJ*n+v; zmdn9c6gVYxb{GU7d-k$|gH09|EpcnIG9>y?MI+8ln?2%{Qe&0jWN9a?w)C}M_^SkF zVsVg7P<)Y4)gDbl2n~T45AyTwN)w|$7f9cC>ZV70P+N=ItC2XDx?~?+{Bzw0M8fPn z7>=1635~ zaY@YbltfT!z|>|s$?WUtuR5lh2_^3jR-)YNPMEygHk>aS6sbbHA2HR zR%i2H$6BXok~T8}?yLH*!=I@?@_GbQ2s`z0iOCm|GuJN0< zed?KZsA?M-k$q!P$g^t5hyO_|eI)mwG}|i{UV{IZp@IE%Huoovh~Jy!VHAO*XHJ3{ zeWDQ2yJ!La8s^+m3p%M_{4d44)ZG}q%@}t(EL9}_d_@!XN;1w=Ouq7my4wc`rF-Pp zEI5@p3qi|wyWlU#aSu0o+O|Tuzv)hW`}^VXFOmworI1Ki2g^I{j|e-fWgZEN2|Y3` z0QCaF7Zi?SS_qUFp2^TiBM~jwgXk)|R@RIJfbBy?wu-68^7{q_q0dsEqQn4%;-3_^ zac3D^(Dz)(U~!=@j*s>+vJk!F6~8TK{qsye8MBH7S7`zprB+fqTZ1|_hDo;(y-vg* z`FAo|&R>>OJ3{Spr(^23|B&ZTv!|oj)gmW6>7{Cbb*jG@AO=UP4Jdei+QR3`W9pcZ z0aTRgmAXCJ=L4eCHWG(sQZ$Lf?R00fP&Aky(3@c}y27zx|Cl0Iu)#%R zH6se2SP~_Oe_7O^L2ms}f`+Il)vq9%GB<#D-j)zOO$Fc{ftQ3ifqQ&tPtzmAokpv! 
z_cBCn>}-_rAqN;U=t5kmk4p7Gy;%#Lwhl;FP}1`aswofrUZA^zA$95YnG@8mr$oh6 z3&x-U%V-!S2ej^h{?OI{YhHf8^O$4@3Y%*%Arg@%5`2A}{O#<%S-2^&^k>qm$}xxc z5wenlsOPFJ@3YaKG_!M}g!IBzc!dTQtzEUUx6Eh$bm>j*)Yh$tJb|vQ@rzB4zO!#;DwEZuo{nDecglwW5 z+4k(T?heR4=jMw11ea6>`e>$b^l4+5l{~a;xUa$?Xa>53dtWBBQu53r^gohkR0eYm zG;D+bC-{Mk9t{#h2tdGdOUBp7o^)N{N!mFsk_M5%o9Wm??8~vVBI|!mF7U-)Hv&mf zcTr$g3TAJ{nIDlghA6DE%0t@SAFt6HU`o=r5B2Z*yw#ERYh;EKPYdsn7g1h0y~c}- zFT(#!N2<0_;7UXHrbgu*t%zr;W82~8j}FsNg}6u9Gbh}iA#->OtX5ZCt$ z5fPWSj^~RMK2;4T?sAa&ZRg8Ky-6o|i+Lxq^gN8HPHq__jOb}XtGv6wdC{KExq7AE z4e+xZ9!)`OZ#N7rI4mMX3NAN|9`$cpraAp$7$A;ocz3-{w!O+XCy z+rbnVj!bE>kaoeNYL$&P8H8f+Ex#gEW7V+Pgq!$IAI`eB^V8%$x56$3&0uPM`iVrJ z2e|@cZZXJS5yy>~=rdj{93<(KGWU>?1RfbOU-ilF#3J~K=S&?J?P_QHR34C- zyD<#Y(!L`FY9rvhmnCRkC_(mE7RaYjx9lR$oCKSw{cy+s-pZp9y;2#y*f0!l+E}Qv zJ$X+VhS$(R<=@XDmi(L=cKAaN-b)rwo8pP_)^_NIf-lni?a2$qKWGU^vl6VkwMerH zHg-f&NSnwG!cdMMGKzIJTaejA-OKAc&r$hQW$4c@ITz}YQpLlyOyO;s;=u1sHf6cm zK9mptxQ<;FitceOwiCnlVD;>5K=Hw;qL}ay!a7SAv~Qi22R+l>U7kmlXPKv&kfaMB z-Z`Ol0o8h}?ImR*e$Q%U~4!Rto;2LRNWU%0e2EKnwk+y|CJhavHmt`n8?By+G{ z0K=3AZZ=jA{3!JlXSqz)%Xa6)#%`fC=lU&>2s!&f`>?9OXd*3wjO!@~RGj*OPkx*Q zB6zb-aJ2WC0_P%!s>ntxe;gMMR`K{S%veyL;FOC#$X)I9CFWrE+TjYi1o=>tI?Ljk z=Pj`*WH3O;a~@4Q+cd?UCKs+ypnQj7>RoBpN9umP3=q?C-{`SfQo^K@N=j8>B*BNI zphr)ilTbhKL;SjXEu@Rzc0}J!ViZl~Le82ODrkHWt7N0w?=LM1orm%aJ~*^@c$H8` zn)LcRpT*`=)R{0sWBflhH&c_WT{_JG>|aAkR8T{j_|EtYv``dsb-&O*k=)Ru^M~lQ z>A+1U?Md!yuTM;gFNPD8f$AFSWLmNZbJ;v06ZKVW?!7y8G8}BVHV;A4PHZu237xIufDT>YgHO5lVz7Uc% z_q+WtsvIn@A@jvoW2F)Mk^qb5nge*>3*Yt8FE)|>6BNJvMFf%Dfz8nwoizwt;J=+4 zl^PieEqv`ik@m|qXZY94F?>WP?kyiYph~GFL}BwkGybRyd%A_{ zl4p((;x<^3AwBm~bemTH@IzchK>wjmHo;|Iy{>;g`y=vHk^$Esmlhjlqr4Mvey>Li z_%@_$9}J%3E>zBt3zo_`dN)5m%4SqIc23wfcDJKA9a(g&qQ;A(f~N zGx7@oo7+<}9dYFZU#Gq%#)Ol`ED6VWu6NDDnJ3*YA8@mg9KF9?8ZDN~eD2dcDn)zi zTg8Z__Lk%G$QqG4Eti1wM=FD22r)fixlPpJ`P%G$N?H&B_R6{(pGZVYmPBcgzNbtX zg`h97J=6FE=V|1Wu8K4ZMwmj-hx_-!UX39Sxi~T1!7qY2{BSiGl5c6Ez1O7@ozr2F zY-)CAc?FcuCt5*q6=p5<{I923 zlj`r1L%VSmVZyf~z|VQQx(wr_irz)i&0G(aprdHE!A$y530{=K2F$-?WnEg;>v=rSR^k8JiZwl{WeWrOtsLdT7nJrNHSlSb9ckKQ$Pedh8n7Py2$pH}qfPdie7Z8cbRPU7Zo|My z2X1vy=)T;X#LEwO57LSK(e-!WD__d)%=R4qgMhXJcz%EzovACi5b-=uha&RisjHDn3I zTK}vnJuw}O7JBV@Nr$>-Z!h-xL21+{M-(dmn~cJ{#+0p~fF}?M`#a(QB8Y7!fktL;OQ1~} zBuX+S*t0kp>^)L}A_cKjDJq2zM9D+M&V2;_&b1)#VOPrV-wnEEth?^zU4o0wmiCx| zupg9dH?NrF?c+wL){6P=H5mO*#jb+B%z2H{#68Ua*UEMHL;e5p&*yV@#zl7yWt2Pf z&ZulMayXn3XCy5nC8dx`nRjQ?$<8P;!sT0$G-RDwk&FtdP|0kZJ;MDy{SUu?;PrUC zAFt>8{dm7#&w+?kTlY}2IHUU2`$ArtI3#D}s;h7HaMul@&P;qyiQUlcGpv|?-W+EM zZ?WX+LEKDvBc|5kiyKC74Y->Qg;XV^MHTdd@|O%|dfQp23kSPpXV~9Ry>>33vj&Q^ zpSSPkH$;Bkl)b) zKbbCGGeI0%o-EQ%sWYr9vM;`8e^TQxo3Utj7(9svM{jKHJ~Gzn>QfVfJ7u*w)-7b8Fi0w?G>1%&wG)fd`bG+#qerTr4Au2ZFJYT<>Y|(J%;2Vk(_f#TGCc` z%*vVEK6j79&~t$wbhVjnpoj`2{pO|Sj8#gx*)*L~yOm*z6j$Izczk&xzR_wNRaHX7 zjOO8KPaMcv21noJoH&L`$i2yzpbTx~EYJ(Ef6jZq4RtEk$pAxe3bBft$!xp|Q(ur} zTreA05_$OoJ?&8?He)MI_riwZe}n498FQ_{R##U&+FSnf&whI63fA!`mF?a!&y*&*7n{YOyY7YS`| z7e)@mNx8DMc?I^g=Y}*Ak;B#91P8NBDgl+=ug*)Sl4De4l{<-)2s?Om7`^(d%B+AH zO&;yZYE*4rz0}WJT?2>hQISQ9!+XX8e#x~I6d2V^w6S{zuKJy~DYD1z@beG9MdS%Q z`4nB=fX=Z&lcCf-lz;#)%cu85+zo`&-ML&RCYo!MDps_1CLLf>B{~R93QgDOsH2IB z@$wq0A-3?#N@QtML}5@%4?AP)WtITWfzbF5m}*fjuuUdB=)Im@3l*iUSN@3RcOQGX z9mb{eYXo+ub>GOF=W5;LGgel>vI}}V1Y3}Xz9xf}2UTaTCx}t*yZBmd9wk%;cb^$c z^h!2jB)?gS6OF8pq}#9KJxn;~#or=c8JDbWI~L2~CbV!9?no^CI>79hprO>SHkc*t ze0xfqPzG>q*LZ7Y^(DEIxpsWQ2M>zF-v67;epJcJ{CUQp*ZX~?Q1SU)!;W}r!aDl_ zQg>XNPrRhb=(ASJc=7@Jb%%%eRDSh!c@N{~sEwvDWN%m%%^&;CM)$@#5(zFzVyi#Y^q|DTV(nGu(xw+j5*k`f2s_XEx-J3mE&lE~^KF;DR8Hy@uZ? 
z@jUO$WSg?jJf7I+p9!_bqiF{YQzF8)U3}5pYWCpK@!r`A5=<=Y;^TVv@r+C>&jKY6 zWuwcM2b?lfGRRT*vBY#KY-wDPSd7@f&99oP-Zlg$!Q!|zVrR0M6bE=tM27Vdm~!cu z1=yKi3YCn`e|T^!abQu@t1zHOZsyamgm=O+QW97up2v&e-!WKY5^ueifpn6IA)4Vifm@=?01=BUF@|6-Wy<@fo{-XL z(mH$(x?KpC&#DnzaO)?RK|3{ffS~@7v(X5wg*Y6@HuJMep2sCq>36{Gq;$ z-mstE^NBM$q=pLGL{)vWd8%81sMd%L$cb?XNL5Bx{%T>N>SSz55?04lip?(nh4mVn?%C^d4o#MJ#AtXaV`& zG1=!jS?L0~+s!y<{>>IMGqS+Cqq%Q7O&2%Wnll-{_!9z6m`70TA?gIjWK)dX?#}#_ z)fm|zW^U0=YmJm!l?05ybZEit`$=W@^Az6u%k~H*V-%Zs`_9SjmSLseJjIn;F;y&6 zS#xi7O{mp{98*BPe_rAjDYIXl&pdTorfu}8Ris9VIu-n!w@jh+Db-tF>M`i%Rav6h zOz5)d%iPwxs|JasNg8(oAA*sXch8BOTMFEl{UDT@ud_{|6(8Se6{!J^OaEE+I{#a+ zJjiKH3ds~>$;qRX$u4Y1K$}@p?Q39z5SqrjBd*2aaS<)0&;gd9%jm*~6W9Ak4DI{% z!v@GQ)$$qo`Y|)cH5mT0j31!F6_oup3;pTSWWEGbs*X>(G~=Ww^QSSODSIOI^g`8p zXIkS9b`sZbk0KTZn=&Xe0Oohxsf`=^XG_O835hSDL)kfS$#>1hkI8=j(B&%2IGe?1 z=Yyt&`Vdkl6w6KA7pw%wl)H%P|3THq4Yd2AOZ-JtIgHmY3DyM}mBWIe^V_$yfDNiT z@;WeY8URFAIih<#!QR{Y`jo*Cw^yuMO1a`);GO@aRJ}x=E=ZOS^e0<@++29wYPSDj zOA?T!NVm`-XIG}>?qJXBdQLIsNfi>lC@0`zZ4CY{_?0xrK1wFLKXwMR;*pHb3eMCM ziz^pwhwDx?IuV~NO*UUx68C63oA|L-?uD@dv8|^DZgGU3=XN(d32;0Da*K-y%U5g# zDPh1#EyP_%<HBwn15g{s)^}3TwQ%7#sS`N&!P+$+RTW z;%cFdEtc^kug(>nm+cAJtI7xV6QQ%743IPYTbc##>$)N$jqwS=z0;C;i<|l`zug_TV=3lxvfmwWBp2&)HQwd3W^HNX~V9`-nTx9|7Eu1@*ETtSvEU?ASs-9DBjJNE0K>AMbr z+dst^*ID^Oo~vWc94_L>9y2d3nn0vdQ_Yd`VxC9LG=tl9&_r&GNzf_O6!A?$c*Kky z7C2BIL7sLZN`Ml4!GsalbN&KR3#hUP6|6jy=RET`zaaeeF?6K!x#tOH&IWcM1o@uy zEG_&vtAjBR<*FB(0C4=6Gt!G)IjYI~CQ2WB?PLO|MXNvB$C)9Lnhs0V2{;c-I+ePE zRLm|J@{3Ea_`z4GwI+K4$iF__q~gTv&I>E(-^h(ON_cDXB(r2L^yBkP!uXOvd1Q( z_(R6kw|gtf${bDf=|Oq?QOar1#;d~)lZ-Dyo}coFEKB@ZolTc@knJl}ZJ9(Q2~dJO zTwyA{@=@w5Ew5Ro7YTxqlVbrW2|gbf%e{kU`480d9J}$gdTNN@@@D(K?{9O6$#b0^ zJ9UduDp(hvLw7<}ebvKs81nAQu5^!V4fzGI-Zl$NRYM!+rRZ|2`YKxf#_B?V1QnWf5St$VqdwFyEo$N?U&(C_J=!MHu_xif~eGhA;>YqGDm_7#8%3f z*#|x9jrf%5!kTbe*gQz&koMcb4BaD-w#*K=!L*c%LtM0_L?wmWe@4 zPU~A2v5aF{+ctu4smG?t0IS<9czD|HzlYh8MxQg4qVA1>6rov@%C$O_-2JA>NTFpJ zwXU!u)BJ=YI|0cR1R?5pehfVMA1G#)~0~tJ$tooYePn54%q;t}^<2Ry66O#Ic+RlhyVrBQi|$rxLQRb4an-g9|4;`Ow~kL63dia3daZp@-}yk zOa7E5-x6U%{|hXb!awOBA=_7pu2P=qj3e7`$UMZ13FLg>rwtXZapZ02Zs5@BC#%3e zgH5{u_c%^=utIiekJVu3pKb3=Pod>3g>Y(+eAUVrAQL$SlkQ#(6NWOhb!*j^(ho#7 zzZlGAG7j=tFUZ0ee|x%u0Wmi6Z7RRqU&HG#tayKdjW%chdjCb*7;ikHx?2mC&c?NR zyx1F+Y!=ZFA2|7-sV%S(eqM!PCdZ0YSKn81w1D3VBHqSTMW}UX@7`f}$cRD7UZRIz zvBfN4bQp$>Tydk&Pd%;d6M~_~`X6iSty7d43FU}>^MD@Hjg5GdRvKKeFn@IJ!xK{W z)#CU)yc3I8DeCeQX1Ikk*eEhx8~^tQpQOdYhZ*2K0^YSR_GNB7(=9$M%6R7yqP({O zc9~?q20Af#c$jm$*Y8Kd|5Tm5^z}+9_`~~qiNm+AhJRXneP#Zd8a93XuV=SF*_Kdj zN;5ZPhz9OO1^TUPxQgF)KtTyqh7gYWQGqMt(D*~al)evyjn^Hg@td}4ZdWS6bEi|8 zkSd=0AfLS6Kl?|^4M+KVUU_yrQmRZ!t@BczpZ$KcWr(cd~ zM}?9rGrqN-v7MU>^}LC!8e2T1|97dfxq4^*ZiId~YM^hnT2tclXta*WUvb^7t&6MV z*W9}1!_TCu=|jMlK&PdrQ+76-)C#Y*P!Oy&V9wY2mEnBdsM@fQ`}qD_Q^_(w3iIdv zy>C1ka0?E`sP6nBUif7qa4wBJEEm^?zIx^(Di%ov(I^Nk+fuFeJWB;F5wj+G~aTgKKPRM zB`4|fFI~n-NiGT9-2AHV8ap%Z;B{&YUk(Rft0qb!u|8G>OrS|O`a29S(IF2es^oKqkDAbsaajm;r zr=5Vz@^1Xd9EFeJ9FKf6XSe)N4gdPgW5HI3s;ifBJ?>oYOhGilZ7uUP@Dz>GK-YY8 zQ?jAqUUX^^>j(cu>)JW$aM=1d1-{UV`K1rRmz%+V#opNp=iqNK$}g^?136c_L^z8l zjGAY+bNy>iTXvT835>(xya#&}KUc!xyZ_I4UY+(q9!AOi^;^d6%uG0y7_hdmr#v+C GjQSt#^j?Yp literal 0 HcmV?d00001 diff --git a/opt_gui/resources/Rois_listview_delegate.qml b/opt_gui/resources/Rois_listview_delegate.qml index bd8ddef..c74ea0a 100644 --- a/opt_gui/resources/Rois_listview_delegate.qml +++ b/opt_gui/resources/Rois_listview_delegate.qml @@ -7,26 +7,44 @@ Component { id: imagewraper width: show_rois.width/3 height: show_rois.width/3+20 - - Rectangle { + Rectangle {//main rect id: image color: "black" -// 
anchors.left: parent.left anchors.top: parent.top width: show_rois.width/3 height:show_rois.width/3+20 - Image { + Image {//image display id:image_ anchors.top: parent.top width: show_rois.width/3 height:show_rois.width/3 - z:1 + cache : false fillMode: Image.Stretch source: roiSource + Image {//icon for remove the current roi + id: delete_red + z:2 + visible: showConfirmbutton + anchors.top:parent.top + anchors.right: parent.right + source: "/images/DeleteRed.png" + width: 30 + height: 30 + fillMode: Image.PreserveAspectFit + MouseArea{ + anchors.fill: parent + onClicked: + { + var roi_no_=roi_visual_Model.get(index).roiNo + roi_visual_Model.remove(index) + roiModel.remove(roi_no_) + main_view.no_roi-- + } + } + } } TextInput { id:roi_name -// anchors.left: parent.left anchors.top: image_.bottom anchors.left: image_.left height: 20 @@ -34,6 +52,7 @@ Component { color: "lime" text:roiName font.pixelSize: 16 + } Button{ id:confirm_button @@ -41,45 +60,26 @@ Component { anchors.right: image_.right height: 20 width: 50 + visible: showConfirmbutton z:1 - text: qsTr("Confirm") - onClicked: + text: "confirm" + property int roi_no_ + onClicked://add the current roi to the models { roi_name.focus=false main_view.focus=true - roiblock.model.get(index).roiName=roi_name.text - switch(main_view.current_camera) - { - case 1: - videoStream1.set_roi_name(index,roi_name.text) - confirm_button.visible=false - break; - case 2: - videoStream2.set_roi_name(index,roi_name.text) - confirm_button.visible=false - break; - case 3: - videoStream3.set_roi_name(index,roi_name.text) - confirm_button.visible=false - break; - case 4: - videoStream4.set_roi_name(index,roi_name.text) - confirm_button.visible=false - break; - case 5: - videoStream5.set_roi_name(index,roi_name.text) - confirm_button.visible=false - break; - case 6: - videoStream6.set_roi_name(index,roi_name.text) - confirm_button.visible=false - break; - } + roi_visual_Model.setProperty(index, "roiName", roi_name.text) + roi_visual_Model.setProperty(index, "showConfirmbutton", false) + roi_no_=roi_visual_Model.get(index).roiNo + roiModel.setProperty(roi_no_, "showConfirmbutton", false) + roiModel.setProperty(roi_no_, "roiName", roi_name.text) + confirm_button.visible=false + delete_red.visible=false } } } - - MouseArea { +/* + MouseArea {//strech the image id:imagearea z:0 anchors.top: imagewraper.top @@ -87,28 +87,14 @@ Component { anchors.bottomMargin: 20 anchors.left: imagewraper.left anchors.right: imagewraper.right - - // anchors.fill: imagewraper - // hoverEnabled: true - // drag.target: imagewraper onClicked: { switch(imagewraper.state) { case "":imagewraper.state="expanded";break; case "expanded":imagewraper.state="";break; - // case "doubleexpanded":imagewraper.state="doubleexpanded";break; default:imagewraper.state="";break; } } - // onDoubleClicked: { - // switch(imagewraper.state) - // { - // case "expanded":imagewraper.state="expanded";break; - // case "":imagewraper.state="doubleexpanded";break - // case "doubleexpanded":imagewraper.state="expanded";break; - // default:imagewraper.state="expanded";break; - // } - // } } states: [ State { @@ -117,12 +103,6 @@ Component { PropertyChanges { target: image_; width: roiblock.width; height: roiblock.height/2} PropertyChanges { target: imagewraper.ListView.view; contentX: imagewraper.x;contentY: imagewraper.y; interactive: true } } - // ,State { - // name: "doubleexpanded" - // PropertyChanges { target: imagewraper; height: roiblock.height*2 ;width:imageblock.width*2;z:100} - // PropertyChanges { 
target: image; width: imageblock.width*2; height: imageblock.height*2} - // PropertyChanges { target: imagewraper.GridView.view; contentX: imagewraper.x;contentY: imagewraper.y.mouseY; interactive: true } - // } ] @@ -134,5 +114,7 @@ Component { } } ] + */ + } } diff --git a/opt_gui/resources/Streaming_listview_delegate.qml b/opt_gui/resources/Streaming_listview_delegate.qml new file mode 100644 index 0000000..4f2b373 --- /dev/null +++ b/opt_gui/resources/Streaming_listview_delegate.qml @@ -0,0 +1,58 @@ +import QtQuick 2.0 +import QtQuick.Controls 1.1 +import OPT_Streaming_Component 1.0 +import QtQuick.Window 2.1 + +Component{ + id:steaming_delegate + Rectangle{ + width: (Screen.height-40)*16/54 + height: (Screen.height-40)/6 + color: "#c92c2c" + OptStreamingComponent {//main component from c++ + id:opt_streaming + anchors.fill: parent + topic:modelData + } + Text { + id: topic_text + anchors.left: parent.left + anchors.top: parent.top + color: "lime" + text: opt_streaming.topic + font.pixelSize: 16 + font.bold: true + } + MouseArea{//click on the area to select this camera view as the main camera view in the "main_view" + anchors.top: parent.top + anchors.topMargin: 20 + anchors.bottom: parent.bottom + anchors.left: parent.left + anchors.right: parent.right + onClicked: + { + // set the camera topic in the "main_view",this change will activete the "set_topic" function in c++, + // and modify the subscriber to subscribe another camera topic ,at the same time ,the advertiser of roi will + // also be changed with the new sensor name from the new topic + main_videoStream.topic= opt_streaming.topic; + + var current_topic_name_list = main_videoStream.topic.split('/'); + var current_sensor_name=current_topic_name_list[1]; + + + //clear the roi_visual_Model and add the rois from "roiModel" to roi_visual_Model with the current sensor name + roi_visual_Model.clear(); + for( var i = 0; i< roiModel.count; i++ ) + { + if (roiModel.get(i).sensorName===current_sensor_name) + roi_visual_Model.append(roiModel.get(i)) + } + + // set the rois_dir_path in the main_videoStream with the sensor name + main_videoStream.rois_dir_path=main_videoStream.all_rois_dir_path+current_sensor_name+"/" + } + } + Component.onCompleted: opt_streaming.setup()// set up the subscriber to subscribe camera topic ,at the same time ,the advertiser of roi will + // also be setted with the sensor name from the topic + } +} diff --git a/opt_gui/resources/images.qrc b/opt_gui/resources/images.qrc index 78b0ff2..1574a91 100644 --- a/opt_gui/resources/images.qrc +++ b/opt_gui/resources/images.qrc @@ -1,5 +1,6 @@ openptrack-logo.png + DeleteRed.png diff --git a/opt_gui/resources/main_window.qml b/opt_gui/resources/main_window.qml index b4cb761..21af7b2 100644 --- a/opt_gui/resources/main_window.qml +++ b/opt_gui/resources/main_window.qml @@ -3,286 +3,31 @@ import QtQuick.Window 2.1 import OPT_Streaming_Component 1.0 import QtQuick.Controls 1.1 import QtQuick.Layouts 1.1 - +import Qt.labs.folderlistmodel 2.1 Window { id: main_window - visibility: Window.FullScreen + //visibility: Window.FullScreen width: Screen.width height: Screen.height title: "MOT" visible: true -// property string sensor_topic1:"/kinect2_head/rgb_lowres/image" -// property string sensor_topic2:"/kinect2_far/rgb_lowres/image" -// property string sensor_topic3:"/kinect2_lenovo/rgb_lowres/image" -// property string sensor_topic4:"/kinect2_head/rgb_lowres/image" -// property string sensor_topic5:"/kinect2_head/rgb_lowres/image" -// property string 
sensor_topic6:"/kinect2_head/rgb_lowres/image" - - RowLayout{ + RowLayout{//Main row, 3 items: Multi-Streaming_listview; main_view; rois_view spacing: 10 - ColumnLayout { - spacing: 10 - Rectangle{ - id:sensor_01 - property int number_rois : 0 - width: (Screen.height-40)*16/54 - height: (Screen.height-40)/6 - color: "#c92c2c" - OptStreamingComponent { - id: videoStream1 - anchors.fill: parent - objectName: "videoStream1" -// topic: topic1.text - } - TextInput { - id: topic1 - anchors.left: parent.left - anchors.top: parent.top - color: "lime" - text: videoStream1.topic - font.pixelSize: 16 - font.bold: true - } - MouseArea{ - anchors.top: parent.top - anchors.topMargin: 20 - anchors.bottom: parent.bottom - anchors.left: parent.left - anchors.right: parent.right - onClicked: - { - main_videoStream.topic= videoStream1.topic - main_view.current_camera=1 - main_view.no_roi=sensor_01.number_rois - roiblock.model=roiModel1 - topic1.font.bold=true - topic2.font.bold=false - topic3.font.bold=false - topic4.font.bold=false - topic5.font.bold=false - topic6.font.bold=false - } - } - } - Rectangle{ - id:sensor_02 - property int number_rois : 0 - width: sensor_01.width - height: sensor_01.height - color: "#c92c2c" - OptStreamingComponent { - anchors.fill: parent - objectName: "videoStream2" - id: videoStream2 -// topic: topic2.text - } - TextInput { - id: topic2 - anchors.left: parent.left - anchors.top: parent.top - color: "lime" - text: videoStream2.topic - font.pixelSize: 16 - font.bold: false - } - MouseArea{ - anchors.top: parent.top - anchors.topMargin: 20 - anchors.bottom: parent.bottom - anchors.left: parent.left - anchors.right: parent.right - onClicked: - { - main_videoStream.topic= videoStream2.topic - main_view.current_camera=2 - main_view.no_roi=sensor_02.number_rois - roiblock.model=roiModel2 - topic1.font.bold=false - topic2.font.bold=true - topic3.font.bold=false - topic4.font.bold=false - topic5.font.bold=false - topic6.font.bold=false - } - } - } - Rectangle{ - id:sensor_03 - property int number_rois : 0 - - width: sensor_01.width - height: sensor_01.height - color: "#c92c2c" - OptStreamingComponent { - anchors.fill: parent - objectName: "videoStream3" - id: videoStream3 -// topic: topic3.text - } - TextInput { - id: topic3 - anchors.left: parent.left - anchors.top: parent.top - color: "lime" - text: videoStream3.topic - font.pixelSize: 16 - font.bold: false - } - MouseArea{ - anchors.top: parent.top - anchors.topMargin: 20 - anchors.bottom: parent.bottom - anchors.left: parent.left - anchors.right: parent.right - onClicked: - { - main_videoStream.topic= videoStream3.topic - main_view.current_camera=3 - main_view.no_roi=sensor_03.number_rois - roiblock.model=roiModel3 - topic1.font.bold=false - topic2.font.bold=false - topic3.font.bold=true - topic4.font.bold=false - topic5.font.bold=false - topic6.font.bold=false - - } - } - } - Rectangle{ - id:sensor_04 - property int number_rois : 0 - width: sensor_01.width - height: sensor_01.height - color: "#c92c2c" - OptStreamingComponent { - anchors.fill: parent - objectName: "videoStream4" - id: videoStream4 -// topic: topic4.text - } - TextInput { - id: topic4 - anchors.left: parent.left - anchors.top: parent.top - color: "lime" - text: videoStream4.topic - font.pixelSize: 16 - font.bold: false - } - MouseArea{ - anchors.top: parent.top - anchors.topMargin: 20 - anchors.bottom: parent.bottom - anchors.left: parent.left - anchors.right: parent.right - onClicked: - { - main_videoStream.topic= videoStream4.topic - main_view.current_camera=4 - 
main_view.no_roi=sensor_04.number_rois - roiblock.model=roiModel4 - topic1.font.bold=false - topic2.font.bold=false - topic3.font.bold=false - topic4.font.bold=true - topic5.font.bold=false - topic6.font.bold=false - - } - } - } - Rectangle{ - id:sensor_05 - property int number_rois : 0 - width: sensor_01.width - height: sensor_01.height - color: "#c92c2c" - OptStreamingComponent { - anchors.fill: parent - objectName: "videoStream5" - id: videoStream5 -// topic: topic5.text - } - TextInput { - id: topic5 - anchors.left: parent.left - anchors.top: parent.top - color: "lime" - text: videoStream5.topic - font.pixelSize: 16 - font.bold: false - } - MouseArea{ - anchors.top: parent.top - anchors.topMargin: 20 - anchors.bottom: parent.bottom - anchors.left: parent.left - anchors.right: parent.right - onClicked: - { - main_videoStream.topic= videoStream5.topic - main_view.current_camera=5 - main_view.no_roi=sensor_05.number_rois - roiblock.model=roiModel5 - topic1.font.bold=false - topic2.font.bold=false - topic3.font.bold=false - topic4.font.bold=false - topic5.font.bold=true - topic6.font.bold=false - } - } - } - Rectangle{ - id:sensor_06 - property int number_rois : 0 - width: sensor_01.width - height: sensor_01.height - color: "#c92c2c" - OptStreamingComponent { - anchors.fill: parent - objectName: "videoStream6" - id: videoStream6 -// topic: topic6.text - } - TextInput { - id: topic6 - anchors.left: parent.left - anchors.top: parent.top - color: "lime" - text: videoStream6.topic - font.pixelSize: 16 - font.bold: false - } - MouseArea{ - anchors.top: parent.top - anchors.topMargin: 20 - anchors.bottom: parent.bottom - anchors.left: parent.left - anchors.right: parent.right - onClicked: - { - main_videoStream.topic= videoStream6.topic - main_view.current_camera=6 - main_view.no_roi=sensor_06.number_rois - roiblock.model=roiModel6 - topic1.font.bold=false - topic2.font.bold=false - topic3.font.bold=false - topic4.font.bold=false - topic5.font.bold=false - topic6.font.bold=true - } - } - } + // Multi-Streaming_listview + ListView { + spacing: 20 + width: (Screen.height-40)*16/54 + height: (Screen.height-40) + model: topic_model// this model comes from c++ in :"main_application.cpp" + delegate: Streaming_listview_delegate{} } - ColumnLayout { + + //Main_view + ColumnLayout {//main_view spacing: 50 - Image { + Image {//logo id: openptrack_logo source: "/images/openptrack-logo.png" width: 244 @@ -290,27 +35,25 @@ Window { anchors.horizontalCenter: parent.horizontalCenter fillMode: Image.PreserveAspectFit } - Rectangle{ + + //main_view where to select rois + Rectangle { width: 960 height:540 color: "#c92c2c" focus: true id: main_view - property int current_camera : 1 property int no_roi : 0 property int rect_X property int rect_Y property int rect_width property int rect_height - - // property string roi_name_s - OptStreamingComponent { + OptStreamingComponent {//main stream id:main_videoStream anchors.fill: parent - objectName: "main_videoStream" -// topic: main_topic.text + Component.onCompleted: main_videoStream.setup() } - TextInput { + Text {//display the text of the topic name id: main_topic anchors.left: parent.left anchors.top: parent.top @@ -321,7 +64,7 @@ Window { font.bold: true font.italic: true } - MouseArea { + MouseArea {// area to draw the bounding boxes id: selectArea; property bool ready2add : false z:0 @@ -330,6 +73,7 @@ Window { anchors.left: parent.left anchors.right: parent.right onPressed: { + selectArea.focus=true if (highlightItem !== null) { // if there is already a 
selection, delete it
@@ -364,117 +108,174 @@ Window {
 }
 }
 }
- Keys.onSpacePressed:{
+ Keys.onSpacePressed:{// press space to confirm your bounding box, save the ROIs as images, and add them to the model
 if(selectArea.ready2add)
 {
- no_roi++
 var topic_name_list = main_topic.text.split('/');
 var sensor_name=topic_name_list[1];
 var roi_name=sensor_name+"_roi_"+no_roi.toString();
- var roiSourceUrl="file:/tmp/"+sensor_name+"_roi_"+no_roi.toString()+".png"
-
- switch(current_camera)
- {
- case 1:
- videoStream1.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height)
- roiModel1.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl });
- (sensor_01.number_rois)++;//use the the number of the models
- selectArea.highlightItem.destroy ();
- break;
- case 2:
- videoStream2.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height)
- roiModel2.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl });
- (sensor_02.number_rois)++;
- selectArea.highlightItem.destroy ();
- break;
- case 3:
- videoStream3.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height)
- roiModel3.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl });
- (sensor_03.number_rois)++;
- selectArea.highlightItem.destroy ();
- break;
- case 4:
- videoStream4.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height)
- roiModel4.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl });
- (sensor_04.number_rois)++;
- selectArea.highlightItem.destroy ();
- break;
- case 5:
- videoStream5.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height)
- roiModel5.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl });
- (sensor_05.number_rois)++;
- selectArea.highlightItem.destroy ();
- break;
- case 6:
- videoStream6.save_ROI(sensor_name,no_roi,roi_name,rect_X,rect_Y,rect_width,rect_height)
- roiModel6.append({"roiNo": no_roi ,"roiName": roi_name, "roiSource": roiSourceUrl });
- (sensor_06.number_rois)++;
- selectArea.highlightItem.destroy ();
- break;
- }
+ var roiSourceUrl=main_videoStream.all_rois_dir_path+sensor_name+"_roi_"+no_roi.toString()+".png"
+ main_videoStream.save_ROI(sensor_name,no_roi,rect_X,rect_Y,rect_width,rect_height);// save the ROI as an image
+ roiModel.append({"roiNo": no_roi ,"sensorName":sensor_name, "roiName": roi_name,"rectX":rect_X,"rectY":rect_Y,"rectWidth":rect_width,"rectHeight":rect_height,"roiSource": roiSourceUrl ,"showConfirmbutton":true, "published":false});
+ roi_visual_Model.append({"roiNo": no_roi ,"sensorName":sensor_name, "roiName": roi_name,"rectX":rect_X,"rectY":rect_Y,"rectWidth":rect_width,"rectHeight":rect_height,"roiSource": roiSourceUrl ,"showConfirmbutton":true, "published":false});
+ selectArea.highlightItem.destroy ();
+ no_roi++;
 }
 selectArea.ready2add=false
 }
 }
 }
+
+ // Rois_view
 Rectangle{
 id: show_rois
 width:Screen.width-(Screen.height-40)*16/54-980
 height: Screen.height
- ListView {
- id: roiblock
- anchors.top: parent.top
- width: parent.width
- height: parent.height-50
- spacing: 20
- clip: true
- model: roiModel1
- delegate: Rois_listview_delegate{}
- ListModel{
- id: roiModel1
- }
- ListModel{
- id: roiModel2
- }
- ListModel{
- id: roiModel3
- }
- ListModel{
- id: roiModel4
- }
- ListModel{
- id: roiModel5
- }
- ListModel{
- id: roiModel6
+ RowLayout{// One row with two items: show rois_from_gui; show rois_from_file
+ anchors.fill: parent
+
+ // show rois_from_gui
+ ListView {
+ id: roiblock
+ width: parent.width/2
+ height:
parent.height
+ spacing: 20
+ clip: true
+ model: roi_visual_Model
+ delegate: Rois_listview_delegate{}
+ header: header_roiblock
+ footer: footer_roiblock
+ ListModel{
+ id: roiModel// list model for all the ROIs in all the cameras
+ }
+ ListModel{
+ id: roi_visual_Model// list model for the ROIs of the current camera
+ }
+
+ Component {// header of the list view: Text and Button
+ id: header_roiblock
+ Rectangle
+ {
+ width: parent.width/2
+ height: 50
+ ColumnLayout{
+ anchors.fill: parent
+ spacing: 10
+ Text {
+ id: header_roiblock_text
+ width: parent.width
+ height: 20
+ color: "blue"
+ font.pixelSize: 20
+ text: "ROIs_from_marking"
+ }
+ Button{
+ id: publish_rois_from_gui
+ anchors.top:header_roiblock_text.bottom
+ height: 20
+ width: parent.width
+ text: qsTr("Publish_rois_from_gui")
+ onClicked:
+ {// add all the ROIs in roi_visual_Model to the msg, and publish the msg (all in C++)
+ for( var i = 0; i< roi_visual_Model.count; i++ )
+ {
+ if(roi_visual_Model.get(i).published===false)
+ {
+ main_videoStream.add_roi(roi_visual_Model.get(i).roiNo,roi_visual_Model.get(i).roiName, roi_visual_Model.get(i).rectX,roi_visual_Model.get(i).rectY, roi_visual_Model.get(i).rectWidth, roi_visual_Model.get(i).rectHeight);// add the ROI to the msg
+ roi_visual_Model.setProperty(i, "published", true)// set flag in the model
+ var no_ROI_=roi_visual_Model.get(i).roiNo;
+ roiModel.setProperty(no_ROI_, "published", true)// set flag in the model
+ }
+ }
+ main_videoStream.publish_rois_from_gui();// publish the msg
+ }
+ }
+ }
+ }
+ }
+ Component {// footer of the list view: empty rectangle
+ id: footer_roiblock
+ Rectangle
+ {
+ width: parent.width/2
+ height: 50
+ }
+ }
 }
- }
- Button{
- id: publish_rois
- anchors.top: roiblock.bottom
- anchors.topMargin: 20
- anchors.left: roiblock.left
- anchors.leftMargin: 40
- height: 20
- width: 100
- text: qsTr("Start Detection")
- onClicked:
- {
- switch(main_view.current_camera)
- {
- case 1: videoStream1.publish_rois();
- break;
- case 2: videoStream2.publish_rois();
- break;
- case 3: videoStream3.publish_rois();
- break;
- case 4: videoStream4.publish_rois();
- break;
- case 5: videoStream5.publish_rois();
- break;
- case 6: videoStream6.publish_rois();
- break;
+
+
+ // show rois_from_file
+ ListView {
+ id:roi_folder
+ width: parent.width/2
+ height: parent.height
+ spacing: 20
+ model: roi_model_file
+ delegate: roifileDelegate
+ header: roi_folder_header
+
+ FolderListModel {// model built from the folder's file list
+ id:roi_model_file
+ nameFilters: ["*.png"]
+ folder: main_videoStream.rois_dir_path
+ }
+ Component {// header of the list view: Text and Button
+ id: roi_folder_header
+ Rectangle
+ {
+ width: parent.width/2
+ height: 50
+ ColumnLayout{
+ anchors.fill: parent
+ spacing: 10
+ Text {
+ id: header_roiblock_text2
+ height: 20
+ text: "ROIs_from_folder"
+ color: "green"
+ font.pixelSize: 20
+ }
+ Button{
+ id: publish_rois_from_file
+ anchors.top:header_roiblock_text2.bottom
+ height: 20
+ width: parent.width
+ text: qsTr("publish_rois_from_file")
+ onClicked:
+ {
+ main_videoStream.publish_rois_from_file();// call the C++ function
+ }
+ }
+ }
+ }
+ }
+
+ Component {// delegate of the list view: show the image and text (ROI file name)
+ id: roifileDelegate
+ Rectangle {
+ width: show_rois.width/3
+ height:show_rois.width/3+20
+ Image {
+ id:roi_folder_image
+ anchors.top: parent.top
+ width: show_rois.width/3
+ height:show_rois.width/3
+ z:1
+ fillMode: Image.Stretch
+ source: roi_model_file.folder+fileName
+ }
+ Text {
+ id:roi_name
+ anchors.top:
roi_folder_image.bottom + anchors.left: roi_folder_image.left + height: 20 + width: show_rois.width/3 + color: "lime" + text:fileName.substring(0,fileName.indexOf('.')) + font.pixelSize: 16 + } + } } } } diff --git a/opt_gui/resources/qml.qrc b/opt_gui/resources/qml.qrc index 552d54b..ff4983f 100644 --- a/opt_gui/resources/qml.qrc +++ b/opt_gui/resources/qml.qrc @@ -1,6 +1,7 @@ main_window.qml + Streaming_listview_delegate.qml Rois_listview_delegate.qml diff --git a/opt_gui/src/component/opt_streaming_component.cpp b/opt_gui/src/component/opt_streaming_component.cpp index 07ae0e8..5993b8d 100644 --- a/opt_gui/src/component/opt_streaming_component.cpp +++ b/opt_gui/src/component/opt_streaming_component.cpp @@ -1,182 +1,240 @@ #include "../../include/component/opt_streaming_component.hpp" + OPT_Streaming_Component::OPT_Streaming_Component(QQuickItem * parent) : - QQuickPaintedItem(parent), - current_image(NULL), - current_buffer(NULL), - topic_value("/cam0"), - ros_ready(false), - img_trans(NULL),number_of_rois(0) { + nh(), + QQuickPaintedItem(parent), + current_image(NULL), + current_buffer(NULL), + topic_value("Click_on_one_of_the_camera_views_on_the_left_to_start"), + ros_ready(false), + img_trans(NULL),number_of_rois(0) { + all_rois_dir_path_value=QString::fromStdString("file:"+ros::package::getPath("opt_gui")+"/data/"); } -void OPT_Streaming_Component::setup(ros::NodeHandle * nh) { - img_trans = new image_transport::ImageTransport(*nh); - //TODO: make these parameters of the component - image_sub = img_trans->subscribe( - topic_value.toStdString(), - 1, - &OPT_Streaming_Component::receive_image, - this, - image_transport::TransportHints("raw") - ); - - std::string image2D_rois_pub_name = topic_value.toStdString().substr(0, topic_value.toStdString().rfind('/')) + "/image2D_rois"; - image2D_rois_pub=nh->advertise(image2D_rois_pub_name,3); - ros_ready = true; - ROS_INFO("Setup of video component complete"); -} +//set up the subscriber(image msg from sensor) and advertiser(roi msg) +void OPT_Streaming_Component::setup() { + img_trans = new image_transport::ImageTransport(nh); -/* -void OPT_Streaming_Component::receive_image(const sensor_msgs::Image::ConstPtr &msg) { - // check to see if we already have an image frame, if we do then we need to - // delete it to avoid memory leaks - if( current_image ) { - delete current_image; - } - // allocate a buffer of sufficient size to contain our video frame - uchar * temp_buffer = (uchar *) malloc(sizeof(uchar) * msg->data.size()); - - // and copy the message into the buffer - // we need to do this because QImage api requires the buffer we pass in to - // continue to exist whilst the image is in use, but the msg and it's data will - // be lost once we leave this context - current_image = new QImage( - temp_buffer, - msg->width, - msg->height, - QImage::Format_RGB888 // TODO: detect the type from the message - ); - - ROS_INFO("Recieved Message"); - - // We then swap out of bufer to avoid memory leaks - if(current_buffer) { - delete current_buffer; - current_buffer = temp_buffer; - } + image_sub = img_trans->subscribe( + topic_value.toStdString(), + 1, + &OPT_Streaming_Component::receive_image, + this, + image_transport::TransportHints("raw") + ); - // finally we need to re-render the component to display the new image - update(); -} + std::string sensor_name=topic_value.toStdString().substr(1, (topic_value.toStdString().size()-17)); + std::string image2D_rois_from_gui_pub_name = "/"+sensor_name + "/image2D_rois_from_gui"; + 
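// Note on the line above (an assumption, not from the original sources): substr(1, size-17)
+ // is meant to strip the leading '/' and the 17-character "/rgb_lowres/image" suffix, so a
+ // topic such as "/kinect2_head/rgb_lowres/image" (the topic form this patch assumes) reduces
+ // to the sensor name from which the per-sensor ROI topics below are built, e.g.
+ // "/kinect2_head/image2D_rois_from_gui".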
+ image2D_rois_from_gui_pub=nh.advertise<opt_msgs::Image2D_roi_array>(image2D_rois_from_gui_pub_name,3);
+ std::string image2D_rois_from_file_pub_name = "/"+sensor_name + "/image2D_rois_from_file/image";
+// image2D_rois_from_file_pub=nh.advertise<sensor_msgs::Image>(image2D_rois_from_file_pub_name,3);
+ image2D_rois_from_file_pub=nh.advertise<opt_msgs::Image2D_roi_file>(image2D_rois_from_file_pub_name,3);
-*/
+ ros_ready = true;
+ ROS_INFO("Setup of video component complete");
+}
-void OPT_Streaming_Component::receive_image(const sensor_msgs::Image::ConstPtr &msg) {
- image2D_rois_msg.header=msg->header;
- //-----------------------------This part comes from rtq_image_view----------------------------------
+// callback function to receive images from the sensor
+void OPT_Streaming_Component::receive_image(const sensor_msgs::Image::ConstPtr &msg) {
+ image2D_rois_msg.header=msg->header;
+
+ //-----------------------------This part comes from rqt_image_view----------------------------------
+ try
+ {
+ // First let cv_bridge do its magic
+ cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::RGB8);
+ conversion_mat_ = cv_ptr->image;
+ }
+ catch (cv_bridge::Exception& e)
+ {
 try
 {
- // First let cv_bridge do its magic
- cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::RGB8);
+ // If we're here, there is no conversion that makes sense, but let's try to imagine a few first
+ cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg);
+ if (msg->encoding == "CV_8UC3")
+ {
+ // assuming it is rgb
 conversion_mat_ = cv_ptr->image;
+ } else if (msg->encoding == "8UC1") {
+ // convert gray to rgb
+ cv::cvtColor(cv_ptr->image, conversion_mat_, CV_GRAY2RGB);
+ } else if (msg->encoding == "16UC1" || msg->encoding == "32FC1") {
+ // scale / quantify
+ double min = 0;
+ double max = 10;
+ if (msg->encoding == "16UC1") max *= 1000;
+ // dynamically adjust range based on min/max in image
+ cv::minMaxLoc(cv_ptr->image, &min, &max);
+ if (min == max) {
+ // completely homogeneous images are displayed in gray
+ min = 0;
+ max = 2;
+ }
+ cv::Mat img_scaled_8u;
+ cv::Mat(cv_ptr->image-min).convertTo(img_scaled_8u, CV_8UC1, 255. / (max - min));
+ cv::cvtColor(img_scaled_8u, conversion_mat_, CV_GRAY2RGB);
+ } else {
+ qWarning("ImageView.callback_image() could not convert image from '%s' to 'rgb8' (%s)", msg->encoding.c_str(), e.what());
+ return;
+ }
+ }
 catch (cv_bridge::Exception& e)
 {
- try
- {
- // If we're here, there is no conversion that makes sense, but let's try to imagine a few first
- cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg);
- if (msg->encoding == "CV_8UC3")
- {
- // assuming it is rgb
- conversion_mat_ = cv_ptr->image;
- } else if (msg->encoding == "8UC1") {
- // convert gray to rgb
- cv::cvtColor(cv_ptr->image, conversion_mat_, CV_GRAY2RGB);
- } else if (msg->encoding == "16UC1" || msg->encoding == "32FC1") {
- // scale / quantify
- double min = 0;
- double max = 10;
- if (msg->encoding == "16UC1") max *= 1000;
- // dynamically adjust range based on min/max in image
- cv::minMaxLoc(cv_ptr->image, &min, &max);
- if (min == max) {
- // completely homogeneous images are displayed in gray
- min = 0;
- max = 2;
- }
- cv::Mat img_scaled_8u;
- cv::Mat(cv_ptr->image-min).convertTo(img_scaled_8u, CV_8UC1, 255.
/ (max - min));
- cv::cvtColor(img_scaled_8u, conversion_mat_, CV_GRAY2RGB);
- } else {
- qWarning("ImageView.callback_image() could not convert image from '%s' to 'rgb8' (%s)", msg->encoding.c_str(), e.what());
- return;
- }
- }
- catch (cv_bridge::Exception& e)
- {
- qWarning("ImageView.callback_image() while trying to convert image from '%s' to 'rgb8' an exception was thrown (%s)", msg->encoding.c_str(), e.what());
- return;
- }
+ qWarning("ImageView.callback_image() while trying to convert image from '%s' to 'rgb8' an exception was thrown (%s)", msg->encoding.c_str(), e.what());
+ return;
+ }
 }
- //-----------------------------This part comes from rtq_image_view----------------------------------
+ }
+ //-----------------------------This part comes from rqt_image_view----------------------------------
- // image must be copied since it uses the conversion_mat_ for storage which is asynchronously overwritten in the next callback invocation
- current_image=QImage(conversion_mat_.data,conversion_mat_.cols,conversion_mat_.rows,QImage::Format_RGB888);
- // finally we need to re-render the component to display the new image
- update();
+ // Only a QImage can be painted
+ current_image=QImage(conversion_mat_.data,conversion_mat_.cols,conversion_mat_.rows,QImage::Format_RGB888);
+
+ // finally we need to re-render the component to display the new image
+ update();
 }
+
+// This function runs after update() is called
 void OPT_Streaming_Component::paint(QPainter * painter) {
- painter->drawImage(boundingRect().adjusted(1, 1, -1, -1), current_image);
+ painter->drawImage(boundingRect().adjusted(1, 1, -1, -1), current_image);// paint the image to fit the size of the Rectangle
 }
+
+// When the sensor name is modified in the camera view, update the subscriber and advertiser topics with the new sensor name
 void OPT_Streaming_Component::set_topic(const QString & new_value) {
- if(topic_value != new_value) {
- topic_value = new_value;
- if(ros_ready) {
- image_sub.shutdown();
- image_sub = img_trans->subscribe(
- topic_value.toStdString(),
- 1,
- &OPT_Streaming_Component::receive_image,
- this,
- image_transport::TransportHints("raw")
- );
- }
- emit topic_changed();
+ if(topic_value != new_value) {
+ topic_value = new_value;
+ if(ros_ready) {
+ image_sub.shutdown();
+ image_sub = img_trans->subscribe(
+ topic_value.toStdString(),
+ 1,
+ &OPT_Streaming_Component::receive_image,
+ this,
+ image_transport::TransportHints("raw")
+ );
+
+ image2D_rois_msg.Rois.clear();
+
+ std::string sensor_name=topic_value.toStdString().substr(1, (topic_value.toStdString().size()-17));
+ std::string image2D_rois_from_gui_pub_name = "/"+sensor_name + "/image2D_rois_from_gui";
+ image2D_rois_from_gui_pub=nh.advertise<opt_msgs::Image2D_roi_array>(image2D_rois_from_gui_pub_name,3);
+
+ std::string image2D_rois_from_file_pub_name = "/"+sensor_name + "/image2D_rois_from_file/image";
+ image2D_rois_from_file_pub=nh.advertise<opt_msgs::Image2D_roi_file>(image2D_rois_from_file_pub_name,3);
+ }
+ emit topic_changed();
+ }
 }
 QString OPT_Streaming_Component::get_topic() const {
- return topic_value;
+ return topic_value;
 }
-void OPT_Streaming_Component::save_ROI(QString sensor_name,int no_ROI,QString roi_name, int rect_X,int rect_Y, int rect_width, int rect_height )
+
+void OPT_Streaming_Component::set_rois_dir_path(const QString rois_dir_path_)
 {
+ rois_dir_path_value=rois_dir_path_;
+ emit rois_dir_path_changed();
+}
- //get the roi with the bouding box you created in qml main_view, save it as a file
- cv::Rect roi_rec=cv::Rect(rect_X,rect_Y,rect_width,rect_height) &
cv::Rect(0,0,conversion_mat_.size().width,conversion_mat_.size().height);
-// if(roi_rec.size().area()>1)
-// {
- cv::Mat roi(conversion_mat_,roi_rec);
- cv::cvtColor(roi, roi, CV_BGR2RGB);
- QString roi_file_name="/tmp/"+sensor_name+"_roi_"+ QString::number(no_ROI)+".png";
- cv::imwrite(roi_file_name.toStdString(),roi);
-
- //create a image2D_roi_msg with the roi and push it to image2D_roi_array_msg called image2D_rois_msg
- current_image2D_roi_msg.no=no_ROI;
- current_image2D_roi_msg.name=roi_name.toStdString();
- current_image2D_roi_msg.x=rect_X;
- current_image2D_roi_msg.y=rect_Y;
- current_image2D_roi_msg.width=rect_width;
- current_image2D_roi_msg.height=rect_height;
- image2D_rois_msg.Rois.push_back(current_image2D_roi_msg);
- }
-//}
+QString OPT_Streaming_Component::get_rois_dir_path() const {
+ return rois_dir_path_value;
+}
+QString OPT_Streaming_Component::get_all_rois_dir_path() const {
+ return all_rois_dir_path_value;
+}
+
+
+// save the ROIs selected in the GUI as images, in the folder named after the corresponding sensor
+void OPT_Streaming_Component::save_ROI(QString sensor_name,int no_ROI, int rect_X,int rect_Y, int rect_width, int rect_height )
+{
+ // get the roi with the bounding box you created in the qml main_view, and save it as a file
+ cv::Rect roi_rec=cv::Rect(rect_X,rect_Y,rect_width,rect_height) & cv::Rect(0,0,conversion_mat_.size().width,conversion_mat_.size().height);
+ cv::Mat roi(conversion_mat_,roi_rec);
+ cv::cvtColor(roi, roi, CV_BGR2RGB);
+ std::string roi_file_name=ros::package::getPath("opt_gui")+"/data/"+sensor_name.toStdString()+"_roi_"+ QString::number(no_ROI).toStdString()+".png";
+ cv::imwrite(roi_file_name,roi);
+}
+
+// Add ROIs one by one to the msg; called from qml after pushing the button named "publish_rois_from_gui"
+void OPT_Streaming_Component::add_roi(int no_ROI,QString roi_name, int rect_X,int rect_Y, int rect_width, int rect_height)
+{
+ current_image2D_roi_msg.no=no_ROI;
+ current_image2D_roi_msg.name=roi_name.toStdString();
+ current_image2D_roi_msg.x=rect_X;
+ current_image2D_roi_msg.y=rect_Y;
+ current_image2D_roi_msg.width=rect_width;
+ current_image2D_roi_msg.height=rect_height;
+ image2D_rois_msg.Rois.push_back(current_image2D_roi_msg);
+}
-void OPT_Streaming_Component::set_roi_name(int index, QString roi_name)
+// After adding all the ROIs, publish the msg; like "add_roi", this is executed from qml
+void OPT_Streaming_Component::publish_rois_from_gui()
 {
- image2D_rois_msg.Rois[index-number_of_rois].name=roi_name.toStdString();
+ image2D_rois_from_gui_pub.publish(image2D_rois_msg);
+ std::cout< roi_filenames = dir.GetListFiles(rois_dir_path, ".png");
+
+ // publish the roi images one by one
+ for(std::vector<std::string>::iterator it =roi_filenames.begin(); it!=roi_filenames.end();it++)
+ {
+ cv::Mat image = cv::imread(rois_dir_path+*it);
+ sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", image).toImageMsg();
+ image2D_rois_from_file_pub.publish(msg);
+ std::cout<<"image msg for file has published"< roi_filenames = dir.GetListFiles(rois_dir_path, ".png");
+
+ // publish the roi images one by one
+ for(std::vector<std::string>::iterator it =roi_filenames.begin(); it!=roi_filenames.end();it++)
+ {
+ cv::Mat image = cv::imread(rois_dir_path+*it);
+
+ sensor_msgs::ImagePtr image_msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", image).toImageMsg();
+
+ opt_msgs::Image2D_roi_file current_image2D_roi_file_msg;
+ current_image2D_roi_file_msg.image_roi=*image_msg;
+ current_image2D_roi_file_msg.name=*it;
+
+
+ image2D_rois_from_file_pub.publish(current_image2D_roi_file_msg);
+ std::cout<<"image msg for file has published"<
 #include "../include/component/opt_streaming_component.hpp"
+#include
 #include "../../opt_gui/include/opt_gui/main_application.hpp"
-Main_Application::Main_Application() {
-
-}
-
+Main_Application::Main_Application():
+ nh("~")
+{}
 void Main_Application::run() {
- nh.param("view_topic_1",view_topic1,std::string("/kinect2_head/rgb_lowres/image"));
- nh.param("view_topic_2",view_topic2,std::string("/kinect2_far/rgb_lowres/image"));
- nh.param("view_topic_3",view_topic3,std::string("/kinect2_lenovo/rgb_lowres/image"));
- nh.param("view_topic_4",view_topic4,std::string("/kinect2_head/rgb_lowres/image"));
- nh.param("view_topic_5",view_topic5,std::string("/kinect2_head/rgb_lowres/image"));
- nh.param("view_topic_6",view_topic6,std::string("/kinect2_head/rgb_lowres/image"));
-
+////////////////////////////////////////////////////////////////////////////////////
+//// Read the sensor names from camera_network.yaml and use them to create topicrList,
+//// then create topic_model which can be used in qml
+////////////////////////////////////////////////////////////////////////////////////
+
+ QStringList topicrList;
+// std::vector keys;
+ // nh.getParamNames(keys);
+// ROS_INFO_STREAM("These are all the parameters: ");
+// std::copy(keys.begin(), keys.end(), std::ostream_iterator(std::cout, "\n"));
+// ROS_INFO_STREAM("Reading network!");
+ XmlRpc::XmlRpcValue list;
+ if (nh.getParam("network", list))
+ {
+ //ROS_INFO_STREAM("initial: " << list);
+ for(int i =0; i < list.size(); ++i)
+ {
+ //ROS_INFO_STREAM("item: " << list[i]);
+ XmlRpc::XmlRpcValue v(list[i]["sensors"]);
+ //ROS_INFO_STREAM("Sensors: " << v);
+ for(int k = 0; k < v.size(); ++k)
+ {
+ // ROS_INFO_STREAM("id"<rootContext();
+ context->setContextProperty("topic_model", QVariant::fromValue(topicrList));
 qmlRegisterType<OPT_Streaming_Component>("OPT_Streaming_Component", 1, 0, "OptStreamingComponent");
 // this loads the qml file we are about to create
 this->load(QUrl(QStringLiteral("qrc:/main_window.qml")));
-
 // Setup a timer to get the application's idle loop
 QTimer * timer = new QTimer(this);
 connect(timer, SIGNAL(timeout()), this, SLOT(main_loop()));
 timer->start(0);
-
- // setup the video component
- OPT_Streaming_Component * video1 = this->rootObjects()[0]->findChild(QString("videoStream1"));
- video1->set_topic(QString::fromStdString(view_topic1));
- video1->setup(&nh);
-
- OPT_Streaming_Component * video2 = this->rootObjects()[0]->findChild(QString("videoStream2"));
- video2->set_topic(QString::fromStdString(view_topic2));
- video2->setup(&nh);
-
- OPT_Streaming_Component * video3 = this->rootObjects()[0]->findChild(QString("videoStream3"));
- video3->set_topic(QString::fromStdString(view_topic3));
- video3->setup(&nh);
-
- OPT_Streaming_Component * video4 = this->rootObjects()[0]->findChild(QString("videoStream4"));
- video4->set_topic(QString::fromStdString(view_topic4));
- video4->setup(&nh);
-
- OPT_Streaming_Component * video5 = this->rootObjects()[0]->findChild(QString("videoStream5"));
- video5->set_topic(QString::fromStdString(view_topic5));
- video5->setup(&nh);
-
- OPT_Streaming_Component * video6 = this->rootObjects()[0]->findChild(QString("videoStream6"));
- video6->set_topic(QString::fromStdString(view_topic6));
- video6->setup(&nh);
-
- OPT_Streaming_Component * video_main = this->rootObjects()[0]->findChild(QString("main_videoStream"));
- video_main->set_topic(QString::fromStdString(view_topic1));
- video_main->setup(&nh);
 }
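// The "network" parameter parsed in run() above is expected to come from
// camera_network.yaml; a minimal sketch of the assumed layout (entry values are
// illustrative, not taken from the patch):
//
//   network:
//     - pc: "192.168.1.10"
//       sensors:
//         - id: "kinect2_head"
//         - id: "kinect2_far"
//
// Each sensor id is then apparently expanded to a topic of the form
// "/<id>/rgb_lowres/image" and appended to topicrList, which QML receives as
// topic_model via setContextProperty().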
 void Main_Application::main_loop() {
- ros::spinOnce();
- }
diff --git a/opt_gui/src/opt_gui_main.cpp b/opt_gui/src/opt_gui_main.cpp
index 98a7c6f..8248f20 100644
--- a/opt_gui/src/opt_gui_main.cpp
+++ b/opt_gui/src/opt_gui_main.cpp
@@ -3,8 +3,7 @@
 #include"../include/opt_gui/main_application.hpp"
 int main(int argc, char **argv) {
 // Set up ROS.
- ros::init(argc, argv, "gui_main");
-
+ ros::init(argc, argv, "opt_gui");
 QGuiApplication app(argc, argv);
 Main_Application engine;
diff --git a/opt_msgs/CMakeLists.txt b/opt_msgs/CMakeLists.txt
index 5ab7922..374439e 100644
--- a/opt_msgs/CMakeLists.txt
+++ b/opt_msgs/CMakeLists.txt
@@ -3,7 +3,7 @@ project(opt_msgs)
 find_package(catkin REQUIRED COMPONENTS roscpp message_generation geometry_msgs sensor_msgs)
 add_message_files(FILES Rois.msg RoiRect.msg HumanEntry.msg HumanEntries.msg BoundingBox2D.msg BoundingBox3D.msg Detection.msg DetectionArray.msg Track.msg TrackArray.msg CalibrationStatus.msg IDArray.msg
- Image2D_roi.msg Image2D_roi_array.msg World3D_roi.msg World3D_roi_array.msg)
+ Image2D_roi.msg Image2D_roi_array.msg World3D_roi.msg World3D_roi_array.msg Track3D.msg Track3DArray.msg ObjectName.msg ObjectNameArray.msg Image2D_roi_file.msg)
 add_service_files(FILES OPTSensor.srv OPTTransform.srv)
diff --git a/opt_msgs/CMakeLists.txt.user b/opt_msgs/CMakeLists.txt.user
deleted file mode 100644
index a0de05f..0000000
--- a/opt_msgs/CMakeLists.txt.user
+++ /dev/null
@@ -1,371 +0,0 @@
[371 lines of Qt Creator project metadata (editor settings, build and deploy configurations) deleted with the file; content omitted]
diff --git a/opt_msgs/msg/Detection.msg b/opt_msgs/msg/Detection.msg
index 396d6c4..cde3e89 100644
--- a/opt_msgs/msg/Detection.msg
+++ b/opt_msgs/msg/Detection.msg
@@ -9,3 +9,5 @@ float64 height
 float64 confidence
 float64 distance
 bool occluded
+
+string object_name
diff --git a/opt_msgs/msg/Image2D_roi_file.msg b/opt_msgs/msg/Image2D_roi_file.msg
new file mode 100644
index 0000000..291f03d
--- /dev/null
+++ b/opt_msgs/msg/Image2D_roi_file.msg
@@ -0,0 +1,2 @@
+sensor_msgs/Image image_roi
+string name
diff --git a/opt_msgs/msg/ObjectName.msg b/opt_msgs/msg/ObjectName.msg
new file mode 100644
index 0000000..ae57deb
--- /dev/null
+++ b/opt_msgs/msg/ObjectName.msg
@@ -0,0 +1,2 @@
+int32 no
+string object_name
diff --git a/opt_msgs/msg/ObjectNameArray.msg b/opt_msgs/msg/ObjectNameArray.msg
new file mode 100644
index 0000000..4621be3
--- /dev/null
+++ b/opt_msgs/msg/ObjectNameArray.msg
@@ -0,0 +1,2 @@
+Header header
+opt_msgs/ObjectName[] object_names
diff --git a/opt_msgs/msg/Track.msg b/opt_msgs/msg/Track.msg
index b068460..7874af3 100644
--- a/opt_msgs/msg/Track.msg
+++ b/opt_msgs/msg/Track.msg
@@ -14,4 +14,4 @@ float64 confidence
 uint8 visibility
 opt_msgs/BoundingBox2D box_2D
-
+string object_name
diff --git a/opt_msgs/msg/Track3D.msg b/opt_msgs/msg/Track3D.msg
new file mode 100644
index 0000000..94bdbd5
--- /dev/null
+++ b/opt_msgs/msg/Track3D.msg
@@ -0,0 +1,18 @@
+uint8 VISIBLE = 0
+uint8 OCCLUDED = 1
+uint8 NOT_VISIBLE = 2
+
+int32 id
+
+float64 x
+float64 y
+float64 z
+float64 height
+float64 distance
+float64 age
+float64 confidence
+
+uint8 visibility
+
+opt_msgs/BoundingBox2D box_2D
+
diff --git a/opt_msgs/msg/Track3DArray.msg b/opt_msgs/msg/Track3DArray.msg
new file mode 100644
index 0000000..3e5ad96
--- /dev/null
+++ b/opt_msgs/msg/Track3DArray.msg
@@ -0,0 +1,3 @@
+Header header
+
+opt_msgs/Track3D[] tracks
diff --git a/opt_utils/CMakeLists.txt b/opt_utils/CMakeLists.txt
index d65ce2e..e466ced 100644
--- a/opt_utils/CMakeLists.txt
+++ b/opt_utils/CMakeLists.txt
@@ -30,5 +30,8 @@ target_link_libraries(roi_viewer boost_system boost_filesystem boost_signals ${P
 add_executable(ros2udp_converter apps/ros2udp_converter.cpp src/udp_messaging.cpp)# src/json.cpp)
 target_link_libraries(ros2udp_converter
${catkin_LIBRARIES} json) +add_executable(ros2udp_converter_object apps/ros2udp_converter_object.cpp src/udp_messaging.cpp) +target_link_libraries(ros2udp_converter_object ${catkin_LIBRARIES} json) + add_executable(udp_listener apps/udp_listener.cpp src/udp_messaging.cpp) target_link_libraries(udp_listener ${catkin_LIBRARIES}) diff --git a/opt_utils/apps/ros2udp_converter.cpp b/opt_utils/apps/ros2udp_converter.cpp index 8aa0005..03fe985 100644 --- a/opt_utils/apps/ros2udp_converter.cpp +++ b/opt_utils/apps/ros2udp_converter.cpp @@ -84,7 +84,7 @@ trackingCallback(const opt_msgs::TrackArray::ConstPtr& tracking_msg) tracks.Add(current_track); } - root.Add("tracks", tracks); + root.Add("people_tracks", tracks); /// Convert JSON object to string: Jzon::Format message_format = Jzon::StandardFormat; diff --git a/opt_utils/apps/ros2udp_converter_object.cpp b/opt_utils/apps/ros2udp_converter_object.cpp new file mode 100644 index 0000000..f90a823 --- /dev/null +++ b/opt_utils/apps/ros2udp_converter_object.cpp @@ -0,0 +1,248 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it] + * + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +// Global variables: +int udp_buffer_length; // UDP message buffer length +int udp_port; // UDP port +std::string hostip; // UDP host +int json_indent_size; // indent size for JSON message +bool json_newline; // use newlines (true) or not (false) in JSON messages +bool json_spacing; // use spacing (true) or not (false) in JSON messages +bool json_use_tabs; // use tabs (true) or not (false) in JSON messages +struct ComData udp_data; // parameters for UDP messaging +open_ptrack::opt_utils::UDPMessaging udp_messaging(udp_data); // instance of class UDPMessaging +ros::Time last_heartbeat_time; +double heartbeat_interval; + + +void +trackingCallback(const opt_msgs::TrackArray::ConstPtr& tracking_msg) +{ + /// Create JSON-formatted message: + Jzon::Object root, header, stamp; + + /// Add header (84 characters): + header.Add("seq", int(tracking_msg->header.seq)); + stamp.Add("sec", int(tracking_msg->header.stamp.sec)); + stamp.Add("nsec", int(tracking_msg->header.stamp.nsec)); + header.Add("stamp", stamp); + header.Add("frame_id", tracking_msg->header.frame_id); + root.Add("header", header); + + /// Add tracks array: + // >50 characters for every track + Jzon::Array tracks; + + + for (unsigned int i = 0; i < tracking_msg->tracks.size(); i++) + { + Jzon::Object current_track; + int index_roiname=tracking_msg->tracks[i].id; + current_track.Add("id", index_roiname); + current_track.Add("object_name",tracking_msg->tracks[i].object_name); + current_track.Add("x", tracking_msg->tracks[i].x); + current_track.Add("y", tracking_msg->tracks[i].y); + current_track.Add("height", tracking_msg->tracks[i].height); + current_track.Add("age", tracking_msg->tracks[i].age); + current_track.Add("confidence", tracking_msg->tracks[i].confidence); + tracks.Add(current_track); + } + root.Add("object_tracks", tracks); + + /// Convert JSON object to string: + Jzon::Format message_format = Jzon::StandardFormat; + message_format.indentSize = json_indent_size; + message_format.newline = json_newline; + message_format.spacing = json_spacing; + message_format.useTabs = json_use_tabs; + Jzon::Writer writer(root, message_format); + writer.Write(); + std::string json_string = writer.GetResult(); + // std::cout << "String sent: " << json_string << std::endl; + + /// Copy string to message buffer: + udp_data.si_num_byte_ = json_string.length()+1; + char buf[udp_data.si_num_byte_]; + for (unsigned int i = 0; i < udp_data.si_num_byte_; i++) + { + buf[i] = 0; + } + sprintf(buf, "%s", json_string.c_str()); + udp_data.pc_pck_ = buf; // buffer where the message is written + + /// Send message: + udp_messaging.sendFromSocketUDP(&udp_data); +} + +void +aliveIDsCallback(const opt_msgs::IDArray::ConstPtr& alive_ids_msg) +{ + ros::Time msg_time = ros::Time(alive_ids_msg->header.stamp.sec, alive_ids_msg->header.stamp.nsec); + if ((msg_time - last_heartbeat_time).toSec() > heartbeat_interval) + { + /// Create JSON-formatted message: + Jzon::Object root, header, stamp; + + /// Add header: + header.Add("seq", int(alive_ids_msg->header.seq)); + stamp.Add("sec", int(alive_ids_msg->header.stamp.sec)); + stamp.Add("nsec", int(alive_ids_msg->header.stamp.nsec)); + header.Add("stamp", stamp); + header.Add("frame_id", "heartbeat"); + root.Add("header", header); + + Jzon::Array alive_IDs; + for (unsigned int i = 0; i < alive_ids_msg->ids.size(); i++) + { + 
alive_IDs.Add(alive_ids_msg->ids[i]); + } + root.Add("alive_IDs", alive_IDs); + root.Add("max_ID", alive_ids_msg->max_ID); + + /// Convert JSON object to string: + Jzon::Format message_format = Jzon::StandardFormat; + message_format.indentSize = json_indent_size; + message_format.newline = json_newline; + message_format.spacing = json_spacing; + message_format.useTabs = json_use_tabs; + Jzon::Writer writer(root, message_format); + writer.Write(); + std::string json_string = writer.GetResult(); + // std::cout << "String sent: " << json_string << std::endl; + + /// Copy string to message buffer: + udp_data.si_num_byte_ = json_string.length()+1; + char buf[udp_data.si_num_byte_]; + for (unsigned int i = 0; i < udp_data.si_num_byte_; i++) + { + buf[i] = 0; + } + sprintf(buf, "%s", json_string.c_str()); + udp_data.pc_pck_ = buf; // buffer where the message is written + + /// Send message: + udp_messaging.sendFromSocketUDP(&udp_data); + + last_heartbeat_time = msg_time; + } +} + + + +typedef unsigned long uint32; +// convert a string represenation of an IP address into its numeric equivalent +static uint32 Inet_AtoN(const char * buf) +{ + + uint32 ret = 0; + int shift = 24; // fill out the MSB first + bool startQuad = true; + while((shift >= 0)&&(*buf)) + { + if (startQuad) + { + unsigned char quad = (unsigned char) atoi(buf); + ret |= (((uint32)quad) << shift); + shift -= 8; + } + startQuad = (*buf == '.'); + buf++; + } + return ret; +} + +int +main(int argc, char **argv) +{ + // Initialization: + ros::init(argc, argv, "ros2udp_converter"); + ros::NodeHandle nh("~"); + + // Read input parameters: + nh.param("udp/port", udp_port, 21234); + nh.param("udp/hostip", hostip, std::string("127.0.0.1")); + nh.param("udp/buffer_length", udp_buffer_length, 2048); + nh.param("json/indent_size", json_indent_size, 0); + nh.param("json/newline", json_newline, false); + nh.param("json/spacing", json_spacing, false); + nh.param("json/use_tabs", json_use_tabs, false); + nh.param("json/heartbeat_interval", heartbeat_interval, 0.25); + + + // ROS subscriber: + ros::Subscriber tracking_sub = nh.subscribe("input_topic", 1, trackingCallback); + + ros::Subscriber alive_ids_sub = nh.subscribe("alive_ids_topic", 1, aliveIDsCallback); + + + // Initialize UDP parameters: + char buf[0]; + udp_data.si_port_ = udp_port; // port + udp_data.si_retry_ = 1; + udp_data.si_num_byte_ = udp_buffer_length; // number of bytes to write (2048 -> about 30 tracks) + udp_data.pc_pck_ = buf; // buffer where the message is written + udp_data.si_timeout_ = 4; + udp_data.sj_addr_ = Inet_AtoN(hostip.c_str()); + + /// Create object for UDP messaging: + udp_messaging = open_ptrack::opt_utils::UDPMessaging(udp_data); + + /// Create client socket: + udp_messaging.createSocketClientUDP(&udp_data); + + // Execute callbacks: + ros::spin(); + + // Close socket: + udp_messaging.closeSocketUDP(&udp_data); + + return 0; +} diff --git a/opt_utils/launch/ros2udp_converter.launch b/opt_utils/launch/ros2udp_converter.launch index 34e2131..0683135 100644 --- a/opt_utils/launch/ros2udp_converter.launch +++ b/opt_utils/launch/ros2udp_converter.launch @@ -1,8 +1,17 @@ - + + + + + + + + + + diff --git a/opt_utils/launch/single_camera_visualization.launch b/opt_utils/launch/single_camera_visualization.launch new file mode 100644 index 0000000..a443057 --- /dev/null +++ b/opt_utils/launch/single_camera_visualization.launch @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/tracking/CMakeLists.txt b/tracking/CMakeLists.txt index ba24988..f53dde9 100644 --- 
a/tracking/CMakeLists.txt +++ b/tracking/CMakeLists.txt @@ -24,12 +24,17 @@ catkin_package( CATKIN_DEPENDS roscpp bayes opencv2 pcl_ros detection tf tf_conversions opt_msgs opt_utils ) -add_library(${PROJECT_NAME} src/munkres.cpp src/kalman_filter.cpp src/track.cpp src/tracker.cpp) +add_library(${PROJECT_NAME} src/munkres.cpp src/kalman_filter.cpp src/track.cpp src/tracker.cpp src/track_object.cpp src/tracker_object.cpp) target_link_libraries(${PROJECT_NAME} ${catkin_LIBRARIES}) add_executable(tracker apps/tracker_node.cpp) target_link_libraries(tracker ${PROJECT_NAME} ${catkin_LIBRARIES}) +add_executable(tracker_object apps/tracker_object_node.cpp) +target_link_libraries(tracker_object ${PROJECT_NAME} ${catkin_LIBRARIES}) +#add_executable(tracker3d apps/tracker_node_3d.cpp) +#target_link_libraries(tracker3d ${PROJECT_NAME} ${catkin_LIBRARIES}) + #add_executable(tracker_filter apps/tracker_filter_node.cpp) #add_dependencies(tracker_filter ${PROJECT_NAME}_gencfg) #target_link_libraries(tracker_filter ${PROJECT_NAME} ${catkin_LIBRARIES}) diff --git a/tracking/apps/tracker_node.cpp b/tracking/apps/tracker_node.cpp index 0111bd8..a354067 100644 --- a/tracking/apps/tracker_node.cpp +++ b/tracking/apps/tracker_node.cpp @@ -122,6 +122,9 @@ ros::Duration max_time_between_detections_; std::map > number_messages_delay_map_; +//for object tracking +std::vector association_for_initialize_objectnames; + /** * \brief Create marker to be visualized in RViz * @@ -359,6 +362,12 @@ detection_cb(const opt_msgs::DetectionArray::ConstPtr& msg) tracker->newFrame(detections_vector); tracker->updateTracks(); + + //for object tracking +// association_for_initialize_objectnames=tracker->association_for_initialize_objectnames_; + //write the corresponding names?????????? + + // Create a TrackingResult message with the output of the tracking process if(output_tracking_results) { diff --git a/tracking/apps/tracker_object_node.cpp b/tracking/apps/tracker_object_node.cpp new file mode 100644 index 0000000..14d2f71 --- /dev/null +++ b/tracking/apps/tracker_object_node.cpp @@ -0,0 +1,834 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +//#include + +// Dynamic reconfigure: +#include +#include + +typedef tracking::TrackerConfig Config; +typedef dynamic_reconfigure::Server ReconfigureServer; + +// Global variables: +std::map detection_sources_map; +tf::TransformListener* tf_listener; +std::string world_frame_id; +bool output_history_pointcloud; +int output_history_size; +int detection_history_size; +bool output_markers; +bool output_image_rgb; +bool output_tracking_results; +bool output_detection_results; // Enables/disables the publishing of detection positions to be visualized in RViz +bool vertical; +ros::Publisher results_pub; +ros::Publisher object_names_pub; + +ros::Publisher marker_pub_tmp; +ros::Publisher marker_pub; +ros::Publisher pointcloud_pub; +ros::Publisher detection_marker_pub; +ros::Publisher detection_trajectory_pub; +ros::Publisher alive_ids_pub; +size_t starting_index; +size_t detection_insert_index; +tf::Transform camera_frame_to_world_transform; +tf::Transform world_to_camera_frame_transform; +bool extrinsic_calibration; +double period; +open_ptrack::tracking::Tracker_object* tracker_object; +pcl::PointCloud::Ptr history_pointcloud(new pcl::PointCloud); +pcl::PointCloud::Ptr detection_history_pointcloud(new pcl::PointCloud); +bool swissranger; +double min_confidence; +double min_confidence_sr; +double min_confidence_detections; +double min_confidence_detections_sr; +std::vector camera_colors; // vector containing colors to use to identify cameras in the network +std::map color_map; // map between camera frame_id and color +// Chi square distribution +std::map chi_map; +bool velocity_in_motion_term; +double acceleration_variance; +double position_variance_weight; +double voxel_size; +double gate_distance; +bool calibration_refinement; +std::map registration_matrices; +double max_detection_delay; +ros::Time latest_time; + +std::map last_received_detection_; +ros::Duration max_time_between_detections_; + +std::map > number_messages_delay_map_; + +//for object tracking +std::vector association_for_initialize_objectnames; + +/** + * \brief Create marker to be visualized in RViz + * + * \param[in] id The marker ID. + * \param[in] frame_id The marker reference frame. + * \param[in] position The marker position. + * \param[in] color The marker color. 
+ */ +visualization_msgs::Marker +createMarker (int id, std::string frame_id, ros::Time stamp, Eigen::Vector3d position, cv::Vec3f color) +{ + visualization_msgs::Marker marker; + + marker.header.frame_id = world_frame_id; + marker.header.stamp = stamp; + marker.ns = frame_id; + marker.id = id; + marker.type = visualization_msgs::Marker::SPHERE; + marker.action = visualization_msgs::Marker::ADD; + marker.pose.position.x = position(0); + marker.pose.position.y = position(1); + marker.pose.position.z = position(2); + marker.scale.x = 0.1; + marker.scale.y = 0.1; + marker.scale.z = 0.1; + marker.color.r = color(0); + marker.color.g = color(1); + marker.color.b = color(2); + marker.color.a = 1.0; + marker.lifetime = ros::Duration(0.2); + return marker; +} + +void +plotCameraLegend (std::map curr_color_map) +{ + // Compose camera legend: + cv::Mat legend_image = cv::Mat::zeros(500, 500, CV_8UC3); + for(std::map::iterator colormap_iterator = curr_color_map.begin(); colormap_iterator != curr_color_map.end(); colormap_iterator++) + { + int color_index = colormap_iterator->second; + cv::Vec3f color = camera_colors[color_index]; + int y_coord = color_index * legend_image.rows / (curr_color_map.size()+1) + 0.5 * legend_image.rows / (curr_color_map.size()+1); + cv::line(legend_image, cv::Point(0,y_coord), cv::Point(100,y_coord), cv::Scalar(255*color(2), 255*color(1), 255*color(0)), 8); + cv::putText(legend_image, colormap_iterator->first, cv::Point(110,y_coord), 1, 1, cv::Scalar(255, 255, 255), 1); + } + + // Display the cv image + cv::imshow("Camera legend", legend_image); + cv::waitKey(1); +} + +Eigen::Matrix4d +readMatrixFromFile (std::string filename) +{ + Eigen::Matrix4d matrix; + std::string line; + std::ifstream myfile (filename.c_str()); + if (myfile.is_open()) + { + int k = 0; + std::string number; + while (myfile >> number) + { + matrix(int(k/4), int(k%4)) = std::atof(number.c_str()); + k++; + } + myfile.close(); + } + + std::cout << matrix << std::endl; + + return matrix; +} + +/** + * \brief Read the DetectionArray message and use the detections for creating/updating/deleting tracks + * + * \param[in] msg the DetectionArray message. 
+ */ +void +detection_cb(const opt_msgs::DetectionArray::ConstPtr& msg) +{ + // Read message header information: + std::string frame_id = msg->header.frame_id; + ros::Time frame_time = msg->header.stamp; + + std::string frame_id_tmp = frame_id; + int pos = frame_id_tmp.find("_rgb_optical_frame"); + if (pos != std::string::npos) + frame_id_tmp.replace(pos, std::string("_rgb_optical_frame").size(), ""); + pos = frame_id_tmp.find("_depth_optical_frame"); + if (pos != std::string::npos) + frame_id_tmp.replace(pos, std::string("_depth_optical_frame").size(), ""); + last_received_detection_[frame_id_tmp] = frame_time; + + // Compute delay of detection message, if any: + double time_delay = 0.0; + if (frame_time > latest_time) + { + latest_time = frame_time; + time_delay = 0.0; + } + else + { + time_delay = (latest_time - frame_time).toSec(); + } + + tf::StampedTransform transform; + tf::StampedTransform inverse_transform; + // cv_bridge::CvImage::Ptr cvPtr; + + try + { + // Read transforms between camera frame and world frame: + if (!extrinsic_calibration) + { + static tf::TransformBroadcaster world_to_camera_tf_publisher; +// world_to_camera_tf_publisher.sendTransform(tf::StampedTransform(camera_frame_to_world_transform, ros::Time::now(), world_frame_id, frame_id)); + world_to_camera_tf_publisher.sendTransform(tf::StampedTransform(world_to_camera_frame_transform, ros::Time::now(), frame_id, world_frame_id)); + } + + //Calculate direct and inverse transforms between camera and world frame: + tf_listener->lookupTransform(world_frame_id, frame_id, ros::Time(0), transform); + tf_listener->lookupTransform(frame_id, world_frame_id, ros::Time(0), inverse_transform); + + // cvPtr = cv_bridge::toCvCopy(msg->image, sensor_msgs::image_encodings::BGR8); + + // Read camera intrinsic parameters: + Eigen::Matrix3d intrinsic_matrix; + for(int i = 0; i < 3; i++) + for(int j = 0; j < 3; j++) + intrinsic_matrix(i, j) = msg->intrinsic_matrix[i * 3 + j]; + + // Add a new DetectionSource or update an existing one: + if(detection_sources_map.find(frame_id) == detection_sources_map.end()) + { + detection_sources_map[frame_id] = new open_ptrack::detection::DetectionSource(cv::Mat(0, 0, CV_8UC3), + transform, inverse_transform, intrinsic_matrix, frame_time, frame_id); + } + else + { + detection_sources_map[frame_id]->update(cv::Mat(0, 0, CV_8UC3), transform, inverse_transform, + intrinsic_matrix, frame_time, frame_id); + double d = detection_sources_map[frame_id]->getDuration().toSec() / period; + int lostFrames = int(round(d)) - 1; + } + open_ptrack::detection::DetectionSource* source = detection_sources_map[frame_id]; + + // Create a Detection object for every detection in the detection message: + std::vector detections_vector; + for(std::vector::const_iterator it = msg->detections.begin(); + it != msg->detections.end(); it++) + { + detections_vector.push_back(open_ptrack::detection::Detection(*it, source)); + } + + // Convert HOG+SVM confidences to HAAR+ADABOOST-like people detection confidences: + if (not std::strcmp(msg->confidence_type.c_str(), "hog+svm")) + { + for(unsigned int i = 0; i < detections_vector.size(); i++) + { +// double new_confidence = detections_vector[i].getConfidence(); +// new_confidence = (new_confidence - min_confidence_detections_sr) / (min_confidence_sr - min_confidence_detections_sr) * +// (min_confidence - min_confidence_detections) + min_confidence_detections; +// detections_vector[i].setConfidence(new_confidence+2); + + double new_confidence = detections_vector[i].getConfidence(); + 
new_confidence = (new_confidence - (-3)) / 3 * 4 + 2; + detections_vector[i].setConfidence(new_confidence); + +// std::cout<< detections_vector[i].getConfidence() << std::endl; + } + } + + // Detection correction by means of calibration refinement: + if (calibration_refinement) + { + if (strcmp(frame_id.substr(0,1).c_str(), "/") == 0) + { + frame_id = frame_id.substr(1, frame_id.size() - 1); + } + + Eigen::Matrix4d registration_matrix; + std::map::iterator registration_matrices_iterator = registration_matrices.find(frame_id); + if (registration_matrices_iterator != registration_matrices.end()) + { // camera already present + registration_matrix = registration_matrices_iterator->second; + } + else + { // camera not present + std::cout << "Reading refinement matrix of " << frame_id << " from file." << std::endl; + std::string refinement_filename = ros::package::getPath("opt_calibration") + "/conf/registration_" + frame_id + ".txt"; + std::ifstream f(refinement_filename.c_str()); + if (f.good()) // if the file exists + { + f.close(); + registration_matrix = readMatrixFromFile (refinement_filename); + registration_matrices.insert(std::pair (frame_id, registration_matrix)); + } + else // if the file does not exist + { + // insert the identity matrix + std::cout << "Refinement file not found! Not doing refinement for this sensor." << std::endl; + registration_matrices.insert(std::pair (frame_id, Eigen::Matrix4d::Identity())); + } + } + + if(detections_vector.size() > 0) + { + // Apply detection refinement: + for(unsigned int i = 0; i < detections_vector.size(); i++) + { + Eigen::Vector3d old_centroid = detections_vector[i].getWorldCentroid(); + +// std::cout << frame_id << std::endl; +// std::cout << registration_matrix << std::endl; +// std::cout << "old_centroid: " << old_centroid.transpose() << std::endl; + Eigen::Vector4d old_centroid_homogeneous(old_centroid(0), old_centroid(1), old_centroid(2), 1.0); + Eigen::Vector4d refined_centroid = registration_matrix * old_centroid_homogeneous; + detections_vector[i].setWorldCentroid(Eigen::Vector3d(refined_centroid(0), refined_centroid(1), refined_centroid(2))); + + Eigen::Vector3d refined_centroid2 = detections_vector[i].getWorldCentroid(); +// std::cout << "refined_centroid2: " << refined_centroid2.transpose() << std::endl; +// std::cout << "difference: " << (refined_centroid2 - old_centroid).transpose() << std::endl << std::endl; + } + } + } + + // If at least one detection has been received: + if((detections_vector.size() > 0) && (time_delay < max_detection_delay)) + { + + // Perform detection-track association: + tracker_object->newFrame(detections_vector); + tracker_object->updateTracks(); + + //for object tracking +// association_for_initialize_objectnames=tracker_object->association_for_initialize_objectnames_; + + //write the corresponding names?????????? 
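+ // A note on the confidence remapping above (an annotation, not part of the original
+ // sources): the linear map new_confidence = (c - (-3)) / 3 * 4 + 2 rescales an assumed
+ // HOG+SVM confidence range of roughly [-3, 0] onto [2, 6], e.g. c = -3 -> 2,
+ // c = -1.5 -> 4, c = 0 -> 6, so the thresholds tuned for HAAR+ADABOOST-style
+ // detections can be applied unchanged.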
+ + + // Create a TrackingResult message with the output of the tracking process + if(output_tracking_results) + { + opt_msgs::TrackArray::Ptr tracking_results_msg(new opt_msgs::TrackArray); + tracking_results_msg->header.stamp = ros::Time::now();//frame_time; + tracking_results_msg->header.frame_id = world_frame_id; + tracker_object->toMsg(tracking_results_msg); + // Publish tracking message: + results_pub.publish(tracking_results_msg); + } + + + opt_msgs::ObjectNameArray::Ptr object_names_msg(new opt_msgs::ObjectNameArray); + object_names_msg->header.stamp = ros::Time::now(); + object_names_msg->header.frame_id=frame_id; + tracker_object->to_object_name_Msg(object_names_msg); + object_names_pub.publish(object_names_msg); + + +// //Show the tracking process' results as an image +// if(output_image_rgb) +// { +// tracker->drawRgb(); +// for(std::map::iterator +// it = detection_sources_map.begin(); it != detection_sources_map.end(); it++) +// { +// cv::Mat image_to_show = it->second->getImage(); +// if (not vertical) +// { +// //cv::imshow("TRACKER " + it->first, image_to_show); +// cv::imshow("TRACKER ", image_to_show); // TODO: use the above row if using multiple cameras +// } +// else +// { +// cv::flip(image_to_show.t(), image_to_show, -1); +// cv::flip(image_to_show, image_to_show, 1); +// //cv::imshow("TRACKER " + it->first, image_to_show); +// cv::imshow("TRACKER ", image_to_show); // TODO: use the above row if using multiple cameras +// } +// cv::waitKey(2); +// } +// } + + // Publish IDs of active tracks: + opt_msgs::IDArray::Ptr alive_ids_msg(new opt_msgs::IDArray); + alive_ids_msg->header.stamp = ros::Time::now(); + alive_ids_msg->header.frame_id = world_frame_id; + tracker_object->getAliveIDs (alive_ids_msg); + alive_ids_pub.publish (alive_ids_msg); + + // Show the pose of each tracked object with a 3D marker (to be visualized with ROS RViz) + if(output_markers) + { + visualization_msgs::MarkerArray::Ptr marker_msg(new visualization_msgs::MarkerArray); + tracker_object->toMarkerArray(marker_msg); + marker_pub.publish(marker_msg); + } + + // Show the history of the movements in 3D (3D trajectory) of each tracked object as a PointCloud (which can be visualized in RViz) + if(output_history_pointcloud) + { + history_pointcloud->header.stamp = frame_time.toNSec() / 1e3; // Convert from ns to us + history_pointcloud->header.frame_id = world_frame_id; + starting_index = tracker_object->appendToPointCloud(history_pointcloud, starting_index, + output_history_size); + pointcloud_pub.publish(history_pointcloud); + } + + // Create message for showing detection positions in RViz: + if (output_detection_results) + { + visualization_msgs::MarkerArray::Ptr marker_msg(new visualization_msgs::MarkerArray); + detection_history_pointcloud->header.stamp = frame_time.toNSec() / 1e3; // Convert from ns to us + detection_history_pointcloud->header.frame_id = world_frame_id; + std::string frame_id = detections_vector[0].getSource()->getFrameId(); + + // Define color: + int color_index; + std::map::iterator colormap_iterator = color_map.find(frame_id); + if (colormap_iterator != color_map.end()) + { // camera already present + color_index = colormap_iterator->second; + } + else + { // camera not present + color_index = color_map.size(); + color_map.insert(std::pair (frame_id, color_index)); + + // Plot legend with camera names and colors: + plotCameraLegend (color_map); + } + for (unsigned int i = 0; i < detections_vector.size(); i++) + { + // Create marker and add it to message: + Eigen::Vector3d 
centroid = detections_vector[i].getWorldCentroid(); + visualization_msgs::Marker marker = createMarker (i, frame_id, frame_time, centroid, camera_colors[color_index]); + marker_msg->markers.push_back(marker); + + // Point cloud: + pcl::PointXYZRGB point; + point.x = marker.pose.position.x; + point.y = marker.pose.position.y; + point.z = marker.pose.position.z; + point.r = marker.color.r * 255.0f; + point.g = marker.color.g * 255.0f; + point.b = marker.color.b * 255.0f; + detection_insert_index = (detection_insert_index + 1) % detection_history_size; + detection_history_pointcloud->points[detection_insert_index] = point; + } + detection_marker_pub.publish(marker_msg); // publish marker message + detection_trajectory_pub.publish(detection_history_pointcloud); // publish trajectory message + } + } + else // if no detections have been received or detection_delay is above max_detection_delay + { + if(output_tracking_results) + { // Publish an empty tracking message + opt_msgs::TrackArray::Ptr tracking_results_msg(new opt_msgs::TrackArray); + tracking_results_msg->header.stamp = frame_time; + tracking_results_msg->header.frame_id = world_frame_id; + results_pub.publish(tracking_results_msg); + } + if((detections_vector.size() > 0) && (time_delay >= max_detection_delay)) + { + if (number_messages_delay_map_.find(msg->header.frame_id) == number_messages_delay_map_.end()) + number_messages_delay_map_[msg->header.frame_id] = std::pair(0.0, 0); + + number_messages_delay_map_[msg->header.frame_id].first += time_delay; + number_messages_delay_map_[msg->header.frame_id].second++; + + if (number_messages_delay_map_[msg->header.frame_id].second == 100) + { + double avg = number_messages_delay_map_[msg->header.frame_id].first / number_messages_delay_map_[msg->header.frame_id].second; + ROS_WARN_STREAM("[" << msg->header.frame_id << "] received 100 detections with average delay " << avg << " > " << max_detection_delay); + number_messages_delay_map_[msg->header.frame_id] = std::pair(0.0, 0); + } + } + } + } +// catch(cv_bridge::Exception& ex) +// { +// ROS_ERROR("cv_bridge exception: %s", ex.what()); +// } + catch(tf::TransformException& ex) + { + ROS_ERROR("transform exception: %s", ex.what()); + } +} + +void +generateColors(int colors_number, std::vector& colors) +{ + for (unsigned int i = 0; i < colors_number; i++) + { + colors.push_back(cv::Vec3f( + float(rand() % 256) / 255, + float(rand() % 256) / 255, + float(rand() % 256) / 255)); + } +} + +void +fillChiMap(std::map& chi_map, bool velocity_in_motion_term) +{ + if (velocity_in_motion_term) // chi square values with state dimension = 4 + { + chi_map[0.5] = 3.357; + chi_map[0.75] = 5.385; + chi_map[0.8] = 5.989; + chi_map[0.9] = 7.779; + chi_map[0.95] = 9.488; + chi_map[0.98] = 11.668; + chi_map[0.99] = 13.277; + chi_map[0.995] = 14.860; + chi_map[0.998] = 16.924; + chi_map[0.999] = 18.467; + } + else // chi square values with state dimension = 2 + { + chi_map[0.5] = 1.386; + chi_map[0.75] = 2.773; + chi_map[0.8] = 3.219; + chi_map[0.9] = 4.605; + chi_map[0.95] = 5.991; + chi_map[0.98] = 7.824; + chi_map[0.99] = 9.210; + chi_map[0.995] = 10.597; + chi_map[0.998] = 12.429; + chi_map[0.999] = 13.816; + } +} + +void +configCb(Config &config, uint32_t level) +{ + tracker_object->setMinConfidenceForTrackInitialization (config.min_confidence_initialization); + max_detection_delay = config.max_detection_delay; + calibration_refinement = config.calibration_refinement; + tracker_object->setSecBeforeOld (config.sec_before_old); + tracker_object->setSecBeforeFake 
(config.sec_before_fake);
+  tracker_object->setSecRemainNew (config.sec_remain_new);
+  tracker_object->setDetectionsToValidate (config.detections_to_validate);
+  tracker_object->setDetectorLikelihood (config.detector_likelihood);
+  tracker_object->setLikelihoodWeights (config.detector_weight*chi_map[0.999]/18.467, config.motion_weight);
+
+//  if (config.velocity_in_motion_term != velocity_in_motion_term)
+//  {
+//    // Take chi square values with regards to the state dimension:
+//    fillChiMap(chi_map, config.velocity_in_motion_term);
+//
+//    double position_variance = config.position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0;
+//    tracker->setVelocityInMotionTerm (config.velocity_in_motion_term, config.acceleration_variance, position_variance);
+//  }
+//  else
+//  {
+  if (config.acceleration_variance != acceleration_variance)
+  {
+    tracker_object->setAccelerationVariance (config.acceleration_variance);
+  }
+
+  if (config.position_variance_weight != position_variance_weight)
+  {
+    double position_variance = config.position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0;
+    tracker_object->setPositionVariance (position_variance);
+  }
+//  }
+
+  gate_distance = chi_map.find(config.gate_distance_probability) != chi_map.end() ? chi_map[config.gate_distance_probability] : chi_map[0.999];
+  tracker_object->setGateDistance (gate_distance);
+}
+
+int
+main(int argc, char** argv)
+{
+
+  ros::init(argc, argv, "tracker");
+  ros::NodeHandle nh("~");
+
+  // Subscribers/Publishers:
+  ros::Subscriber input_sub = nh.subscribe("input", 5, detection_cb);
+  marker_pub_tmp = nh.advertise<visualization_msgs::Marker>("/tracker/markers", 1);
+  marker_pub = nh.advertise<visualization_msgs::MarkerArray>("/tracker/markers_array", 1);
+  pointcloud_pub = nh.advertise<pcl::PointCloud<pcl::PointXYZRGB> >("/tracker/history", 1);
+  results_pub = nh.advertise<opt_msgs::TrackArray>("/tracker/tracks", 100);
+  object_names_pub = nh.advertise<opt_msgs::ObjectNameArray>("/tracker/object_names", 100);
+  detection_marker_pub = nh.advertise<visualization_msgs::MarkerArray>("/detector/markers_array", 1);
+  detection_trajectory_pub = nh.advertise<pcl::PointCloud<pcl::PointXYZRGB> >("/detector/history", 1);
+  alive_ids_pub = nh.advertise<opt_msgs::IDArray>("/tracker/alive_ids", 1);
+
+  // Dynamic reconfigure
+  boost::recursive_mutex config_mutex_;
+  boost::shared_ptr<ReconfigureServer> reconfigure_server_;
+
+  tf_listener = new tf::TransformListener();
+
+  // Read tracking parameters:
+  nh.param("world_frame_id", world_frame_id, std::string("/odom"));
+
+  nh.param("orientation/vertical", vertical, false);
+  nh.param("extrinsic_calibration", extrinsic_calibration, false);
+
+  nh.param("voxel_size", voxel_size, 0.06);
+
+  double rate;
+  nh.param("rate", rate, 30.0);
+
+//  double min_confidence;
+  nh.param("min_confidence_initialization", min_confidence, -2.5); //0.0);
+
+  double chi_value;
+  nh.param("gate_distance_probability", chi_value, 0.9);
+
+  nh.param("acceleration_variance", acceleration_variance, 1.0);
+
+  nh.param("position_variance_weight", position_variance_weight, 1.0);
+
+  bool detector_likelihood;
+  nh.param("detector_likelihood", detector_likelihood, false);
+
+  nh.param("velocity_in_motion_term", velocity_in_motion_term, false);
+
+  double detector_weight;
+  nh.param("detector_weight", detector_weight, -1.0);
+
+  double motion_weight;
+  nh.param("motion_weight", motion_weight, 0.5);
+
+  double sec_before_old;
+  nh.param("sec_before_old", sec_before_old, 3.6);
+
+  double sec_before_fake;
+  nh.param("sec_before_fake", sec_before_fake, 2.4);
+
+  double sec_remain_new;
+  nh.param("sec_remain_new", sec_remain_new, 1.2);
+
+  int detections_to_validate;
+  nh.param("detections_to_validate", detections_to_validate, 5);
+
+  double
haar_disp_ada_min_confidence, ground_based_people_detection_min_confidence; + nh.param("haar_disp_ada_min_confidence", haar_disp_ada_min_confidence, -2.5); //0.0); + nh.param("ground_based_people_detection_min_confidence", ground_based_people_detection_min_confidence, -2.5); //0.0); + + nh.param("swissranger", swissranger, false); + + nh.param("ground_based_people_detection_min_confidence_sr", min_confidence_detections_sr, -1.5); + nh.param("min_confidence_initialization_sr", min_confidence_sr, -1.1); + + nh.param("history_pointcloud", output_history_pointcloud, false); + nh.param("history_size", output_history_size, 1000); + nh.param("markers", output_markers, true); + nh.param("image_rgb", output_image_rgb, true); + nh.param("tracking_results", output_tracking_results, true); + + nh.param("detection_debug", output_detection_results, true); + nh.param("detection_history_size", detection_history_size, 1000); + + bool debug_mode; + nh.param("debug_active", debug_mode, false); + + nh.param("calibration_refinement", calibration_refinement, false); + nh.param("max_detection_delay", max_detection_delay, 3.0); + + double max_time_between_detections_d; + nh.param("max_time_between_detections", max_time_between_detections_d, 10.0); + max_time_between_detections_ = ros::Duration(max_time_between_detections_d); + + // Read number of sensors in the network: + int num_cameras = 1; + if (extrinsic_calibration) + { + num_cameras = 0; + XmlRpc::XmlRpcValue network; + nh.getParam("network", network); + for (unsigned i = 0; i < network.size(); i++) + { + num_cameras += network[i]["sensors"].size(); + for (unsigned j = 0; j < network[i]["sensors"].size(); j++) + { + std::string frame_id = network[i]["sensors"][j]["id"]; + last_received_detection_["/" + frame_id] = ros::Time(0); + } + } + } + + // Set min_confidence_detections variable based on sensor type: + if (swissranger) + min_confidence_detections = ground_based_people_detection_min_confidence; + else + min_confidence_detections = haar_disp_ada_min_confidence; + + // Take chi square values with regards to the state dimension: + fillChiMap(chi_map, velocity_in_motion_term); + + // Compute additional parameters: + period = 1.0 / rate; + gate_distance = chi_map.find(chi_value) != chi_map.end() ? 
chi_map[chi_value] : chi_map[0.999]; + + double position_variance; +// position_variance = 3*std::pow(2 * voxel_size, 2) / 12.0; // DEFAULT + position_variance = position_variance_weight*std::pow(2 * voxel_size, 2) / 12.0; + std::vector likelihood_weights; + likelihood_weights.push_back(detector_weight*chi_map[0.999]/18.467); + likelihood_weights.push_back(motion_weight); + + // Generate colors used to identify different cameras: + generateColors(num_cameras, camera_colors); + + // Initialize point cloud containing detections trajectory: + pcl::PointXYZRGB nan_point; + nan_point.x = std::numeric_limits::quiet_NaN(); + nan_point.y = std::numeric_limits::quiet_NaN(); + nan_point.z = std::numeric_limits::quiet_NaN(); + detection_history_pointcloud->points.resize(detection_history_size, nan_point); + + ros::Rate hz(num_cameras*rate); + +// cv::namedWindow("TRACKER ", CV_WINDOW_NORMAL); + + // Initialize an instance of the Tracker object: + tracker_object = new open_ptrack::tracking::Tracker_object( + gate_distance, + detector_likelihood, + likelihood_weights, + velocity_in_motion_term, + min_confidence, + min_confidence_detections, + sec_before_old, + sec_before_fake, + sec_remain_new, + detections_to_validate, + period, + position_variance, + acceleration_variance, + world_frame_id, + debug_mode, + vertical); + + starting_index = 0; + + // Set up dynamic reconfiguration + ReconfigureServer::CallbackType f = boost::bind(&configCb, _1, _2); + reconfigure_server_.reset(new ReconfigureServer(config_mutex_, nh)); + reconfigure_server_->setCallback(f); + + // If extrinsic calibration is not available: + if (!extrinsic_calibration) + { // Set fixed transformation from rgb frame and base_link + tf::Vector3 fixed_translation(0, 0, 0); // camera_rgb_optical_frame -> world + tf::Quaternion fixed_rotation(-0.5, 0.5, -0.5, -0.5); // camera_rgb_optical_frame -> world + tf::Vector3 inv_fixed_translation(0.0, 0.0, 0); // world -> camera_rgb_optical_frame + tf::Quaternion inv_fixed_rotation(-0.5, 0.5, -0.5, 0.5); // world -> camera_rgb_optical_frame + world_to_camera_frame_transform = tf::Transform(fixed_rotation, fixed_translation); + camera_frame_to_world_transform = tf::Transform(inv_fixed_rotation, inv_fixed_translation); + } + + // Spin and execute callbacks: +// ros::spin(); + + std::map last_message; + for (std::map::const_iterator it = last_received_detection_.begin(); it != last_received_detection_.end(); ++it) + last_message[it->first] = ros::Time::now(); + + ros::Time last_camera_legend_update = ros::Time::now(); // last time when the camera legend has been updated + + while (ros::ok()) + { + ros::spinOnce(); + ros::Time now = ros::Time::now(); + for (std::map::const_iterator it = last_received_detection_.begin(); it != last_received_detection_.end(); ++it) + { + ros::Duration duration(now - it->second); + if (duration > max_time_between_detections_) + { + if (it->second > ros::Time(0) and now - last_message[it->first] > max_time_between_detections_) + { + ROS_WARN_STREAM("[" << it->first << "] last detection was " << duration.toSec() << " seconds ago"); + last_message[it->first] = now; + } + else if (now - last_message[it->first] > max_time_between_detections_) + { + ROS_WARN_STREAM("[" << it->first << "] still waiting for detection messages..."); + last_message[it->first] = now; + } + } + + // Update camera legend every second: + if ((now - last_camera_legend_update) > ros::Duration(1.0)) // if more than one second passed since last update + { // update OpenCV image with a waitKey: + 
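+        // Calling cv::waitKey periodically lets OpenCV's HighGUI process its
+        // event queue; without it, the legend window drawn by plotCameraLegend()
+        // would never repaint.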
cv::waitKey(1); + last_camera_legend_update = now; + } + } + hz.sleep(); + } + + return 0; +} diff --git a/tracking/conf/multicamera_tracking.rviz b/tracking/conf/multicamera_tracking.rviz index 08b1f50..5d9ec3b 100644 --- a/tracking/conf/multicamera_tracking.rviz +++ b/tracking/conf/multicamera_tracking.rviz @@ -389,7 +389,6 @@ Visualization Manager: Namespaces: /kinect2_far_rgb_optical_frame: true /kinect2_head_rgb_optical_frame: true - /kinect2_lenovo_rgb_optical_frame: true Queue Size: 100 Value: true Enabled: true @@ -428,18 +427,18 @@ Visualization Manager: Z: 0.00524044 Name: Current View Near Clip Distance: 0.01 - Pitch: 0.48246 + Pitch: 1.09246 Target Frame: Value: Orbit (rviz) - Yaw: 4.27367 + Yaw: 3.24367 Saved: ~ Window Geometry: Displays: - collapsed: false + collapsed: true Height: 1000 - Hide Left Dock: false + Hide Left Dock: true Hide Right Dock: true - QMainWindow State: 000000ff00000000fd0000000400000000000002c90000035efc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c00610079007301000000280000035e000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004bf0000003efc0100000002fb0000000800540069006d00650100000000000004bf000002f600fffffffb0000000800540069006d00650100000000000004500000000000000000000001f00000035e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + QMainWindow State: 
000000ff00000000fd0000000400000000000002c90000035efc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c00610079007300000000280000035e000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004bf0000003efc0100000002fb0000000800540069006d00650100000000000004bf000002f600fffffffb0000000800540069006d00650100000000000004500000000000000000000004bf0000035e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 Selection: collapsed: false Time: @@ -450,4 +449,4 @@ Window Geometry: collapsed: true Width: 1215 X: 55 - Y: 42 + Y: 32 diff --git a/tracking/conf/object_tracker_multicamera.yaml b/tracking/conf/object_tracker_multicamera.yaml index 09ab767..ba4ad4d 100644 --- a/tracking/conf/object_tracker_multicamera.yaml +++ b/tracking/conf/object_tracker_multicamera.yaml @@ -38,7 +38,7 @@ position_variance_weight: 100 ## Data association parameters ## ################################# # Flag stating if detection likelihood should be used in data association: -detector_likelihood: true +detector_likelihood: false # Flag stating if track velocity should be considered in data association: velocity_in_motion_term: false # Weight of detection likelihood in data association: diff --git a/tracking/conf/single_camera_tracking.rviz b/tracking/conf/single_camera_tracking.rviz new file mode 100644 index 0000000..c99d69b --- /dev/null +++ b/tracking/conf/single_camera_tracking.rviz @@ -0,0 +1,238 @@ +Panels: + - Class: rviz/Displays + Help Height: 78 + Name: Displays + Property Tree Widget: + Expanded: + - /Global Options1 + - /Status1 + - /TF1/Frames1 + - /PointCloud22 + - /MarkerArray2 + Splitter Ratio: 0.355837 + Tree Height: 719 + - Class: rviz/Selection + Name: Selection + - Class: rviz/Tool Properties + Expanded: + - /2D Pose Estimate1 + - /2D Nav Goal1 + - /Publish Point1 + Name: Tool Properties + Splitter Ratio: 0.588679 + - Class: rviz/Views + Expanded: + - /Current View1 + Name: Views + Splitter Ratio: 0.5 + - Class: rviz/Time + Experimental: false + Name: Time + SyncMode: 0 + SyncSource: PointCloud2 +Visualization Manager: + Class: "" + Displays: + - Alpha: 0.5 + Cell Size: 1 + Class: rviz/Grid + Color: 160; 160; 164 + Enabled: true + Line Style: + Line Width: 0.03 + Value: Lines + Name: Grid + Normal Cell Count: 0 + Offset: + X: 0 + Y: 0 + Z: 0 + Plane: XY + Plane Cell Count: 10 + Reference Frame: + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: 
rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 5 + Size (m): 0.03 + Style: Spheres + Topic: /tracker/object_history + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /tracker/object_markers_array + Name: MarkerArray + Namespaces: + {} + Queue Size: 100 + Value: true + - Class: rviz/TF + Enabled: true + Frame Timeout: 15 + Frames: + All Enabled: false + kinect2_head_ir_optical_frame: + Value: true + world: + Value: true + Marker Scale: 1 + Name: TF + Show Arrows: true + Show Axes: true + Show Names: true + Tree: + kinect2_head_ir_optical_frame: + world: + {} + Update Interval: 0 + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 9 + Selectable: true + Size (Pixels): 3 + Size (m): 0.03 + Style: Boxes + Topic: /tracker/people_history_smoothed + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + - Class: rviz/MarkerArray + Enabled: true + Marker Topic: /tracker/people_markers_array + Name: MarkerArray + Namespaces: + {} + Queue Size: 100 + Value: true + - Alpha: 1 + Autocompute Intensity Bounds: true + Autocompute Value Bounds: + Max Value: 10 + Min Value: -10 + Value: true + Axis: Z + Channel Name: intensity + Class: rviz/PointCloud2 + Color: 255; 255; 255 + Color Transformer: RGB8 + Decay Time: 0 + Enabled: true + Invert Rainbow: false + Max Color: 255; 255; 255 + Max Intensity: 4096 + Min Color: 0; 0; 0 + Min Intensity: 0 + Name: PointCloud2 + Position Transformer: XYZ + Queue Size: 10 + Selectable: true + Size (Pixels): 3 + Size (m): 0.01 + Style: Flat Squares + Topic: /kinect2_head/depth_ir/points + Unreliable: false + Use Fixed Frame: true + Use rainbow: true + Value: true + Enabled: true + Global Options: + Background Color: 48; 48; 48 + Fixed Frame: world + Frame Rate: 60 + Name: root + Tools: + - Class: rviz/Interact + Hide Inactive Objects: true + - Class: rviz/MoveCamera + - Class: rviz/Select + - Class: rviz/FocusCamera + - Class: rviz/Measure + - Class: rviz/SetInitialPose + Topic: /initialpose + - Class: rviz/SetGoal + Topic: /move_base_simple/goal + - Class: rviz/PublishPoint + Single click: true + Topic: /clicked_point + Value: true + Views: + Current: + Class: rviz/Orbit + Distance: 17.2803 + Enable Stereo Rendering: + Stereo Eye Separation: 0.06 + Stereo Focal Distance: 1 + Swap Stereo Eyes: false + Value: false + Focal Point: + X: 0.0471745 + Y: 1.9946 + Z: 0.00524044 + Name: Current View + Near Clip Distance: 0.01 + Pitch: 0.17246 + Target Frame: + Value: Orbit (rviz) + Yaw: 2.95367 + Saved: ~ +Window Geometry: + Displays: + collapsed: false + Height: 1000 + Hide Left Dock: false + Hide Right Dock: true + QMainWindow State: 
000000ff00000000fd0000000400000000000002c90000035efc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000006400fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c00610079007301000000280000035e000000dd00fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f0000037afc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a0056006900650077007300000000280000037a000000b000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004bf0000003efc0100000002fb0000000800540069006d00650100000000000004bf000002f600fffffffb0000000800540069006d00650100000000000004500000000000000000000001f00000035e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 + Selection: + collapsed: false + Time: + collapsed: false + Tool Properties: + collapsed: false + Views: + collapsed: true + Width: 1215 + X: 55 + Y: 22 diff --git a/tracking/include/open_ptrack/tracking/track_object.h b/tracking/include/open_ptrack/tracking/track_object.h new file mode 100644 index 0000000..e05569d --- /dev/null +++ b/tracking/include/open_ptrack/tracking/track_object.h @@ -0,0 +1,409 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it]
+ *
+ */
+
+#ifndef OPEN_PTRACK_TRACKING_TRACK_OBJECT_H_
+#define OPEN_PTRACK_TRACKING_TRACK_OBJECT_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace open_ptrack
+{
+  namespace tracking
+  {
+    /** \brief Track_object represents information about a track (or target) */
+    class Track_object
+    {
+      public:
+
+        /** \brief A track has Status NEW if it has been recently created; otherwise it has NORMAL Status */
+        enum Status
+        {
+          NEW,
+          NORMAL
+        };
+
+        /** \brief Visibility states whether the track is currently fully visible to the sensor, partially occluded or totally occluded */
+        enum Visibility
+        {
+          VISIBLE = 0,     // No occlusion
+          OCCLUDED = 1,    // Partial occlusion
+          NOT_VISIBLE = 2  // Total occlusion
+        };
+
+        /** \brief The object name comes from the detections */
+        std::string object_name_;
+
+      protected:
+
+        /** \brief Dimension of a circular buffer which keeps track of filter parameters over time */
+        int MAX_SIZE;
+
+        /** \brief Track ID */
+        const int id_;
+
+        /** \brief Track frame id (frame id of the last detection associated to the track) */
+        const std::string frame_id_;
+
+        /** \brief Inverse of the frame rate */
+        const double period_;
+
+        /** \brief If true, the track is validated, meaning that it has been associated with a certain number of high confidence detections */
+        bool validated_;
+
+        /** \brief Kalman filter associated to the track */
+        open_ptrack::tracking::KalmanFilter* filter_;
+
+        /** \brief Temporary copy of the Kalman filter associated to the track (used to recover filter information when a track is re-found) */
+        open_ptrack::tracking::KalmanFilter* tmp_filter_;
+
+        /** \brief First time a detection is associated to the track */
+        ros::Time first_time_detected_;
+
+        /** \brief Last time a detection is associated to the track */
+        ros::Time last_time_detected_;
+
+        /** \brief Last time a detection with high detection confidence is associated to the track */
+        ros::Time last_time_detected_with_high_confidence_;
+
+        /** \brief Last time a prediction has been performed for the track */
+        ros::Time last_time_predicted_;
+
+        /** \brief Index in the circular buffer corresponding to the last time a prediction has been performed */
+        int last_time_predicted_index_;
+
+        /** Variables used for computing the detection/track Mahalanobis distance (2D state) */
+        std::vector<MahalanobisParameters2d> mahalanobis_map2d_;
+
+        /** Variables used for computing the detection/track Mahalanobis distance (4D state, position and velocity) */
+        std::vector<MahalanobisParameters4d> mahalanobis_map4d_;
+
+        /** \brief Track Status */
+        Status status_;
+
+        /** \brief Track Visibility */
+        Visibility visibility_;
+
+        /** \brief Number of high confidence detections associated to the track */
+        int updates_with_enough_confidence_;
+
+        /** \brief Track centroid z coordinate */
+        double z_;
+
+        /** \brief Track height */
+        double height_;
+
+        /** \brief Track distance from the camera */
double distance_; + + /** \brief Track age (in seconds) */ + double age_; + + /** \brief Confidence of the last detection associated to the track */ + double last_detector_confidence_; + + /** \brief Last data association score obtained by this track */ + double data_association_score_; + + /** \brief Color associated to the track */ + Eigen::Vector3f color_; + + /** \brief DetectionSource which provided the last detection associated to the track */ + open_ptrack::detection::DetectionSource* detection_source_; + + /** \brief If true, both position and velocity are considered in computing detection<->track Mahalanobis distance */ + bool velocity_in_motion_term_; + + /** \brief Count the number of consecutive updates with low confidence detections */ + int low_confidence_consecutive_frames_; + + + + public: + + /** \brief Constructor. */ + Track_object( + int id, + std::string frame_id, + double position_variance, + double acceleration_variance, + double period, + bool velocity_in_motion_term); + + /** \brief Destructor. */ + virtual ~Track_object(); + + /** \brief Track initialization with an old track. */ + void + init(const Track_object& old_track); + + /** + * \brief Track initialization. + * + * \param[in] x Track centroid x coordinate + * \param[in] y Track centroid y coordinate + * \param[in] z Track centroid z coordinate + * \param[in] height Track height + * \param[in] distance Track distance from the sensor + * \param[in] detection_source DetectionSource which provided the last detection associated to the track + */ + void + init( + double x, + double y, + double z, + double height, + double distance, + std::string object_name, + open_ptrack::detection::DetectionSource* detection_source); + + /** + * \brief Update track with new detection information. + * + * \param[in] x Detection centroid x coordinate + * \param[in] y Detection centroid y coordinate + * \param[in] z Detection centroid z coordinate + * \param[in] height Detection height + * \param[in] distance Detection distance from the sensor + * \param[in] confidence Detection confidence + * \param[in] min_confidence Minimum confidence for track initialization + * \param[in] min_confidence_detections Minimum confidence for detection + * \param[in] detection_source DetectionSource which provided the detection + */ + void + update( + double x, + double y, + double z, + double height, + double distance, + std::string object_name, + double data_assocation_score, + double confidence, + double min_confidence, + double min_confidence_detections, + open_ptrack::detection::DetectionSource* detection_source, + bool first_update = false); + + /** + * \brief Compute Mahalanobis distance between detection with position (x,y) and track. + * + * \param[in] x Detection centroid x coordinate. + * \param[in] y Detection centroid y coordinate. + * \param[in] when Time instant. + * + * \return the Mahalanobis distance. + */ + double + getMahalanobisDistance(double x, double y, const ros::Time& when); + + /* Validate a track */ + void + validate(); + + /** + * \brief Get track validation flag + * + * \return true if the track has been validated, false otherwise. + */ + bool + isValidated(); + + /** + * \brief Get track ID + * + * \return track ID + */ + int + getId(); + + /** + * \brief Set track status to s + * + * \param[in] s status + */ + void + setStatus(Status s); + + /** + * \brief Get track status + * + * \return track status + */ + Status + getStatus(); + + /** + * \brief Set track Visibility. + * + * \param[in] v Visibility status. 
+ */ + void + setVisibility(Visibility v); + + /** + * \brief Get track Visibility. + * + * \return track Visibility. + */ + Visibility + getVisibility(); + + /** + * \brief Get time passed from first detection-track association. + * + * \return time passed from first detection-track association. + */ + float + getSecFromFirstDetection(ros::Time current_time); + + /** + * \brief Get time passed from last detection-track association. + * + * \return time passed from last detection-track association. + */ + float + getSecFromLastDetection(ros::Time current_time); + + /** + * \brief Get time passed from last detection-track association with a high confidence detection. + * + * \return time passed from last detection-track association with a high confidence detection. + */ + float + getSecFromLastHighConfidenceDetection(ros::Time current_time); + + /** + * \brief Get the number of consecutive updates with low confidence detections. + * + * \return the number of consecutive updates with low confidence detections. + */ + float + getLowConfidenceConsecutiveFrames(); + + /** + * \brief Get the number of updates with enough confidence detections. + * + * \return the number of updates with enough confidence detections. + */ + int + getUpdatesWithEnoughConfidence(); + + /** + * \brief Draw track bounding box in the image. + * + * \param[in] vertical States if the camera is vertically oriented (true) or not (false). + */ + void + draw(bool vertical); + + /** + * \brief Create RViz visualization marker with the track position. + * + * \param[in/out] msg Array containing markers of every track. + */ + void + createMarker(visualization_msgs::MarkerArray::Ptr& msg); + + /** + * \brief Get a XYZRGB point from a point cloud. + * + * \param[in/out] p Point containing position information and to be filled with color. + * + * \return true if track is visible, false if not visible. + */ + bool + getPointXYZRGB(pcl::PointXYZRGB& p); + + /** + * \brief Create track ROS message. + * + * \param[in/out] track_msg Track ROS message. + * \param[in] vertical States if the camera is vertically oriented (true) or not (false). + */ + void + toMsg(opt_msgs::Track& track_msg, bool vertical); + + /** + * \brief Get the DetectionSource corresponding to the last associated detection. + * + * \return the DetectionSource corresponding to the last associated detection. 
+ */ + open_ptrack::detection::DetectionSource* + getDetectionSource(); + + /** + * \brief Set flag stating if people velocity should be used in motion term for data association + * + * \param[in] velocity_in_motion_term If true, people velocity is also used in motion term for data association + * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) + * \param[in] position_variance Position variance (for Kalman Filter) + */ + void + setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance); + + /** + * \brief Set acceleration variance (for Kalman Filter) + * + * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) + */ + void + setAccelerationVariance (double acceleration_variance); + + /** + * \brief Set position variance (for Kalman Filter) + * + * \param[in] position_variance Position variance (for Kalman Filter) + */ + void + setPositionVariance (double position_variance); + }; + + } /* namespace tracking */ +} /* namespace open_ptrack */ +#endif /* OPEN_PTRACK_TRACKING_TRACK_OBJECT_H_ */ diff --git a/tracking/include/open_ptrack/tracking/tracker.h b/tracking/include/open_ptrack/tracking/tracker.h index a275acd..dd96faf 100644 --- a/tracking/include/open_ptrack/tracking/tracker.h +++ b/tracking/include/open_ptrack/tracking/tracker.h @@ -49,178 +49,181 @@ namespace open_ptrack { - namespace tracking - { - /** \brief Tracker performs tracking-by-detection */ - class Tracker - { - protected: - /** \brief List of all active tracks */ - std::list tracks_; +namespace tracking +{ +/** \brief Tracker performs tracking-by-detection */ +class Tracker +{ +protected: + /** \brief List of all active tracks */ + std::list tracks_; + + /** \brief List of lost tracks */ + std::list lost_tracks_; + + /** \brief List of tracks with Status = NEW */ + std::list new_tracks_; + + /** \brief List of current detections */ + std::vector detections_; - /** \brief List of lost tracks */ - std::list lost_tracks_; + /** \brief List of current detections not associated to any track */ + std::list unassociated_detections_; - /** \brief List of tracks with Status = NEW */ - std::list new_tracks_; + /** \brief Track ID counter */ + int tracks_counter_; - /** \brief List of current detections */ - std::vector detections_; + /** \brief World reference frame used for tracking */ + std::string world_frame_id_; - /** \brief List of current detections not associated to any track */ - std::list unassociated_detections_; + /** \brief Minimum confidence for track initialization */ + double min_confidence_; - /** \brief Track ID counter */ - int tracks_counter_; + /** \brief Minimum confidence of detections sent to tracking */ + const double min_confidence_detections_; - /** \brief World reference frame used for tracking */ - std::string world_frame_id_; + /** \brief Minimum number of detection<->track associations needed for validating a track */ + int detections_to_validate_; - /** \brief Minimum confidence for track initialization */ - double min_confidence_; + /** \brief Time after which a not visible track becomes old */ + double sec_before_old_; - /** \brief Minimum confidence of detections sent to tracking */ - const double min_confidence_detections_; + /** \brief Time after which a visible track obtain NORMAL status */ + double sec_remain_new_; - /** \brief Minimum number of detection<->track associations needed for validating a track */ - int detections_to_validate_; + /** \brief Time within which a track should be 
validated (otherwise it is discarded) */ + double sec_before_fake_; - /** \brief Time after which a not visible track becomes old */ - double sec_before_old_; + /** \brief Gate distance for joint likelihood in data association */ + double gate_distance_; - /** \brief Time after which a visible track obtain NORMAL status */ - double sec_remain_new_; + /** \brief Flag stating if people detection confidence should be used in data association (true) or not (false) */ + bool detector_likelihood_; - /** \brief Time within which a track should be validated (otherwise it is discarded) */ - double sec_before_fake_; + /** \brief Weights for the single terms of the joint likelihood */ + std::vector likelihood_weights_; - /** \brief Gate distance for joint likelihood in data association */ - double gate_distance_; + /** \brief If true, people velocity is also used in motion term for data association */ + bool velocity_in_motion_term_; - /** \brief Flag stating if people detection confidence should be used in data association (true) or not (false) */ - bool detector_likelihood_; + /** \brief Minimum time period between two detections messages */ + const double period_; - /** \brief Weights for the single terms of the joint likelihood */ - std::vector likelihood_weights_; + /** \brief Position variance (for Kalman Filter) */ + double position_variance_; - /** \brief If true, people velocity is also used in motion term for data association */ - bool velocity_in_motion_term_; + /** \brief Acceleration variance (for Kalman Filter) */ + double acceleration_variance_; - /** \brief Minimum time period between two detections messages */ - const double period_; + /** \brief Flag enabling debug mode */ + const bool debug_mode_; - /** \brief Position variance (for Kalman Filter) */ - double position_variance_; + /** \brief Detections<->tracks distance matrix for data association */ + cv::Mat_ distance_matrix_; - /** \brief Acceleration variance (for Kalman Filter) */ - double acceleration_variance_; + /** \brief Detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ + cv::Mat_ cost_matrix_; - /** \brief Flag enabling debug mode */ - const bool debug_mode_; + /** \brief if true, the sensor is considered to be vertically placed (portrait mode) */ + bool vertical_; - /** \brief Detections<->tracks distance matrix for data association */ - cv::Mat_ distance_matrix_; + /** \brief Create detections<->tracks distance matrix for data association */ + void + createDistanceMatrix(); - /** \brief Detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ - cv::Mat_ cost_matrix_; + /** \brief Create detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ + void + createCostMatrix(); - /** \brief if true, the sensor is considered to be vertically placed (portrait mode) */ - bool vertical_; + /** \brief Update tracks associated to a detection in the current frame */ + void + updateDetectedTracks(); - /** \brief Create detections<->tracks distance matrix for data association */ - void - createDistanceMatrix(); + /** \brief Fill list containing unassociated detections */ + void + fillUnassociatedDetections(); - /** \brief Create detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ - void - createCostMatrix(); + /** \brief Create new tracks with high confidence unassociated detections */ + void + createNewTracks(); - /** \brief Update tracks associated to a detection in the current frame */ 
- void - updateDetectedTracks(); + /** \brief Create a new track with detection information */ + int + createNewTrack(open_ptrack::detection::Detection& detection); - /** \brief Fill list containing unassociated detections */ - void - fillUnassociatedDetections(); + /** \brief Update lost tracks */ + void + updateLostTracks(); - /** \brief Create new tracks with high confidence unassociated detections */ - void - createNewTracks(); - /** \brief Create a new track with detection information */ - int - createNewTrack(open_ptrack::detection::Detection& detection); - /** \brief Update lost tracks */ - void - updateLostTracks(); - public: - /** \brief Constructor */ - Tracker(double gate_distance, bool detector_likelihood, std::vector likelihood_weights, bool velocity_in_motion_term, +public: + /** \brief Constructor */ + Tracker(double gate_distance, bool detector_likelihood, std::vector likelihood_weights, bool velocity_in_motion_term, double min_confidence, double min_confidence_detections, double sec_before_old, double sec_before_fake, double sec_remain_new, int detections_to_validate, double period, double position_variance, double acceleration_variance, std::string world_frame_id, bool debug_mode, bool vertical); - /** \brief Destructor */ - virtual ~Tracker(); + /** \brief Destructor */ + virtual ~Tracker(); - /** + /** * \brief Initialization when a new set of detections arrive. * * \param[in] detections Vector of current detections. * */ - void - newFrame(const std::vector& detections); + void + newFrame(const std::vector& detections); - /** + /** * \brief Update the list of tracks according to the current set of detections. */ - void - updateTracks(); + void + updateTracks(); -// /** -// * \brief Draw the tracks into the RGB image given by its sensor. -// */ -// void -// drawRgb(); + // /** + // * \brief Draw the tracks into the RGB image given by its sensor. + // */ + // void + // drawRgb(); - /** + /** * \brief Fills the MarkerArray message with a marker for each visible track (in correspondance * of its centroid) and its number. * * \param[in] msg The MarkerArray message to fill. */ - void - toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg); + void + toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg); - /** + /** * \brief Writes the state of each track into a TrackArray message. * * \param[in] msg The TrackArray message to fill. */ - void - toMsg(opt_msgs::TrackArray::Ptr& msg); + void + toMsg(opt_msgs::TrackArray::Ptr& msg); - /** + /** * \brief Writes the state of tracks with a given frame id into a TrackArray message. * * \param[in] msg The TrackArray message to fill. * \param[in] source_frame_id Frame id of tracks that have to be written to msg. */ - void - toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id); + void + toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id); - /** + /** * \brief Writes the ID of each alive track into an IDArray message. * * \param[in] msg The IDArray message to fill. */ - void - getAliveIDs (opt_msgs::IDArray::Ptr& msg); + void + getAliveIDs (opt_msgs::IDArray::Ptr& msg); - /** + /** * \brief Appends the location of each track to a point cloud starting from starting_index (using * a circular array) * @@ -230,103 +233,103 @@ namespace open_ptrack * * \return the new starting_index. 
*/ - size_t - appendToPointCloud(pcl::PointCloud::Ptr& pointcloud, - size_t starting_index, size_t max_size); + size_t + appendToPointCloud(pcl::PointCloud::Ptr& pointcloud, + size_t starting_index, size_t max_size); - /** + /** * \brief Set minimum confidence for track initialization * * \param[in] min_confidence Minimum confidence for track initialization */ - void - setMinConfidenceForTrackInitialization (double min_confidence); + void + setMinConfidenceForTrackInitialization (double min_confidence); - /** + /** * \brief Set time after which a not visible track becomes old * * \param[in] sec_before_old Time after which a not visible track becomes old */ - void - setSecBeforeOld (double sec_before_old); + void + setSecBeforeOld (double sec_before_old); - /** + /** * \brief Set time within which a track should be validated (otherwise it is discarded) * * \param[in] sec_before_fake Time within which a track should be validated (otherwise it is discarded) */ - void - setSecBeforeFake (double sec_before_fake); + void + setSecBeforeFake (double sec_before_fake); - /** + /** * \brief Set time after which a visible track obtain NORMAL status * * \param[in] sec_remain_new Time after which a visible track obtain NORMAL status */ - void - setSecRemainNew (double sec_remain_new); + void + setSecRemainNew (double sec_remain_new); - /** + /** * \brief Set minimum number of detection<->track associations needed for validating a track * * \param[in] detections_to_validate Minimum number of detection<->track associations needed for validating a track */ - void - setDetectionsToValidate (int detections_to_validate); + void + setDetectionsToValidate (int detections_to_validate); - /** + /** * \brief Set flag stating if people detection confidence should be used in data association (true) or not (false) * * \param[in] detector_likelihood Flag stating if people detection confidence should be used in data association (true) or not (false) */ - void - setDetectorLikelihood (bool detector_likelihood); + void + setDetectorLikelihood (bool detector_likelihood); - /** + /** * \brief Set likelihood weights for data association * * \param[in] detector_weight Weight for detector likelihood * \param[in] motion_weight Weight for motion likelihood */ - void - setLikelihoodWeights (double detector_weight, double motion_weight); + void + setLikelihoodWeights (double detector_weight, double motion_weight); - /** + /** * \brief Set flag stating if people velocity should be used in motion term for data association * * \param[in] velocity_in_motion_term If true, people velocity is also used in motion term for data association * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) * \param[in] position_variance Position variance (for Kalman Filter) */ - void - setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance); + void + setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance); - /** + /** * \brief Set acceleration variance (for Kalman Filter) * * \param[in] acceleration_variance Acceleration variance (for Kalman Filter) */ - void - setAccelerationVariance (double acceleration_variance); + void + setAccelerationVariance (double acceleration_variance); - /** + /** * \brief Set position variance (for Kalman Filter) * * \param[in] position_variance Position variance (for Kalman Filter) */ - void - setPositionVariance (double position_variance); + void + setPositionVariance (double 
position_variance); - /** + /** * \brief Set gate distance for joint likelihood in data association * * \param[in] gate_distance Gate distance for joint likelihood in data association. */ - void - setGateDistance (double gate_distance); - }; + void + setGateDistance (double gate_distance); +}; - } /* namespace tracking */ +} /* namespace tracking */ } /* namespace open_ptrack */ #endif /* OPEN_PTRACK_TRACKING_TRACKER_H_ */ diff --git a/tracking/include/open_ptrack/tracking/tracker_object.h b/tracking/include/open_ptrack/tracking/tracker_object.h new file mode 100644 index 0000000..be48984 --- /dev/null +++ b/tracking/include/open_ptrack/tracking/tracker_object.h @@ -0,0 +1,347 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * + */ + +#ifndef OPEN_PTRACK_TRACKING_TRACKER_OBJECT_H_ +#define OPEN_PTRACK_TRACKING_TRACKER_OBJECT_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace open_ptrack +{ +namespace tracking +{ +/** \brief Tracker performs tracking-by-detection */ +class Tracker_object +{ +protected: + /** \brief List of all active tracks */ + std::list tracks_; + + /** \brief List of lost tracks */ + std::list lost_tracks_; + + /** \brief List of tracks with Status = NEW */ + std::list new_tracks_; + + /** \brief List of current detections */ + std::vector detections_; + + /** \brief List of current detections not associated to any track */ + std::list unassociated_detections_; + + /** \brief Track ID counter */ + int tracks_counter_; + + /** \brief World reference frame used for tracking */ + std::string world_frame_id_; + + /** \brief Minimum confidence for track initialization */ + double min_confidence_; + + /** \brief Minimum confidence of detections sent to tracking */ + const double min_confidence_detections_; + + /** \brief Minimum number of detection<->track associations needed for validating a track */ + int detections_to_validate_; + + /** \brief Time after which a not visible track becomes old */ + double sec_before_old_; + + /** \brief Time after which a visible track obtain NORMAL status */ + double sec_remain_new_; + + /** \brief Time within which a track should be validated (otherwise it is discarded) */ + double sec_before_fake_; + + /** \brief Gate distance for joint likelihood in data association */ + double gate_distance_; + + /** \brief Flag stating if people detection confidence should be used in data association (true) or not (false) */ + bool detector_likelihood_; + + /** \brief Weights for the single terms of the joint likelihood */ + std::vector likelihood_weights_; + + /** \brief If true, people velocity is also used in motion term for data association */ + bool velocity_in_motion_term_; + + /** \brief Minimum time period between two detections messages */ + const double period_; + + /** \brief Position variance (for Kalman Filter) */ + double position_variance_; + + /** \brief Acceleration variance (for Kalman Filter) */ + double acceleration_variance_; + + /** \brief Flag enabling debug mode */ + const bool debug_mode_; + + /** \brief Detections<->tracks distance matrix for data association */ + cv::Mat_ distance_matrix_; + + /** \brief Detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ + cv::Mat_ cost_matrix_; + + /** \brief if true, the sensor is considered to be vertically placed (portrait mode) */ + bool vertical_; + + /** \brief Create detections<->tracks distance matrix for data association */ + void + createDistanceMatrix(); + + /** \brief Create detections<->tracks cost matrix to be used to solve the Global Nearest Neighbor problem */ + void + createCostMatrix(); + + /** \brief Update tracks associated to a detection in the current frame */ + void + updateDetectedTracks(); + + /** \brief Fill list containing unassociated detections */ + void + fillUnassociatedDetections(); + + /** \brief Create new tracks with high confidence unassociated detections */ + void + createNewTracks(); + + /** \brief Create a new track with detection information */ + int + createNewTrack(open_ptrack::detection::Detection& detection); + + /** \brief Update lost tracks */ + void + updateLostTracks(); + + 
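+  // Per-frame flow (inferred from the method names above; the people Tracker in
+  // tracker.h follows the same scheme): newFrame() stores the detections, then
+  // updateTracks() builds the distance and cost matrices, assigns detections to
+  // tracks via Global Nearest Neighbor, collects the unassociated detections,
+  // spawns new tracks from them, and finally refreshes the lost-track list.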
+public: + //for object tracking + std::vector association_for_initialize_objectnames_; + + +public: + /** \brief Constructor */ + Tracker_object(double gate_distance, bool detector_likelihood, std::vector likelihood_weights, bool velocity_in_motion_term, + double min_confidence, double min_confidence_detections, double sec_before_old, double sec_before_fake, + double sec_remain_new, int detections_to_validate, double period, double position_variance, + double acceleration_variance, std::string world_frame_id, bool debug_mode, bool vertical); + + /** \brief Destructor */ + virtual ~Tracker_object(); + + /** + * \brief Initialization when a new set of detections arrive. + * + * \param[in] detections Vector of current detections. + * + */ + void + newFrame(const std::vector& detections); + + /** + * \brief Update the list of tracks according to the current set of detections. + */ + void + updateTracks(); + + // /** + // * \brief Draw the tracks into the RGB image given by its sensor. + // */ + // void + // drawRgb(); + + /** + * \brief Fills the MarkerArray message with a marker for each visible track (in correspondance + * of its centroid) and its number. + * + * \param[in] msg The MarkerArray message to fill. + */ + void + toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg); + + /** + * \brief Writes the state of each track into a TrackArray message. + * + * \param[in] msg The TrackArray message to fill. + */ + void + toMsg(opt_msgs::TrackArray::Ptr& msg); + + /** + * \brief Writes the state of tracks with a given frame id into a TrackArray message. + * + * \param[in] msg The TrackArray message to fill. + * \param[in] source_frame_id Frame id of tracks that have to be written to msg. + */ + void + toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id); + + + void + to_object_name_Msg(opt_msgs::ObjectNameArray::Ptr& msg); + + + + + /** + * \brief Writes the ID of each alive track into an IDArray message. + * + * \param[in] msg The IDArray message to fill. + */ + void + getAliveIDs (opt_msgs::IDArray::Ptr& msg); + + /** + * \brief Appends the location of each track to a point cloud starting from starting_index (using + * a circular array) + * + * \param[in] pointcloud The point cloud where to append the locations. + * \param[in] starting_index The starting index of the array. + * \param[in] max_size The maximum size of the point cloud (when reached the points overwrite the initial ones) + * + * \return the new starting_index. 
+   */
+  size_t
+  appendToPointCloud(pcl::PointCloud<pcl::PointXYZRGB>::Ptr& pointcloud,
+      size_t starting_index, size_t max_size);
+
+  /**
+   * \brief Set minimum confidence for track initialization
+   *
+   * \param[in] min_confidence Minimum confidence for track initialization
+   */
+  void
+  setMinConfidenceForTrackInitialization (double min_confidence);
+
+  /**
+   * \brief Set time after which a not visible track becomes old
+   *
+   * \param[in] sec_before_old Time after which a not visible track becomes old
+   */
+  void
+  setSecBeforeOld (double sec_before_old);
+
+  /**
+   * \brief Set time within which a track should be validated (otherwise it is discarded)
+   *
+   * \param[in] sec_before_fake Time within which a track should be validated (otherwise it is discarded)
+   */
+  void
+  setSecBeforeFake (double sec_before_fake);
+
+  /**
+   * \brief Set time after which a visible track obtains NORMAL status
+   *
+   * \param[in] sec_remain_new Time after which a visible track obtains NORMAL status
+   */
+  void
+  setSecRemainNew (double sec_remain_new);
+
+  /**
+   * \brief Set minimum number of detection<->track associations needed for validating a track
+   *
+   * \param[in] detections_to_validate Minimum number of detection<->track associations needed for validating a track
+   */
+  void
+  setDetectionsToValidate (int detections_to_validate);
+
+  /**
+   * \brief Set flag stating if people detection confidence should be used in data association (true) or not (false)
+   *
+   * \param[in] detector_likelihood Flag stating if people detection confidence should be used in data association (true) or not (false)
+   */
+  void
+  setDetectorLikelihood (bool detector_likelihood);
+
+  /**
+   * \brief Set likelihood weights for data association
+   *
+   * \param[in] detector_weight Weight for detector likelihood
+   * \param[in] motion_weight Weight for motion likelihood
+   */
+  void
+  setLikelihoodWeights (double detector_weight, double motion_weight);
+
+  /**
+   * \brief Set flag stating if people velocity should be used in motion term for data association
+   *
+   * \param[in] velocity_in_motion_term If true, people velocity is also used in motion term for data association
+   * \param[in] acceleration_variance Acceleration variance (for Kalman Filter)
+   * \param[in] position_variance Position variance (for Kalman Filter)
+   */
+  void
+  setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance);
+
+  /**
+   * \brief Set acceleration variance (for Kalman Filter)
+   *
+   * \param[in] acceleration_variance Acceleration variance (for Kalman Filter)
+   */
+  void
+  setAccelerationVariance (double acceleration_variance);
+
+  /**
+   * \brief Set position variance (for Kalman Filter)
+   *
+   * \param[in] position_variance Position variance (for Kalman Filter)
+   */
+  void
+  setPositionVariance (double position_variance);
+
+  /**
+   * \brief Set gate distance for joint likelihood in data association
+   *
+   * \param[in] gate_distance Gate distance for joint likelihood in data association.
+ */ + void + setGateDistance (double gate_distance); +}; + +} /* namespace tracking */ +} /* namespace open_ptrack */ + +#endif /* OPEN_PTRACK_TRACKING_TRACKER_OBJECT_H_ */ diff --git a/tracking/launch/detection_and_tracking.launch b/tracking/launch/detection_and_tracking.launch index 39f7fae..71caead 100644 --- a/tracking/launch/detection_and_tracking.launch +++ b/tracking/launch/detection_and_tracking.launch @@ -1,15 +1,26 @@ + + + + - - + + - - + + + + + + + + - + + diff --git a/tracking/launch/object_detection_and_tracking.launch b/tracking/launch/object_detection_and_tracking.launch new file mode 100644 index 0000000..fddb798 --- /dev/null +++ b/tracking/launch/object_detection_and_tracking.launch @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tracking/launch/people_detection_and_tracking.launch b/tracking/launch/people_detection_and_tracking.launch new file mode 100644 index 0000000..317f7a8 --- /dev/null +++ b/tracking/launch/people_detection_and_tracking.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/tracking/launch/people_detection_and_tracking_kinect1.launch b/tracking/launch/people_detection_and_tracking_kinect1.launch new file mode 100644 index 0000000..39f7fae --- /dev/null +++ b/tracking/launch/people_detection_and_tracking_kinect1.launch @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracker_network.launch b/tracking/launch/tracker_network.launch index 10af930..032d6b1 100644 --- a/tracking/launch/tracker_network.launch +++ b/tracking/launch/tracker_network.launch @@ -5,8 +5,7 @@ - - + @@ -16,8 +15,6 @@ - - @@ -26,7 +23,7 @@ - + @@ -36,8 +33,8 @@ - - + + @@ -62,8 +59,6 @@ - - - + diff --git a/tracking/launch/tracker_single_camera.launch b/tracking/launch/tracker_single_camera.launch new file mode 100644 index 0000000..bec53a3 --- /dev/null +++ b/tracking/launch/tracker_single_camera.launch @@ -0,0 +1,64 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tracking/launch/tracking_node.launch b/tracking/launch/tracking_node.launch index a7af869..90f70a2 100644 --- a/tracking/launch/tracking_node.launch +++ b/tracking/launch/tracking_node.launch @@ -3,9 +3,14 @@ - + + + + + + diff --git a/tracking/src/track_object.cpp b/tracking/src/track_object.cpp new file mode 100644 index 0000000..7f1bc71 --- /dev/null +++ b/tracking/src/track_object.cpp @@ -0,0 +1,648 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * + */ + +#include + +#include + +namespace open_ptrack +{ +namespace tracking +{ + +Track_object::Track_object( + int id, + std::string frame_id, + double position_variance, + double acceleration_variance, + double period, + bool velocity_in_motion_term) : + id_(id), + frame_id_(frame_id), + period_(period), + velocity_in_motion_term_(velocity_in_motion_term) +{ + color_ = Eigen::Vector3f( + float(rand() % 256) / 255, + float(rand() % 256) / 255, + float(rand() % 256) / 255); + + MAX_SIZE = 90; //XXX create a parameter!!! + if (velocity_in_motion_term_) + { + filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 4); + tmp_filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 4); + mahalanobis_map4d_.resize(MAX_SIZE, MahalanobisParameters4d()); + } + else + { + filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 2); + tmp_filter_ = new open_ptrack::tracking::KalmanFilter(period, position_variance, acceleration_variance, 2); + mahalanobis_map2d_.resize(MAX_SIZE, MahalanobisParameters2d()); + } + +} + +Track_object::~Track_object() +{ + delete filter_; + delete tmp_filter_; +} + +void +Track_object::init(const Track_object& old_track) +{ + double x, y; + old_track.filter_->getState(x, y); + + filter_->init(x, y, 10, old_track.velocity_in_motion_term_); + + *tmp_filter_ = *filter_; + visibility_ = old_track.visibility_; + + ROS_INFO("%d -> %d", old_track.id_, id_); + + z_ = old_track.z_; + height_ = old_track.height_; + distance_ = old_track.distance_; + if(old_track.object_name_!="default") + object_name_=old_track.object_name_; + age_ = old_track.age_; + + detection_source_ = old_track.detection_source_; + velocity_in_motion_term_ = old_track.velocity_in_motion_term_; + validated_ = validated_ || old_track.validated_; + low_confidence_consecutive_frames_ = old_track.low_confidence_consecutive_frames_; + + first_time_detected_ = old_track.first_time_detected_; + last_time_detected_ = old_track.last_time_detected_; + last_time_detected_with_high_confidence_ = old_track.last_time_detected_with_high_confidence_; + last_time_predicted_ = old_track.last_time_predicted_; + last_time_predicted_index_ = old_track.last_time_predicted_index_; + + data_association_score_ = old_track.data_association_score_; +} + + + +void +Track_object::init(double x, double y, double z, double height, double distance,std::string object_name, + open_ptrack::detection::DetectionSource* detection_source) +{ + //Init Kalman filter + filter_->init(x, y, distance, velocity_in_motion_term_); + 
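+  // (Sketch, not the actual filter code: the 4 passed to the KalmanFilter
+  //  constructor above is consistent with a constant-velocity state
+  //  [x, y, vx, vy] when velocity_in_motion_term_ is set,
+  //
+  //      x_{k+1} = x_k + vx_k * dt,   y_{k+1} = y_k + vy_k * dt,   dt = period,
+  //
+  //  with process noise driven by acceleration_variance and measurement noise
+  //  by position_variance; otherwise only [x, y] is estimated.)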
+  z_ = z;
+  height_ = height;
+  distance_ = distance;
+  if(object_name!="default")
+    object_name_=object_name;
+  status_ = NEW;
+  visibility_ = VISIBLE;
+  validated_ = false;
+  updates_with_enough_confidence_ = low_confidence_consecutive_frames_ = 0;
+  detection_source_ = detection_source;
+  first_time_detected_ = detection_source->getTime();
+  last_time_predicted_ = last_time_detected_ = last_time_detected_with_high_confidence_ = detection_source->getTime();
+  last_time_predicted_index_ = 0;
+  age_ = 0.0;
+}
+
+void
+Track_object::update(
+    double x,
+    double y,
+    double z,
+    double height,
+    double distance,
+    std::string object_name,
+    double data_association_score,
+    double confidence,
+    double min_confidence,
+    double min_confidence_detections,
+    open_ptrack::detection::DetectionSource* detection_source,
+    bool first_update)
+{
+  //Update Kalman filter
+  int difference;
+  double vx, vy;
+  if (velocity_in_motion_term_)
+  {
+    ros::Duration d(1.0);
+    ros::Duration d2(2.0);
+
+    double t = std::max(first_time_detected_.toSec(), (detection_source->getTime() - d).toSec());
+    t = std::min(t, last_time_detected_.toSec());
+    t = std::max(t, (detection_source->getTime() - d2).toSec());
+    double dt = t - last_time_predicted_.toSec();
+
+    difference = int(round(dt / period_));
+    int vIndex = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE;
+
+    if(difference != 0)
+    {
+      vx = - (x - mahalanobis_map4d_[vIndex].x) / dt;
+      vy = - (y - mahalanobis_map4d_[vIndex].y) / dt;
+    }
+    else
+    {
+      vx = mahalanobis_map4d_[vIndex].x;
+      vy = mahalanobis_map4d_[vIndex].y;
+    }
+  }
+
+  // Update Kalman filter from the last time the track was visible:
+  int framesLost = int(round((detection_source->getTime() - last_time_detected_).toSec() / period_)) - 1;
+
+  for(int i = 0; i < framesLost; i++)
+  {
+    filter_->predict();
+    filter_->update();
+  }
+
+  filter_->predict();
+  if (velocity_in_motion_term_)
+  {
+    filter_->update(x, y, vx, vy, distance);
+  }
+  else
+  {
+    filter_->update(x, y, distance);
+  }
+
+  *tmp_filter_ = *filter_;
+  difference = int(round((detection_source->getTime() - last_time_predicted_).toSec() / period_));
+  last_time_predicted_index_ = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE;
+  last_time_predicted_ = last_time_detected_ = detection_source->getTime();
+  if (velocity_in_motion_term_)
+    filter_->getMahalanobisParameters(mahalanobis_map4d_[last_time_predicted_index_]);
+  else
+    filter_->getMahalanobisParameters(mahalanobis_map2d_[last_time_predicted_index_]);
+
+  // Update z_ and height_ with a weighted combination of current and new values:
+  z_ = z_ * 0.9 + z * 0.1;
+  height_ = height_ * 0.9 + height * 0.1;
+  distance_ = distance;
+  if(object_name!="default")
+    object_name_=object_name;
+  if(confidence > min_confidence)
+  {
+    updates_with_enough_confidence_++;
+    last_time_detected_with_high_confidence_ = last_time_detected_;
+  }
+
+  // if (((confidence - 0.5) < min_confidence_detections) && ((last_detector_confidence_ - 0.5) < min_confidence_detections))
+  if ((confidence < (min_confidence + min_confidence_detections)/2) && (last_detector_confidence_ < (min_confidence + min_confidence_detections)/2))
+  {
+    low_confidence_consecutive_frames_++;
+  }
+  else
+  {
+    low_confidence_consecutive_frames_ = 0;
+  }
+  last_detector_confidence_ = confidence;
+
+  data_association_score_ = data_association_score;
+
+  // Compute track age:
+  age_ = (detection_source->getTime() - first_time_detected_).toSec();
+
+  detection_source_ = detection_source;
+}
+
+void
+Track_object::validate()
+{
+  validated_ = true;
+}
+
+bool
+Track_object::isValidated()
+{
+  return validated_;
+}
+
+int
+Track_object::getId()
+{
+  return id_;
+}
+
+void
+Track_object::setStatus(Track_object::Status s)
+{
+  status_ = s;
+}
+
+Track_object::Status
+Track_object::getStatus()
+{
+  return status_;
+}
+
+void
+Track_object::setVisibility(Track_object::Visibility v)
+{
+  visibility_ = v;
+}
+
+Track_object::Visibility
+Track_object::getVisibility()
+{
+  return visibility_;
+}
+
+float
+Track_object::getSecFromFirstDetection(ros::Time current_time)
+{
+  return (current_time - first_time_detected_).toSec();
+}
+
+float
+Track_object::getSecFromLastDetection(ros::Time current_time)
+{
+  return (current_time - last_time_detected_).toSec();
+}
+
+float
+Track_object::getSecFromLastHighConfidenceDetection(ros::Time current_time)
+{
+  return (current_time - last_time_detected_with_high_confidence_).toSec();
+}
+
+float
+Track_object::getLowConfidenceConsecutiveFrames()
+{
+  return low_confidence_consecutive_frames_;
+}
+
+int
+Track_object::getUpdatesWithEnoughConfidence()
+{
+  return updates_with_enough_confidence_;
+}
+
+double
+Track_object::getMahalanobisDistance(double x, double y, const ros::Time& when)
+{
+  int difference = int(round((when - last_time_predicted_).toSec() / period_));
+  // std::cout << "time difference from last detection: " << difference << std::endl;
+  int index;
+  if(difference <= 0)
+  {
+    index = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE;
+  }
+  else
+  {
+    for(int i = 0; i < difference; i++)
+    {
+      tmp_filter_->predict();
+      last_time_predicted_index_ = (last_time_predicted_index_ + 1) % MAX_SIZE;
+      if (velocity_in_motion_term_)
+        tmp_filter_->getMahalanobisParameters(mahalanobis_map4d_[last_time_predicted_index_]);
+      else
+        tmp_filter_->getMahalanobisParameters(mahalanobis_map2d_[last_time_predicted_index_]);
+      tmp_filter_->update();
+    }
+    last_time_predicted_ = when;
+    index = last_time_predicted_index_;
+  }
+
+  if (velocity_in_motion_term_)
+  {
+    ros::Duration d(1.0);
+    ros::Duration d2(2.0);
+
+    double t = std::max(first_time_detected_.toSec(), (when - d).toSec());
+    t = std::min(t, last_time_detected_.toSec());
+    t = std::max(t, (last_time_predicted_ - d2).toSec());
+    double dt = t - last_time_predicted_.toSec();
+
+    difference = int(round(dt / period_));
+    int vIndex = (MAX_SIZE + last_time_predicted_index_ + difference) % MAX_SIZE;
+
+    // std::cout << "dt: " << dt << std::endl;
+    // std::cout << "vIndex: " << vIndex << std::endl;
+
+    double vx, vy;
+    if(difference != 0)
+    {
+      vx = - (x - mahalanobis_map4d_[vIndex].x) / dt;
+      vy = - (y - mahalanobis_map4d_[vIndex].y) / dt;
+    }
+    else
+    {
+      vx = mahalanobis_map4d_[vIndex].x;
+      vy = mahalanobis_map4d_[vIndex].y;
+    }
+
+    // std::cout << "vx: " << vx << ", vy: " << vy << std::endl;
+
+    return open_ptrack::tracking::KalmanFilter::performMahalanobisDistance(x, y, vx, vy, mahalanobis_map4d_[index]);
+  }
+  else
+  {
+    return open_ptrack::tracking::KalmanFilter::performMahalanobisDistance(x, y, mahalanobis_map2d_[index]);
+  }
+
+}
+
+void
+Track_object::draw(bool vertical)
+{
+  cv::Scalar color(int(255.0 * color_(0)), int(255.0 * color_(1)), int(255.0 * color_(2)));
+
+  double _x2, _y2;
+  tmp_filter_->getState(_x2, _y2);
+  Eigen::Vector3d centroid2(_x2, _y2, z_);
+  centroid2 = detection_source_->transformToCam(centroid2);
+
+  if(visibility_ == Track_object::NOT_VISIBLE)
+    return;
+
+  double _x, _y;
+  filter_->getState(_x, _y);
+
+  cv::Scalar darkBlue(130,0,0);
+  cv::Scalar white(255,255,255);
+  Eigen::Vector3d centroid(_x, _y, z_ );
+  Eigen::Vector3d top(_x, _y, z_ + (height_/2));
+  Eigen::Vector3d bottom(_x, _y, z_ - (height_/2));
+
+  std::vector<Eigen::Vector3d> points;
+  double delta = height_ / 5.0;
+  points.push_back(Eigen::Vector3d(_x - delta, _y - delta, z_ - (height_/2)));
+  points.push_back(Eigen::Vector3d(_x + delta, _y - delta, z_ - (height_/2)));
+  points.push_back(Eigen::Vector3d(_x + delta, _y + delta, z_ - (height_/2)));
+  points.push_back(Eigen::Vector3d(_x - delta, _y + delta, z_ - (height_/2)));
+  points.push_back(Eigen::Vector3d(_x - delta, _y - delta, z_ + (height_/2)));
+  points.push_back(Eigen::Vector3d(_x + delta, _y - delta, z_ + (height_/2)));
+  points.push_back(Eigen::Vector3d(_x + delta, _y + delta, z_ + (height_/2)));
+  points.push_back(Eigen::Vector3d(_x - delta, _y + delta, z_ + (height_/2)));
+
+  //TODO loop for each detection source
+
+  centroid = detection_source_->transformToCam(centroid);
+  cv::circle(detection_source_->getImage(), cv::Point(centroid(0), centroid(1)), 5, color, 1);
+  top = detection_source_->transformToCam(top);
+  bottom = detection_source_->transformToCam(bottom);
+  for(std::vector<Eigen::Vector3d>::iterator it = points.begin(); it != points.end(); it++)
+    *it = detection_source_->transformToCam(*it);
+
+  // Draw a parallelepiped around the tracked object:
+  for(size_t i = 0; i < 4; i++)
+  {
+    cv::line(detection_source_->getImage(), cv::Point(points[i](0), points[i](1)),
+        cv::Point(points[(i + 1) % 4](0), points[(i + 1) % 4](1)), color,
+        visibility_ == VISIBLE ? 2 : 1, CV_AA);
+    cv::line(detection_source_->getImage(), cv::Point(points[i + 4](0), points[i + 4](1)),
+        cv::Point(points[(i + 1) % 4 + 4](0), points[(i + 1) % 4 + 4](1)), color,
+        visibility_ == VISIBLE ? 2 : 1, CV_AA);
+    cv::line(detection_source_->getImage(), cv::Point(points[i](0), points[i](1)),
+        cv::Point(points[i + 4](0), points[i + 4](1)), color,
+        visibility_ == VISIBLE ? 2 : 1, CV_AA);
+  }
+
+  std::stringstream ss;
+  float distance_to_display = float(int(distance_*100))/100;
+  ss << id_ << ": " << distance_to_display;
+
+  float id_half_number_of_digits = (float)(ss.str().size())/2;
+
+  // Draw white id_ on a blue background:
+  if (not vertical)
+  {
+    cv::rectangle(detection_source_->getImage(), cv::Point(top(0)-8*id_half_number_of_digits, top(1)-12),
+        cv::Point(top(0)+12*id_half_number_of_digits, top(1) +2), darkBlue, CV_FILLED,
+        visibility_ == VISIBLE ? 8 : 1);//, 0);
+    cv::putText(detection_source_->getImage(), ss.str(), cv::Point(top(0)-8*id_half_number_of_digits,
+        top(1)), cv::FONT_HERSHEY_SIMPLEX, 0.5, white, 1.7, CV_AA); // white id_
+  }
+  else
+  {
+    cv::Mat rotated_image = detection_source_->getImage();
+    cv::flip(rotated_image.t(), rotated_image, -1);
+    cv::flip(rotated_image, rotated_image, 1);
+    cv::rectangle(rotated_image, cv::Point(top(1)-8*id_half_number_of_digits, (rotated_image.rows - top(0)+1)-12),
+        cv::Point(top(1) +12*id_half_number_of_digits, (rotated_image.rows - top(0)+1)+2), darkBlue, CV_FILLED,
+        visibility_ == VISIBLE ? 
8 : 1);//, 0); + cv::putText(rotated_image, ss.str(), cv::Point(top(1)-8*id_half_number_of_digits, rotated_image.rows - top(0)+1), + cv::FONT_HERSHEY_SIMPLEX, 0.5, white, 1.7, CV_AA); // white id_ + cv::flip(rotated_image, rotated_image, -1); + cv::flip(rotated_image, rotated_image, 1); + rotated_image = rotated_image.t(); + detection_source_->setImage(rotated_image); + } + //TODO end loop +} + +void +Track_object::createMarker(visualization_msgs::MarkerArray::Ptr& msg) +{ + if(visibility_ == Track_object::NOT_VISIBLE) + return; + + double _x, _y; + filter_->getState(_x, _y); + + visualization_msgs::Marker marker; + + marker.header.frame_id = frame_id_; + marker.header.stamp = ros::Time::now(); + + marker.ns = "people"; + marker.id = id_; + + marker.type = visualization_msgs::Marker::SPHERE; + marker.action = visualization_msgs::Marker::ADD; + + marker.pose.position.x = _x; + marker.pose.position.y = _y; + marker.pose.position.z = z_; + marker.pose.orientation.x = 0.0; + marker.pose.orientation.y = 0.0; + marker.pose.orientation.z = 0.0; + marker.pose.orientation.w = 1.0; + + marker.scale.x = 0.1; + marker.scale.y = 0.1; + marker.scale.z = 0.1; + + marker.color.r = color_(2); + marker.color.g = color_(1); + marker.color.b = color_(0); + marker.color.a = 1.0; + + marker.lifetime = ros::Duration(0.2); + + msg->markers.push_back(marker); + + /////////////////////////////////// + + visualization_msgs::Marker text_marker; + + text_marker.header.frame_id = frame_id_; + text_marker.header.stamp = ros::Time::now(); + + text_marker.ns = "numbers"; + text_marker.id = id_; + + text_marker.type = visualization_msgs::Marker::TEXT_VIEW_FACING; + text_marker.action = visualization_msgs::Marker::ADD; + + std::stringstream ss; + ss << id_; +// text_marker.text = ss.str(); + text_marker.text = object_name_; + + text_marker.pose.position.x = _x; + text_marker.pose.position.y = _y; + // text_marker.pose.position.z = z_ + (height_/2) + 0.1; + text_marker.pose.position.z = z_+ 0.1; //for object markers + text_marker.pose.orientation.x = 0.0; + text_marker.pose.orientation.y = 0.0; + text_marker.pose.orientation.z = 0.0; + text_marker.pose.orientation.w = 1.0; + + text_marker.scale.x = 0.2; + text_marker.scale.y = 0.2; + text_marker.scale.z = 0.2; + + text_marker.color.r = color_(2); + text_marker.color.g = color_(1); + text_marker.color.b = color_(0); + text_marker.color.a = 1.0; + + text_marker.lifetime = ros::Duration(0.2); + + msg->markers.push_back(text_marker); + +} + +bool +Track_object::getPointXYZRGB(pcl::PointXYZRGB& p) +{ + if(visibility_ == Track_object::NOT_VISIBLE) + return false; + + double _x, _y; + filter_->getState(_x, _y); + + p.x = float(_x); + p.y = float(_y); + p.z = float(z_); + uchar* rgb_ptr = (uchar*)&p.rgb; + *rgb_ptr++ = uchar(color_(0) * 255.0f); + *rgb_ptr++ = uchar(color_(1) * 255.0f); + *rgb_ptr++ = uchar(color_(2) * 255.0f); + return true; +} + +void +Track_object::toMsg(opt_msgs::Track& track_msg, bool vertical) +{ + + double _x, _y; + filter_->getState(_x, _y); + + track_msg.id = id_; + track_msg.x = _x; + track_msg.y = _y; + track_msg.height = height_; + track_msg.distance = distance_; + track_msg.object_name=object_name_; + track_msg.age = age_; + track_msg.confidence = - data_association_score_; // minus for transforming distance into a sort of confidence + track_msg.visibility = visibility_; + + Eigen::Vector3d top(_x, _y, z_ + (height_/2)); + Eigen::Vector3d bottom(_x, _y, z_ - (height_/2)); + top = detection_source_->transformToCam(top); + bottom = 
detection_source_->transformToCam(bottom); + if (not vertical) + { + track_msg.box_2D.height = int(std::abs((top - bottom)(1))); + track_msg.box_2D.width = track_msg.box_2D.height / 2; + track_msg.box_2D.x = int(top(0)) - track_msg.box_2D.height / 4; + track_msg.box_2D.y = int(top(1)); + } + else + { + track_msg.box_2D.width = int(std::abs((top - bottom)(0))); + track_msg.box_2D.height = track_msg.box_2D.width / 2; + track_msg.box_2D.x = int(top(0)) - track_msg.box_2D.width; + track_msg.box_2D.y = int(top(1)) - track_msg.box_2D.width / 4; + } +} + +open_ptrack::detection::DetectionSource* +Track_object::getDetectionSource() +{ + return detection_source_; +} + +void +Track_object::setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance) +{ + velocity_in_motion_term_ = velocity_in_motion_term; + + // Re-initialize Kalman filter + filter_->setPredictModel (acceleration_variance); + filter_->setObserveModel (position_variance); + double x, y; + filter_->getState(x, y); + filter_->init(x, y, distance_, velocity_in_motion_term_); + + *tmp_filter_ = *filter_; +} + +void +Track_object::setAccelerationVariance (double acceleration_variance) +{ + filter_->setPredictModel (acceleration_variance); + tmp_filter_->setPredictModel (acceleration_variance); +} + +void +Track_object::setPositionVariance (double position_variance) +{ + filter_->setObserveModel (position_variance); + tmp_filter_->setObserveModel (position_variance); +} +} /* namespace tracking */ +} /* namespace open_ptrack */ diff --git a/tracking/src/tracker.cpp b/tracking/src/tracker.cpp index e8e7b27..cf7d22e 100644 --- a/tracking/src/tracker.cpp +++ b/tracking/src/tracker.cpp @@ -389,12 +389,17 @@ namespace open_ptrack // If a detection<->track association has been found: if(cost_matrix_(track, measure) == 0.0 && distance_matrix_(track, measure) <= gate_distance_) { + + + + open_ptrack::detection::Detection& d = detections_[measure]; // If the detection has enough confidence in the current frame or in a recent past: // if ((t->getLowConfidenceConsecutiveFrames() < 10) || ((d.getConfidence() - 0.5) > min_confidence_detections_)) if ((t->getLowConfidenceConsecutiveFrames() < 10) || (d.getConfidence() > ((min_confidence_ + min_confidence_detections_)/2))) { + // Update track with the associated detection: bool first_update = false; t->update(d.getWorldCentroid()(0), d.getWorldCentroid()(1), d.getWorldCentroid()(2),d.getHeight(), diff --git a/tracking/src/tracker_object.cpp b/tracking/src/tracker_object.cpp new file mode 100644 index 0000000..32e981a --- /dev/null +++ b/tracking/src/tracker_object.cpp @@ -0,0 +1,597 @@ +/* + * Software License Agreement (BSD License) + * + * Copyright (c) 2011-2012, Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * Copyright (c) 2013-, Open Perception, Inc. + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * * Neither the name of the copyright holder(s) nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + * + * Author: Matteo Munaro [matteo.munaro@dei.unipd.it], Filippo Basso [filippo.basso@dei.unipd.it] + * + */ + +#include + +#include + +namespace open_ptrack +{ +namespace tracking +{ + +Tracker_object::Tracker_object( + double gate_distance, + bool detector_likelihood, + std::vector likelihood_weights, + bool velocity_in_motion_term, + double min_confidence, + double min_confidence_detections, + double sec_before_old, + double sec_before_fake, + double sec_remain_new, + int detections_to_validate, + double period, + double position_variance, + double acceleration_variance, + std::string world_frame_id, + bool debug_mode, + bool vertical) : + gate_distance_(gate_distance), + detector_likelihood_(detector_likelihood), + likelihood_weights_(likelihood_weights), + velocity_in_motion_term_(velocity_in_motion_term), + min_confidence_(min_confidence), + min_confidence_detections_(min_confidence_detections), + sec_before_old_(sec_before_old), + sec_before_fake_(sec_before_fake), + sec_remain_new_(sec_remain_new), + detections_to_validate_(detections_to_validate), + period_(period), + position_variance_(position_variance), + acceleration_variance_(acceleration_variance), + world_frame_id_(world_frame_id), + debug_mode_(debug_mode), + vertical_(vertical) +{ + tracks_counter_ = 0; +} + +Tracker_object::~Tracker_object() +{ + // TODO Auto-generated destructor stub +} + +void +Tracker_object::newFrame(const std::vector& detections) +{ + detections_.clear(); + unassociated_detections_.clear(); + lost_tracks_.clear(); + new_tracks_.clear(); + detections_ = detections; + ros::Time current_detections_time = detections_[0].getSource()->getTime(); + + for(std::list::iterator it = tracks_.begin(); it != tracks_.end();) + { + open_ptrack::tracking::Track_object* t = *it; + bool deleted = false; + + if(((t->getVisibility() == Track_object::NOT_VISIBLE && (t->getSecFromLastHighConfidenceDetection(current_detections_time)) >= sec_before_old_) + || (!t->isValidated() && t->getSecFromFirstDetection(current_detections_time) >= sec_before_fake_))) + { + if (debug_mode_) + { + std::cout << "Track " << t->getId() << " DELETED" << std::endl; + } + delete t; + it = tracks_.erase(it); + deleted = true; + } + else if(!t->isValidated() && t->getUpdatesWithEnoughConfidence() == detections_to_validate_) + { + t->validate(); + if (debug_mode_) + { + std::cout << "Track " << t->getId() << " VALIDATED" << std::endl; + } + } + else if(t->getStatus() == Track_object::NEW && t->getSecFromFirstDetection(current_detections_time) >= 
sec_remain_new_) + { + t->setStatus(Track_object::NORMAL); + if (debug_mode_) + { + std::cout << "Track " << t->getId() << " set to NORMAL" << std::endl; + } + } + + if(!deleted) + { + if(t->getStatus() == Track_object::NEW && t->getVisibility() == Track_object::VISIBLE) + new_tracks_.push_back(t); + if(t->getVisibility() == Track_object::NOT_VISIBLE) + lost_tracks_.push_back(t); + it++; + } + } +} + +void +Tracker_object::updateTracks() +{ + createDistanceMatrix(); + createCostMatrix(); + + // Solve Global Nearest Neighbor problem: + Munkres munkres; + cost_matrix_ = munkres.solve(cost_matrix_, false); // rows: targets (tracks), cols: detections + + updateDetectedTracks(); + fillUnassociatedDetections(); + updateLostTracks(); + createNewTracks(); +} + +// void Tracker_object::drawRgb() +// { +// for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) +// (*it)->draw(vertical_); +// } + +void +Tracker_object::toMarkerArray(visualization_msgs::MarkerArray::Ptr& msg) +{ + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track_object* t = *it; + t->createMarker(msg); + } +} + +void +Tracker_object::toMsg(opt_msgs::TrackArray::Ptr& msg) +{ + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track_object* t = *it; + + opt_msgs::Track track; + t->toMsg(track, vertical_); + msg->tracks.push_back(track); + } +} + +void +Tracker_object::toMsg(opt_msgs::TrackArray::Ptr& msg, std::string& source_frame_id) +{ + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track_object* t = *it; + if (strcmp(t->getDetectionSource()->getFrameId().c_str(), source_frame_id.c_str()) == 0) // if strings are equal + { + opt_msgs::Track track; + t->toMsg(track, vertical_); + + // // For publishing only not occluded tracks: + // if (track.visibility < 2) + // msg->tracks.push_back(track); + + // For publishing all tracks: + msg->tracks.push_back(track); + } + } +} + + + + + +void +Tracker_object::to_object_name_Msg(opt_msgs::ObjectNameArray::Ptr& msg) +{ + if(association_for_initialize_objectnames_.size()==tracks_.size()) + { + int index=0; + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track_object* t = *it; + opt_msgs::ObjectName object_name_msg; + object_name_msg.object_name=t->object_name_; + object_name_msg.no=association_for_initialize_objectnames_[index]; + msg->object_names.push_back(object_name_msg); + index++; +// std::cout<<"send message with object name:"<object_name_<::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track_object* t = *it; + msg->ids.push_back ((*it)->getId()); + } + msg->max_ID = tracks_counter_; +} + +size_t +Tracker_object::appendToPointCloud(pcl::PointCloud::Ptr& pointcloud, size_t starting_index, size_t max_size) +{ + for(size_t i = 0; i < tracks_.size() && pointcloud->size() < max_size; i++) + { + pcl::PointXYZRGB point; + pointcloud->push_back(point); + } + + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track_object* t = *it; + if(t->getPointXYZRGB(pointcloud->points[starting_index])) + starting_index = (starting_index + 1) % max_size; + + } + return starting_index; +} + +/************************ protected methods ************************/ + +int +Tracker_object::createNewTrack(open_ptrack::detection::Detection& detection) +{ + if(detection.getConfidence() < 
min_confidence_) + return -1; + + open_ptrack::tracking::Track_object* t; + t = new open_ptrack::tracking::Track_object( + ++tracks_counter_, + world_frame_id_, + position_variance_, + acceleration_variance_, + period_, + velocity_in_motion_term_ ); + + t->init(detection.getWorldCentroid()(0), detection.getWorldCentroid()(1),detection.getWorldCentroid()(2), + detection.getHeight(), detection.getDistance(), detection.getObjectName(),detection.getSource()); + + bool first_update = true; + t->update(detection.getWorldCentroid()(0), detection.getWorldCentroid()(1), detection.getWorldCentroid()(2), + detection.getHeight(), detection.getDistance(), detection.getObjectName(),0.0, + detection.getConfidence(), min_confidence_, min_confidence_detections_, detection.getSource(), first_update); + + ROS_INFO("Created %d", t->getId()); + + tracks_.push_back(t); + return tracks_counter_; +} + +void +Tracker_object::createDistanceMatrix() +{ + distance_matrix_ = cv::Mat_(tracks_.size(), detections_.size()); + int track = 0; + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + //double x, y, height, vx, vz; + Track_object* t = *it; + //t->predict(x, y, height, vx, vz); + int measure = 0; + for(std::vector::iterator dit = detections_.begin(); dit != detections_.end(); dit++) + { + double detector_likelihood; + + // Compute detector likelihood: + if (detector_likelihood_) + { + detector_likelihood = dit->getConfidence(); + // detector_likelihood = log((dit->getConfidence() + 3) / 6); + } + else + { + detector_likelihood = 0; + } + + // Compute motion likelihood: + double motion_likelihood = t->getMahalanobisDistance( + dit->getWorldCentroid()(0), + dit->getWorldCentroid()(1), + dit->getSource()->getTime()); + + // Compute joint likelihood and put it in the distance matrix: + + distance_matrix_(track, measure++) = likelihood_weights_[0] * detector_likelihood + likelihood_weights_[1] * motion_likelihood; + + // Remove NaN and inf: + if (isnan(distance_matrix_(track, measure-1)) | (not std::isfinite(distance_matrix_(track, measure-1)))) + distance_matrix_(track, measure-1) = 2*gate_distance_; + + // std::cout << (*it)->getId() << ": " << "Motion likelihood: " << likelihood_weights_[0] * motion_likelihood << std::endl; + // if (detector_likelihood_) + // std::cout << (*it)->getId() << ": " << "Detector likelihood: " << likelihood_weights_[1] * dit->getConfidence() << std::endl; + // std::cout << (*it)->getId() << ": " << "JOINT LIKELIHOOD: " << distance_matrix_(track, measure-1) << std::endl; + + /*ROS_INFO("%d(%f, %f) = %f", t->getId(), + dit->getWorldCentroid()(0), + dit->getWorldCentroid()(1), + distance_matrix_(track, measure - 1));*/ + } + track++; + } + + // std::cout << "Distance matrix:" << std::endl; + // for(int row = 0; row < distance_matrix_.rows; row++) + // { + // for(int col = 0; col < distance_matrix_.cols; col++) + // { + // std::cout.width(8); + // std::cout << distance_matrix_(row,col) << ","; + // } + // std::cout << std::endl; + // } + // std::cout << std::endl; +} + +void +Tracker_object::createCostMatrix() +{ + cost_matrix_ = distance_matrix_.clone(); + for(int i = 0; i < distance_matrix_.rows; i++) + { + for(int j = 0; j < distance_matrix_.cols; j++) + { + if(distance_matrix_(i, j) > gate_distance_) + cost_matrix_(i, j) = 1000000.0; + } + } + + + // std::cout << "Munkres input matrix:" << std::endl; + // for(int row = 0; row < cost_matrix_.rows; row++) + // { + // for(int col = 0; col < cost_matrix_.cols; col++) + // { + // std::cout.width(8); + // std::cout << 
cost_matrix_(row,col) << ","; + // } + // std::cout << std::endl; + // } + // std::cout << std::endl; +} + +void +Tracker_object::updateDetectedTracks() +{ + // std::cout << "Munkres output matrix:" << std::endl; + // for(int row = 0; row < cost_matrix_.rows; row++) { + // for(int col = 0; col < cost_matrix_.cols; col++) { + // std::cout.width(1); + // std::cout << cost_matrix_(row,col) << ","; + // } + // std::cout << std::endl; + // } + // std::cout << std::endl; + + // Iterate over every track: + int track = 0; + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + bool updated = false; + open_ptrack::tracking::Track_object* t = *it; + + for(int measure = 0; measure < cost_matrix_.cols; measure++) + { + // If a detection<->track association has been found: + if(cost_matrix_(track, measure) == 0.0 && distance_matrix_(track, measure) <= gate_distance_) + { + + + open_ptrack::detection::Detection& d = detections_[measure]; + + // If the detection has enough confidence in the current frame or in a recent past: + // if ((t->getLowConfidenceConsecutiveFrames() < 10) || ((d.getConfidence() - 0.5) > min_confidence_detections_)) + if ((t->getLowConfidenceConsecutiveFrames() < 10) || (d.getConfidence() > ((min_confidence_ + min_confidence_detections_)/2))) + { + //for object tracking + association_for_initialize_objectnames_.push_back(measure); + + + // Update track with the associated detection: + bool first_update = false; + + t->update(d.getWorldCentroid()(0), d.getWorldCentroid()(1), d.getWorldCentroid()(2),d.getHeight(), + d.getDistance(),d.getObjectName(), distance_matrix_(track, measure), + d.getConfidence(), min_confidence_, min_confidence_detections_, + d.getSource(), first_update); + + t->setVisibility(d.isOccluded() ? Track_object::OCCLUDED : Track_object::VISIBLE); + updated = true; + break; + } + else + { + //std::cout << "Id: " << t->getId() << ", lowConfConsFrames: " << t->getLowConfidenceConsecutiveFrames() << ", newConf: " << d.getConfidence()<< std::endl; + } + } + } + if(!updated) + { + if(t->getVisibility() != Track_object::NOT_VISIBLE) + { + t->setVisibility(Track_object::NOT_VISIBLE); + //t->update(); + } + } + track++; + } + // std::cout << std::endl; +} + +void +Tracker_object::fillUnassociatedDetections() +{ + // Fill a list with detections not associated to any track: + if(cost_matrix_.cols == 0 && detections_.size() > 0) + { + for(size_t measure = 0; measure < detections_.size(); measure++) + unassociated_detections_.push_back(detections_[measure]); + } + else + { + for(int measure = 0; measure < cost_matrix_.cols; measure++) + { + bool associated = false; + for(int track = 0; track < cost_matrix_.rows; track++) + { + if(cost_matrix_(track, measure) == 0.0) + { + if(distance_matrix_(track, measure) > gate_distance_) + break; + associated = true; + } + } + if(!associated/* && detections_[measure].getConfidence() > min_confidence_*/) + { + unassociated_detections_.push_back(detections_[measure]); + } + } + } +} + +void +Tracker_object::updateLostTracks() +{ + //for(std::list::iterator it = lost_tracks_.begin(); it != lost_tracks_.end(); it++) + // (*it)->update(); +} + +void +Tracker_object::createNewTracks() +{ + for(std::list::iterator dit = unassociated_detections_.begin(); + dit != unassociated_detections_.end(); dit++) + { + createNewTrack(*dit); + } +} + +void +Tracker_object::setMinConfidenceForTrackInitialization (double min_confidence) +{ + min_confidence_ = min_confidence; +} + +void +Tracker_object::setSecBeforeOld (double 
sec_before_old) +{ + sec_before_old_ = sec_before_old; +} + +void +Tracker_object::setSecBeforeFake (double sec_before_fake) +{ + sec_before_fake_ = sec_before_fake; +} + +void +Tracker_object::setSecRemainNew (double sec_remain_new) +{ + sec_remain_new_ = sec_remain_new; +} + +void +Tracker_object::setDetectionsToValidate (int detections_to_validate) +{ + detections_to_validate_ = detections_to_validate; +} + +void +Tracker_object::setDetectorLikelihood (bool detector_likelihood) +{ + detector_likelihood_ = detector_likelihood; +} + +void +Tracker_object::setLikelihoodWeights (double detector_weight, double motion_weight) +{ + likelihood_weights_[0] = detector_weight; + likelihood_weights_[1] = motion_weight; +} + +void +Tracker_object::setVelocityInMotionTerm (bool velocity_in_motion_term, double acceleration_variance, double position_variance) +{ + velocity_in_motion_term_ = velocity_in_motion_term; + acceleration_variance_ = acceleration_variance; + position_variance_ = position_variance; + + // Update all existing tracks: + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track_object* t = *it; + t->setVelocityInMotionTerm (velocity_in_motion_term_, acceleration_variance_, position_variance_); + } +} + +void +Tracker_object::setAccelerationVariance (double acceleration_variance) +{ + acceleration_variance_ = acceleration_variance; + + // Update all existing tracks: + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track_object* t = *it; + t->setAccelerationVariance (acceleration_variance_); + } +} + +void +Tracker_object::setPositionVariance (double position_variance) +{ + position_variance_ = position_variance; + + // Update all existing tracks: + for(std::list::iterator it = tracks_.begin(); it != tracks_.end(); it++) + { + open_ptrack::tracking::Track_object* t = *it; + t->setPositionVariance (position_variance_); + } +} + +void +Tracker_object::setGateDistance (double gate_distance) +{ + gate_distance_ = gate_distance; +} +} /* namespace tracking */ +} /* namespace open_ptrack */ From 9addbbdb11af8af47e305b605de1084e7be445e0 Mon Sep 17 00:00:00 2001 From: yongheng1991 Date: Sat, 24 Jun 2017 08:43:42 +0200 Subject: [PATCH 8/9] modify the listener.py to auto genreate detection launch files --- opt_calibration/apps/listener.py | 263 ++++++++++++++++++++++++++----- 1 file changed, 220 insertions(+), 43 deletions(-) diff --git a/opt_calibration/apps/listener.py b/opt_calibration/apps/listener.py index 3d8f5f0..853bdc7 100755 --- a/opt_calibration/apps/listener.py +++ b/opt_calibration/apps/listener.py @@ -48,30 +48,30 @@ def __init__(self) : self.sensor_launchers_dir = rospy.get_param('~sensor_launchers_dir') if self.sensor_launchers_dir[len(self.sensor_launchers_dir) - 1] != '/': self.sensor_launchers_dir = self.sensor_launchers_dir + '/' - + self.detector_launchers_dir = rospy.get_param('~detector_launchers_dir') if self.detector_launchers_dir[len(self.detector_launchers_dir) - 1] != '/': self.detector_launchers_dir = self.detector_launchers_dir + '/' - + self.camera_poses_dir = rospy.get_param('~camera_poses_dir') if self.camera_poses_dir[len(self.camera_poses_dir) - 1] != '/': self.camera_poses_dir = self.camera_poses_dir + '/' - + self.create_sensor_launch_srv = rospy.Service('create_sensor_launch', OPTSensor, self.handle_create_sensor_launch) self.create_detector_launch_srv = rospy.Service('create_detector_launch', OPTSensor, self.handle_create_detector_launch) 
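+        # A hypothetical manual invocation of the service above (the type
+        # constant and serial number are illustrative; the real values are
+        # defined in OPTSensorRequest):
+        #
+        #   rosservice call /create_detector_launch \
+        #       "{id: 'kinect01', type: 4, serial: '012345678947'}"
+        #
+        # For a Kinect v2 this writes detection_node_kinect01.launch plus the
+        # people_detection_node_ and object_detection_node_ variants generated
+        # by the handler below.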
self.create_camera_poses_srv = rospy.Service('create_camera_poses', OPTTransform, self.handle_create_camera_poses) - - + + def handle_create_sensor_launch(self, request) : - + file_name = self.sensor_launchers_dir + 'sensor_' + request.id + '.launch' file = open(file_name, 'w') file.write('\n') file.write('\n') file.write('\n\n') - + file.write(' \n') - + if request.type == OPTSensorRequest.TYPE_SR4500: file.write(' \n') if request.ip == '': @@ -79,24 +79,24 @@ def handle_create_sensor_launch(self, request) : rospy.logerr('Missing "ip" field for the SR4500 sensor!') return (OPTSensorResponse.STATUS_ERROR, 'Missing "ip" field!') file.write(' \n\n') - + file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n\n') - + elif request.type == OPTSensorRequest.TYPE_KINECT1: file.write(' \n') if request.serial != '': file.write(' \n') file.write('\n') - + file.write(' \n') file.write(' \n') if request.serial != '': @@ -106,17 +106,17 @@ def handle_create_sensor_launch(self, request) : file.write(' \n') file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n\n') - + elif request.type == OPTSensorRequest.TYPE_STEREO_PG: file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') file.write(' \n') @@ -128,13 +128,13 @@ def handle_create_sensor_launch(self, request) : file.write(' \n') file.write(' \n') file.write(' \n') - file.write(' \n\n') + file.write(' \n\n') elif request.type == OPTSensorRequest.TYPE_KINECT2: file.write(' \n') if request.serial != '': file.write(' \n') file.write('\n') - + file.write(' \n') file.write(' \n') if request.serial != '': @@ -144,27 +144,30 @@ def handle_create_sensor_launch(self, request) : # file.write(' \n') file.write(' \n') file.write(' \n') - file.write(' \n\n') + file.write(' \n\n') file.write(' \n') - file.write(' \n\n') - + file.write(' \n\n') + file.write('\n') file.close(); rospy.loginfo(file_name + ' created!'); - + return (OPTSensorResponse.STATUS_OK, file_name + ' created!') - + + ########################################## + ## PEOPLE and OBJECT DETECTION LAUNCHER ## + ########################################## def handle_create_detector_launch(self, request) : - + file_name = self.detector_launchers_dir + 'detection_node_' + request.id + '.launch' file = open(file_name, 'w') file.write('\n') file.write('\n') file.write('\n\n') - + file.write(' \n') - + if request.type == OPTSensorRequest.TYPE_SR4500: file.write(' \n') if request.ip == '': @@ -173,7 +176,7 @@ def handle_create_detector_launch(self, request) : return (OPTSensorResponse.STATUS_ERROR, 'Missing "ip" field!') file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') file.write(' \n') @@ -181,12 +184,12 @@ def handle_create_detector_launch(self, request) : file.write(' \n') file.write(' \n') file.write(' \n\n') - + elif request.type == OPTSensorRequest.TYPE_KINECT1: if request.serial != '': file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') if request.serial != '': @@ -197,14 +200,14 @@ def handle_create_detector_launch(self, request) : file.write(' \n') file.write(' \n') file.write(' \n\n') - + elif request.type == OPTSensorRequest.TYPE_STEREO_PG: file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') file.write(' \n') @@ -218,7 +221,7 @@ def 
handle_create_detector_launch(self, request) : if request.serial != '': file.write(' \n') file.write(' \n\n') - + file.write(' \n') file.write(' \n') if request.serial != '': @@ -229,40 +232,214 @@ def handle_create_detector_launch(self, request) : file.write(' \n') file.write(' \n') file.write(' \n\n') - + + file.write('\n') + file.close(); + rospy.loginfo(file_name + ' created!'); + ########################################## + ########################################## + + + + ############################### + ## PEOPLE DETECTION LAUNCHER ## + ############################### + file_name = self.detector_launchers_dir + 'people_detection_node_' + request.id + '.launch' + file = open(file_name, 'w') + file.write('\n') + file.write('\n') + file.write('\n\n') + + file.write(' \n') + + if request.type == OPTSensorRequest.TYPE_SR4500: + file.write(' \n') + if request.ip == '': + file.close(); + rospy.logerr('Missing "ip" field for the SR4500 sensor!') + return (OPTSensorResponse.STATUS_ERROR, 'Missing "ip" field!') + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + + elif request.type == OPTSensorRequest.TYPE_KINECT1: + if request.serial != '': + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + if request.serial != '': + file.write(' \n') + file.write(' \n') + else: + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + + elif request.type == OPTSensorRequest.TYPE_STEREO_PG: + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + elif request.type == OPTSensorRequest.TYPE_KINECT2: + if request.serial != '': + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + #if request.serial != '': + #file.write(' \n') + #file.write(' \n') + #else: + #file.write(' \n') + file.write(' \n') + #file.write(' \n') + file.write(' \n\n') + + file.write('\n') + file.close(); + rospy.loginfo(file_name + ' created!'); + + ############################### + ############################### + + + + ############################### + ## OBJECT DETECTION LAUNCHER ## + ############################### + file_name = self.detector_launchers_dir + 'object_detection_node_' + request.id + '.launch' + file = open(file_name, 'w') + file.write('\n') + file.write('\n') + file.write('\n\n') + + file.write(' \n') + + if request.type == OPTSensorRequest.TYPE_SR4500: + file.write(' \n') + if request.ip == '': + file.close(); + rospy.logerr('Missing "ip" field for the SR4500 sensor!') + return (OPTSensorResponse.STATUS_ERROR, 'Missing "ip" field!') + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + + elif request.type == OPTSensorRequest.TYPE_KINECT1: + if request.serial != '': + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + if request.serial != '': + file.write(' \n') + file.write(' \n') + else: + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + + elif request.type == OPTSensorRequest.TYPE_STEREO_PG: + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') 
+ file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n') + file.write(' \n\n') + elif request.type == OPTSensorRequest.TYPE_KINECT2: + if request.serial != '': + file.write(' \n') + file.write(' \n\n') + + file.write(' \n') + file.write(' \n') + #if request.serial != '': + #file.write(' \n') + #file.write(' \n') + #else: + #file.write(' \n') + file.write(' \n') + #file.write(' \n') + file.write(' \n\n') + file.write('\n') file.close(); rospy.loginfo(file_name + ' created!'); - + return (OPTSensorResponse.STATUS_OK, file_name + ' created!') - + ############################### + ############################### + + + + def handle_create_camera_poses(self, request) : - + file_name = self.camera_poses_dir + 'camera_poses.txt' file = open(file_name, 'w') file.write('# Auto-generated file.\n') file.write('# CALIBRATION ID: ' + str(request.calibration_id) + '\n') - + data = zip(request.child_id, request.transform) for item in data: t = item[1].translation r = item[1].rotation file.write(item[0] + ': ' + str(t.x) + ' ' + str(t.y) + ' ' + str(t.z) + ' ') file.write(str(r.x) + ' ' + str(r.y) + ' ' + str(r.z) + ' ' + str(r.w) + '\n') - + file.close() rospy.loginfo(file_name + ' created!'); - + return (OPTTransformResponse.STATUS_OK, file_name + ' created!') - + if __name__ == '__main__' : - + rospy.init_node('listener') - + try: - + listener = Listener() rospy.spin() - + except rospy.ROSInterruptException: pass From 49def94136a4e34620f8f26621fbfcf5e5df4736 Mon Sep 17 00:00:00 2001 From: yongheng1991 Date: Wed, 28 Jun 2017 06:19:35 +0200 Subject: [PATCH 9/9] modify the object detector to get RGB_point_cloud rather than IR_point cloud --- detection/launch/object_detector_kinect2.launch | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/detection/launch/object_detector_kinect2.launch b/detection/launch/object_detector_kinect2.launch index 531d410..4699992 100644 --- a/detection/launch/object_detector_kinect2.launch +++ b/detection/launch/object_detector_kinect2.launch @@ -6,7 +6,7 @@ - +
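A note on the association step added by this series: Tracker_object::updateTracks()
builds a track-by-detection distance matrix from a weighted detector + Mahalanobis
motion likelihood, gates entries above gate_distance_, and solves the assignment
with a Munkres solver. The sketch below is illustrative only and is not code from
the patch: Euclidean distance stands in for the joint likelihood, and a greedy pass
stands in for Munkres, just to make the gating logic concrete and runnable.

#include <cmath>
#include <iostream>
#include <vector>

struct XY { double x, y; };

int main()
{
  const double gate_distance = 2.0;  // plays the role of gate_distance_
  std::vector<XY> tracks     = { {0.0, 0.0}, {4.0, 1.0} };
  std::vector<XY> detections = { {3.9, 1.2}, {0.2, -0.1}, {9.0, 9.0} };

  // Distance matrix (the patch combines likelihood_weights_[0]*detector and
  // likelihood_weights_[1]*motion terms; plain Euclidean distance here).
  std::vector<std::vector<double> > dist(tracks.size(), std::vector<double>(detections.size()));
  for (size_t t = 0; t < tracks.size(); ++t)
    for (size_t d = 0; d < detections.size(); ++d)
      dist[t][d] = std::hypot(tracks[t].x - detections[d].x, tracks[t].y - detections[d].y);

  // Gated assignment: pairs beyond the gate are never associated
  // (createCostMatrix() inflates them to 1000000.0 before Munkres runs).
  std::vector<int> assigned(tracks.size(), -1);
  std::vector<bool> used(detections.size(), false);
  for (size_t t = 0; t < tracks.size(); ++t)
  {
    double best = gate_distance;
    for (size_t d = 0; d < detections.size(); ++d)
      if (!used[d] && dist[t][d] <= best)
      {
        best = dist[t][d];
        assigned[t] = int(d);
      }
    if (assigned[t] >= 0)
      used[assigned[t]] = true;
  }

  // Leftover detections would feed fillUnassociatedDetections() and then
  // createNewTracks(); unmatched tracks would be marked NOT_VISIBLE.
  for (size_t t = 0; t < tracks.size(); ++t)
    std::cout << "track " << t << " -> detection " << assigned[t] << std::endl;
  return 0;
}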