ROS Bluetooth and BlueZ

144 views · 1 reply · Posted: 2017-08-23 16:09:02

freshman

  • Topics: 1
  • Replies: 0
  • Points: 2


I would like to ask a question about using Bluetooth with ROS.

My current environment is Ubuntu 14.04 with ROS Indigo and BlueZ 4.1. I am using the cob_people_detection face recognition package (http://wiki.ros.org/cob_people_detection) and want to send the recognition results to another computer over Linux's built-in Bluetooth stack (BlueZ). Following the tutorial at http://people.csail.mit.edu/albert/bluez-intro/x502.html, I first wrote a simple client:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>

int main(int argc, char **argv)
{
    struct sockaddr_rc addr = { 0 };
    int s, status;
    char dest[18] = "01:23:45:67:89:AB";

    // allocate a socket
    s = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);

    // set the connection parameters (who to connect to)
    addr.rc_family = AF_BLUETOOTH;
    addr.rc_channel = (uint8_t) 1;
    str2ba(dest, &addr.rc_bdaddr);

    // connect to server
    status = connect(s, (struct sockaddr *)&addr, sizeof(addr));

    // send a message
    if (status == 0) {
        status = write(s, "hello!", 6);
    }

    if (status < 0)
        perror("uh oh");

    close(s);
    return 0;
}


I compiled it with gcc, linking against libbluetooth (gcc -o rfcomm-client rfcomm-client.c -lbluetooth), and successfully sent text messages between two computers.
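
For reference, the receiving side can be the matching RFCOMM server from the same tutorial. The sketch below is only an outline based on that tutorial; it listens on channel 1, which must match the client's rc_channel, accepts a single connection, prints the received text, and exits:

    /* Minimal RFCOMM server sketch (receiving side), based on the same BlueZ tutorial.
     * Compile with: gcc -o rfcomm-server rfcomm-server.c -lbluetooth */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <bluetooth/bluetooth.h>
    #include <bluetooth/rfcomm.h>

    int main(int argc, char **argv)
    {
        struct sockaddr_rc loc_addr = { 0 }, rem_addr = { 0 };
        char buf[1024] = { 0 };
        int s, client, bytes_read;
        socklen_t opt = sizeof(rem_addr);

        // allocate socket
        s = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);

        // bind socket to channel 1 of the first available local bluetooth adapter
        loc_addr.rc_family = AF_BLUETOOTH;
        loc_addr.rc_bdaddr = *BDADDR_ANY;
        loc_addr.rc_channel = (uint8_t) 1;
        bind(s, (struct sockaddr *)&loc_addr, sizeof(loc_addr));

        // put socket into listening mode and accept one connection
        listen(s, 1);
        client = accept(s, (struct sockaddr *)&rem_addr, &opt);

        ba2str(&rem_addr.rc_bdaddr, buf);
        fprintf(stderr, "accepted connection from %s\n", buf);
        memset(buf, 0, sizeof(buf));

        // read data from the client
        bytes_read = read(client, buf, sizeof(buf));
        if (bytes_read > 0)
            printf("received [%s]\n", buf);

        close(client);
        close(s);
        return 0;
    }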

I then tried to add the same code to people_detection_display_node.cpp inside the ROS package, so that the recognition results would be sent out over Bluetooth (BlueZ):


#include <cob_people_detection/people_detection_display_node.h>
#include <vector>
#include <string>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/rfcomm.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

using namespace ipa_PeopleDetector;

inline bool isnan_(double num)
{
    return (num != num);
}

PeopleDetectionDisplayNode::PeopleDetectionDisplayNode(ros::NodeHandle nh) :
        node_handle_(nh)
{
    it_ = 0;
    sync_input_3_ = 0;

    // parameters
    std::cout << "\n---------------------------\nPeople Detection Display Parameters:\n---------------------------\n";
    node_handle_.param("display", display_, false);
    std::cout << "display = " << display_ << "\n";
    node_handle_.param("display_timing", display_timing_, false);
    std::cout << "display_timing = " << display_timing_ << "\n";

    // subscribers
    // people_segmentation_image_sub_.subscribe(*it_, "people_segmentation_image", 1);
    it_ = new image_transport::ImageTransport(node_handle_);
    colorimage_sub_.subscribe(*it_, "colorimage_in", 1);
    face_recognition_subscriber_.subscribe(node_handle_, "face_position_array", 1);
    face_detection_subscriber_.subscribe(node_handle_, "face_detections", 1);
    // pointcloud_sub_.subscribe(node_handle_, "pointcloud_in", 1);

    // input synchronization
    // sync_input_3_ = new message_filters::Synchronizer<message_filters::sync_policies::ApproximateTime<cob_perception_msgs::DetectionArray, cob_perception_msgs::ColorDepthImageArray, sensor_msgs::PointCloud2> >(30);
    // sync_input_3_->connectInput(face_recognition_subscriber_, face_detection_subscriber_, pointcloud_sub_);
    sync_input_3_ = new message_filters::Synchronizer<message_filters::sync_policies::ApproximateTime<cob_perception_msgs::DetectionArray,
            cob_perception_msgs::ColorDepthImageArray, sensor_msgs::Image> >(60);
    sync_input_3_->connectInput(face_recognition_subscriber_, face_detection_subscriber_, colorimage_sub_);
    sync_input_3_->registerCallback(boost::bind(&PeopleDetectionDisplayNode::inputCallback, this, _1, _2, _3));

    // publishers
    people_detection_image_pub_ = it_->advertise("face_position_image", 1);

    std::cout << "PeopleDetectionDisplay initialized." << std::endl;
}

PeopleDetectionDisplayNode::~PeopleDetectionDisplayNode()
{
    if (it_ != 0)
        delete it_;
    if (sync_input_3_ != 0)
        delete sync_input_3_;
}

/// Converts a color image message to cv::Mat format.
unsigned long PeopleDetectionDisplayNode::convertColorImageMessageToMat(const sensor_msgs::Image::ConstPtr& image_msg, cv_bridge::CvImageConstPtr& image_ptr, cv::Mat& image)
{
    try
    {
        image_ptr = cv_bridge::toCvShare(image_msg, sensor_msgs::image_encodings::BGR8);
    } catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("PeopleDetection: cv_bridge exception: %s", e.what());
        return ipa_Utils::RET_FAILED;
    }
    image = image_ptr->image;
    return ipa_Utils::RET_OK;
}

/// checks the detected faces from the input topic against the people segmentation and outputs faces if both are positive
//void PeopleDetectionDisplayNode::inputCallback(const cob_perception_msgs::DetectionArray::ConstPtr& face_recognition_msg, const cob_perception_msgs::ColorDepthImageArray::ConstPtr& face_detection_msg, const sensor_msgs::PointCloud2::ConstPtr& pointcloud_msg)
void PeopleDetectionDisplayNode::inputCallback(const cob_perception_msgs::DetectionArray::ConstPtr& face_recognition_msg,
        const cob_perception_msgs::ColorDepthImageArray::ConstPtr& face_detection_msg, const sensor_msgs::Image::ConstPtr& color_image_msg)
{
    // convert color image to cv::Mat
    cv_bridge::CvImageConstPtr color_image_ptr;
    cv::Mat color_image;
    convertColorImageMessageToMat(color_image_msg, color_image_ptr, color_image);

    // // get color image from point cloud
    // pcl::PointCloud<pcl::PointXYZRGB> point_cloud_src;
    // pcl::fromROSMsg(*pointcloud_msg, point_cloud_src);
    //
    // cv::Mat color_image = cv::Mat::zeros(point_cloud_src.height, point_cloud_src.width, CV_8UC3);
    // for (unsigned int v=0; v<point_cloud_src.height; v++)
    // {
    //     for (unsigned int u=0; u<point_cloud_src.width; u++)
    //     {
    //         pcl::PointXYZRGB point = point_cloud_src(u,v);
    //         if (isnan_(point.z) == false)
    //             color_image.at<cv::Point3_<unsigned char> >(v,u) = cv::Point3_<unsigned char>(point.b, point.g, point.r);
    //     }
    // }

    // insert all detected heads and faces
    for (int i = 0; i < (int)face_detection_msg->head_detections.size(); i++)
    {
        // paint head
        const cob_perception_msgs::Rect& head_rect = face_detection_msg->head_detections[i].head_detection;
        cv::Rect head(head_rect.x, head_rect.y, head_rect.width, head_rect.height);
        cv::rectangle(color_image, cv::Point(head.x, head.y), cv::Point(head.x + head.width, head.y + head.height), CV_RGB(148, 219, 255), 2, 8, 0);

        // paint faces
        for (int j = 0; j < (int)face_detection_msg->head_detections[i].face_detections.size(); j++)
        {
            const cob_perception_msgs::Rect& face_rect = face_detection_msg->head_detections[i].face_detections[j];
            cv::Rect face(face_rect.x + head.x, face_rect.y + head.y, face_rect.width, face_rect.height);
            cv::rectangle(color_image, cv::Point(face.x, face.y), cv::Point(face.x + face.width, face.y + face.height), CV_RGB(191, 255, 148), 2, 8, 0);
        }
    }

    // insert recognized faces
    std::vector<std::string> name;
    name.reserve(30);
    //name.reserve(45);
    for (int i = 0; i < (int)face_recognition_msg->detections.size(); i++)
    {
        const cob_perception_msgs::Rect& face_rect = face_recognition_msg->detections[i].mask.roi;
        cv::Rect face(face_rect.x, face_rect.y, face_rect.width, face_rect.height);

        if (face_recognition_msg->detections[i].detector == "head")
            cv::rectangle(color_image, cv::Point(face.x, face.y), cv::Point(face.x + face.width, face.y + face.height), CV_RGB(0, 0, 255), 2, 8, 0);
        else
            cv::rectangle(color_image, cv::Point(face.x, face.y), cv::Point(face.x + face.width, face.y + face.height), CV_RGB(0, 255, 0), 2, 8, 0);

        if (face_recognition_msg->detections[i].label == "Unknown")
            // Distance to face class is too high
            cv::putText(color_image, "Unknown", cv::Point(face.x, face.y + face.height + 25), cv::FONT_HERSHEY_PLAIN, 2, CV_RGB(255, 0, 0), 2);
        else if (face_recognition_msg->detections[i].label == "No face")
            // Distance to face space is too high
            cv::putText(color_image, "No face", cv::Point(face.x, face.y + face.height + 25), cv::FONT_HERSHEY_PLAIN, 2, CV_RGB(255, 0, 0), 2);
        else
            // Face classified
            cv::putText(color_image, face_recognition_msg->detections[i].label.c_str(), cv::Point(face.x, face.y + face.height + 25), cv::FONT_HERSHEY_PLAIN, 2, CV_RGB(0, 255, 0), 2);

        //data 1(face recognize test)
        //cv::putText(color_image, "hello world", cv::Point(face.x+10, face.y + face.height + 50), cv::FONT_HERSHEY_PLAIN, 2, CV_RGB(255, 255, 0), 2);
        //emotion recognition;
        name.push_back(face_recognition_msg->detections[i].label.c_str());
    }

    for (int k = 0; k < (int)face_recognition_msg->detections.size(); k++)
    {
        //cv::putText(color_image, face_recognition_msg->detections[k].label.c_str(), cv::Point(50, 50+k*50), cv::FONT_HERSHEY_PLAIN, 2, CV_RGB(0,255,0), 2);

        // send bluetooth message
        struct sockaddr_rc addr = { 0 };
        int s, status;
        char dest[18] = "43:43:A1:12:1F:AC";

        // allocate a socket
        s = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM);

        // set the connection parameters (who to connect to)
        addr.rc_family = AF_BLUETOOTH;
        addr.rc_channel = (uint8_t) 1;
        str2ba(dest, &addr.rc_bdaddr);

        // connect to server
        status = connect(s, (struct sockaddr *)&addr, sizeof(addr));

        // send a message
        if (status == 0)
        {
            status = write(s, face_recognition_msg->detections[k].label.c_str(), 10);
        }
        if (status < 0)
            perror("uh oh");

        name.clear();
        close(s);
    }

    // display image
    if (display_ == true)
    {
        cv::imshow("Detections and Recognitions", color_image);
        cv::waitKey(10);
    }

    // publish image
    cv_bridge::CvImage cv_ptr;
    cv_ptr.image = color_image;
    cv_ptr.encoding = "bgr8";
    people_detection_image_pub_.publish(cv_ptr.toImageMsg());

    if (display_timing_ == true)
        ROS_INFO("%d Display: Time stamp of image message: %f. Delay: %f.", color_image_msg->header.seq, color_image_msg->header.stamp.toSec(),
                ros::Time::now().toSec() - color_image_msg->header.stamp.toSec());
}

//#######################
//#### main programm ####
int main(int argc, char** argv)
{
    // Initialize ROS, specify name of node
    ros::init(argc, argv, "people_detection_display");

    // Create a handle for this node, initialize node
    ros::NodeHandle nh("~");

    // Create FaceRecognizerNode class instance
    PeopleDetectionDisplayNode people_detection_display_node(nh);

    // Create action nodes
    //DetectObjectsAction detect_action_node(object_detection_node, nh);
    //AcquireObjectImageAction acquire_image_node(object_detection_node, nh);
    //TrainObjectAction train_object_node(object_detection_node, nh);

    ros::spin();

    return 0;
}


The Bluetooth code is the block inside the loop at the end of inputCallback() above. When I build the package with catkin_make, however, an error occurs; this is the relevant part of the catkin_make error output:


My guess is that catkin_make is not linking against libbluetooth, so str2ba cannot be resolved. How can I get the two linked successfully? Or is the problem somewhere else?
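
What I think is missing is something along these lines in the package's CMakeLists.txt. This is only a sketch: the target name people_detection_display_node is my assumption and has to match whatever name the package's add_executable() actually declares.

    # Sketch only: locate the system libbluetooth and link it into the node target
    find_library(BLUETOOTH_LIB bluetooth)

    target_link_libraries(people_detection_display_node
      ${catkin_LIBRARIES}
      ${BLUETOOTH_LIB}   # same effect as gcc's -lbluetooth
    )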


机器人五度空间

  • Topics: 0
  • Replies: 11
  • Points: 11
Posted 2017-10-18 09:45:41 · 1st reply

Shanghai Jiao Tong University 机器人五度空间 learning platform · Turtlebot · Robot Operating System (ROS), SLAM, robot vision, natural language processing, pattern recognition · 514624672

