diff --git a/package.xml b/package.xml
index 816df05..ac289e1 100644
--- a/package.xml
+++ b/package.xml
@@ -28,12 +28,16 @@
   manipulation2
   openrave
   openrave_catkin
+  or_mesh_marker
+  chisel_msgs
+  offscreen_render
   python-scipy
   python-termcolor
   python-trollius
   python-rospkg
   python-yaml
   python-lxml
+  tf
   python-nose
   cbirrt2
diff --git a/src/prpy/perception/chisel.py b/src/prpy/perception/chisel.py
new file mode 100644
index 0000000..09070ba
--- /dev/null
+++ b/src/prpy/perception/chisel.py
@@ -0,0 +1,146 @@
+import logging
+import openravepy
+from base import PerceptionModule, PerceptionMethod
+
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO)
+
+
+class ChiselModule(PerceptionModule):
+
+    def __init__(self, env, service_namespace='Chisel', mesh_name='Chisel/full_mesh',
+                 detection_frame='/herb_base', destination_frame='/map', reference_link=None):
+        """
+        The perception module implementation for CHISEL. It converts all
+        unmodelled clutter from the point cloud to an OpenRAVE KinBody,
+        after masking out specified known objects already in the environment.
+
+        @param env The OpenRAVE environment
+        @param service_namespace The namespace to use for Chisel service calls
+            (default: Chisel)
+        @param mesh_name The name of the topic on which the Chisel marker is
+            published
+        @param detection_frame The tf frame in which the Chisel mesh is
+            detected (default: /herb_base)
+        @param destination_frame The tf frame the detected Chisel kinbody
+            should be transformed to (default: /map)
+        @param reference_link A link on an OpenRAVE kinbody that corresponds
+            to the tf frame specified by the destination_frame parameter. If
+            not provided, the transform from the destination_frame tf frame to
+            the OpenRAVE environment is assumed to be the identity.
+        """
+        import rospy
+
+        self.mesh_client = openravepy.RaveCreateSensorSystem(env, 'mesh_marker')
+
+        if self.mesh_client is None:
+            raise RuntimeError('Could not create mesh client')
+
+        self.serv_ns = service_namespace
+        self.mesh_name = mesh_name
+
+        # Create a ROS camera used to generate a synthetic point cloud
+        # to mask known kinbodies from the Chisel mesh
+        from offscreen_render import ros_camera_sim
+        self.camera = ros_camera_sim.RosCameraSim(env)
+        self.camera.start('/head/kinect2/qhd')
+        self.camera_bodies = []
+
+        self.reset_detection_srv = None
+        self.detection_frame = detection_frame
+        self.destination_frame = destination_frame
+        self.reference_link = reference_link
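+
+    # A minimal usage sketch (illustrative only; the robot and the
+    # 'fuze_bottle' kinbody are assumptions, not part of this module):
+    #   env = robot.GetEnv()
+    #   chisel = ChiselModule(env, detection_frame='/herb_base',
+    #                         destination_frame='/map')
+    #   mesh = chisel.DetectObject(robot,
+    #                              ignored_bodies=[env.GetKinBody('fuze_bottle')])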
+
+    @PerceptionMethod
+    def DetectObject(self, robot, ignored_bodies=None, timeout=5, **kw_args):
+        """
+        Obtain the KinBody corresponding to the Chisel mesh after masking out
+        any existing kinbodies if desired (as specified in ignored_bodies).
+
+        @param robot Required by the PerceptionMethod interface
+        @param ignored_bodies A list of known kinbodies to be masked out
+        @param timeout The timeout, in seconds, to wait for the Chisel service
+        """
+        env = robot.GetEnv()
+
+        if ignored_bodies is None:
+            ignored_bodies = []
+
+        # Remove any Chisel kinbody already in the environment
+        chisel_kb = env.GetKinBody(self.mesh_name)
+        if chisel_kb is not None:
+            env.Remove(chisel_kb)
+
+        chisel_mesh = None
+        with env:
+            try:
+                from kinbody_helper import transform_from_or
+                import openravepy
+                # Add any ignored bodies to the camera.
+                # We need to transform all these bodies from their pose
+                # in OpenRAVE to the equivalent pose in TF so that
+                # Chisel can perform the appropriate masking.
+                # Then we will transform them all back after the server
+                # performs the detection.
+                for b in ignored_bodies:
+                    if b not in self.camera_bodies:
+                        poses = self.SamplePoses(b.GetTransform())
+                        orig_name = b.GetName()
+
+                        for idx, pose in enumerate(poses):
+                            bcloned = openravepy.RaveCreateKinBody(env, b.GetXMLId())
+                            bcloned.Clone(b, 0)
+
+                            bcloned.SetName('%s_%d' % (orig_name, idx))
+                            env.Add(bcloned)
+                            transform_from_or(kinbody=bcloned,
+                                              pose=pose,
+                                              detection_frame='/map',
+                                              destination_frame=self.destination_frame,
+                                              reference_link=self.reference_link)
+                            self.camera.add_body(bcloned)
+                            self.camera_bodies.append(bcloned)
+
+                # Reset Chisel
+                import rospy, chisel_msgs.srv
+                srv_nm = self.serv_ns + '/Reset'
+                rospy.wait_for_service(srv_nm, timeout)
+                reset_detection_srv = rospy.ServiceProxy(srv_nm,
+                                                         chisel_msgs.srv.ResetService)
+                reset_detection_srv()
+
+                # Get the kinbody and load it into the environment
+                self.mesh_client.SendCommand('GetMesh ' + self.mesh_name)
+                chisel_mesh = env.GetKinBody(self.mesh_name)
+
+            finally:
+                # Remove the masking bodies that were added to the camera
+                for b in self.camera_bodies:
+                    self.camera.remove_body_now(b)
+                    env.Remove(b)
+                self.camera_bodies = []
+
+        if chisel_mesh is not None and self.reference_link is not None:
+            # Apply a transform to the Chisel kinbody to put it in the correct
+            # location in the OpenRAVE environment
+            from kinbody_helper import transform_to_or
+            transform_to_or(kinbody=chisel_mesh,
+                            detection_frame=self.detection_frame,
+                            destination_frame=self.destination_frame,
+                            reference_link=self.reference_link)
+
+        return chisel_mesh
+
+    def SamplePoses(self, orig_transform, radius=0.05):
+        """
+        Sample poses on the sphere of the given radius around the original
+        pose. Sampled poses keep the original orientation but have different
+        translations.
+        @param orig_transform The original pose
+        @param radius The radius of the sphere to sample from
+        """
+        import copy, numpy
+        retvals = []
+        for i in [-1., 0., 1.]:
+            for j in [-1., 0., 1.]:
+                for k in [-1., 0., 1.]:
+                    v = numpy.array([i, j, k])
+                    if numpy.linalg.norm(v) > 0:
+                        v = radius * v / numpy.linalg.norm(v)
+                        new_transform = copy.deepcopy(orig_transform)
+                        new_transform[:3,3] = orig_transform[:3,3] + v
+                        retvals.append(new_transform)
+        return retvals
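+
+# A quick numeric illustration of SamplePoses (assuming the default radius
+# of 0.05): the 26 sampled translations lie on the sphere of radius 0.05 m
+# around the original translation. For example, the corner direction
+# [-1, -1, -1] is normalized and scaled, contributing an offset of
+# 0.05/sqrt(3) ~= 0.0289 m along each axis.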
diff --git a/src/prpy/perception/kinbody_helper.py b/src/prpy/perception/kinbody_helper.py
new file mode 100644
index 0000000..36731c6
--- /dev/null
+++ b/src/prpy/perception/kinbody_helper.py
@@ -0,0 +1,101 @@
+
+def transform_to_or(kinbody, detection_frame=None, destination_frame=None,
+                    detection_in_destination=None,
+                    reference_link=None, pose=None):
+    """
+    Transform the pose of a kinbody from a pose determined using TF to
+    the correct relative pose in OpenRAVE. This transformation is performed
+    by providing a link in OpenRAVE that corresponds directly to a frame in TF.
+
+    @param kinbody The kinbody to transform
+    @param detection_frame The tf frame the kinbody was originally detected in
+    @param destination_frame A tf frame that has a direct correspondence with
+        a link on an OpenRAVE kinbody
+    @param detection_in_destination A cached 4x4 transform from detection_frame
+        to destination_frame (if None, it is looked up via tf)
+    @param reference_link The OpenRAVE link that corresponds to the tf frame
+        given by the destination_frame parameter (if None it is assumed
+        that the transform between the OpenRAVE world and the destination_frame
+        tf frame is the identity)
+    @param pose The pose to transform (if None, kinbody.GetTransform() is used)
+    """
+    import numpy
+    if pose is None:
+        with kinbody.GetEnv():
+            pose = kinbody.GetTransform()
+
+    if detection_in_destination is None:
+        import tf, rospy
+        listener = tf.TransformListener()
+
+        listener.waitForTransform(
+            detection_frame, destination_frame,
+            rospy.Time(),
+            rospy.Duration(10))
+
+        frame_trans, frame_rot = listener.lookupTransform(
+            destination_frame, detection_frame,
+            rospy.Time(0))
+
+        from tf.transformations import quaternion_matrix
+        detection_in_destination = numpy.array(numpy.matrix(quaternion_matrix(frame_rot)))
+        detection_in_destination[:3,3] = frame_trans
+
+    with kinbody.GetEnv():
+        body_in_destination = numpy.dot(detection_in_destination, pose)
+
+        if reference_link is not None:
+            destination_in_or = reference_link.GetTransform()
+        else:
+            destination_in_or = numpy.eye(4)
+
+        body_in_or = numpy.dot(destination_in_or, body_in_destination)
+        kinbody.SetTransform(body_in_or)
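+
+# The composition above, written out (frame names are illustrative): a pose
+# detected in the camera frame is re-expressed in the OpenRAVE world by
+# chaining transforms:
+#   body_in_or = destination_in_or * detection_in_destination * body_in_detection
+# e.g. with destination_frame='/map' and reference_link set to the robot's
+# base link, destination_in_or is that link's transform and the middle term
+# comes from a single tf lookup.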
+
+def transform_from_or(kinbody, detection_frame, destination_frame,
+                      reference_link=None, pose=None):
+    """
+    Transform the pose of a kinbody from an OpenRAVE pose to the correct
+    relative pose in TF. This transformation is performed by providing a
+    link in OpenRAVE that corresponds directly to a frame in TF.
+
+    @param kinbody The kinbody to transform
+    @param detection_frame The tf frame the kinbody was originally detected in
+    @param destination_frame A tf frame that has a direct correspondence with
+        a link on an OpenRAVE kinbody
+    @param reference_link The OpenRAVE link that corresponds to the tf frame
+        given by the destination_frame parameter (if None it is assumed
+        that the transform between the OpenRAVE world and the destination_frame
+        tf frame is the identity)
+    @param pose The pose to transform (if None, kinbody.GetTransform() is used)
+    """
+
+    import numpy
+    import tf
+    import rospy
+    listener = tf.TransformListener()
+
+    if pose is None:
+        with kinbody.GetEnv():
+            pose = kinbody.GetTransform()
+
+    listener.waitForTransform(
+        detection_frame, destination_frame,
+        rospy.Time(),
+        rospy.Duration(10))
+
+    frame_trans, frame_rot = listener.lookupTransform(
+        detection_frame, destination_frame,
+        rospy.Time(0))
+
+    from tf.transformations import quaternion_matrix
+    destination_in_detection = numpy.array(numpy.matrix(quaternion_matrix(frame_rot)))
+    destination_in_detection[:3,3] = frame_trans
+
+    with kinbody.GetEnv():
+        body_in_or = pose
+        if reference_link is not None:
+            or_in_destination = numpy.linalg.inv(reference_link.GetTransform())
+        else:
+            or_in_destination = numpy.eye(4)
+        body_in_destination = numpy.dot(or_in_destination, body_in_or)
+
+        body_in_detection = numpy.dot(destination_in_detection, body_in_destination)
+        kinbody.SetTransform(body_in_detection)
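+
+# transform_from_or composes the inverse chain of transform_to_or
+# (illustrative):
+#   body_in_detection = destination_in_detection * or_in_destination * body_in_or
+# so a round trip of transform_from_or followed by transform_to_or should
+# return the kinbody to (numerically) the same OpenRAVE pose.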
diff --git a/src/prpy/perception/simtrack.py b/src/prpy/perception/simtrack.py
index c65058a..58e3205 100644
--- a/src/prpy/perception/simtrack.py
+++ b/src/prpy/perception/simtrack.py
@@ -7,14 +7,16 @@
 
 class SimtrackModule(PerceptionModule):
 
-    def __init__(self, kinbody_path, detection_frame, world_frame,
+    def __init__(self, kinbody_path, detection_frame,
+                 destination_frame=None, reference_link=None,
                  service_namespace=None):
         """
         This initializes a simtrack detector.
 
         @param kinbody_path The path to the folder where kinbodies are stored
         @param detection_frame The TF frame of the camera
-        @param world_frame The desired world TF frame
+        @param destination_frame The tf frame that the kinbody should be transformed to
+        @param reference_link The OpenRAVE link that corresponds to the tf destination_frame
         @param service_namespace The namespace for the simtrack service (default: /simtrack)
         """
         import rospy
@@ -25,19 +27,16 @@ def __init__(self, kinbody_path, detection_frame, world_frame,
             rospy.init_node('simtrack_detector', anonymous=True)
         except rospy.exceptions.ROSException:
             pass
-
-        #For getting transforms in world frame
-        if detection_frame is not None and world_frame is not None:
-            self.listener = tf.TransformListener()
-        else:
-            self.listener = None
-
+
         if service_namespace is None:
             service_namespace='/simtrack'
+        self.service_namespace = service_namespace
 
         self.detection_frame = detection_frame
-        self.world_frame = world_frame
-        self.service_namespace = service_namespace
+        if destination_frame is None:
+            destination_frame='/map'
+        self.destination_frame = destination_frame
+        self.reference_link = reference_link
 
         self.kinbody_path = kinbody_path
 
@@ -46,6 +45,9 @@ def __init__(self, kinbody_path, detection_frame, world_frame,
                                      'pop_tarts': 'pop_tarts_visual'}
         self.query_to_kinbody_map = {'fuze_bottle_visual': 'fuze_bottle',
                                      'pop_tarts_visual': 'pop_tarts'}
+
+        # A dictionary of subscribers to topic - one for each body
+        self.track_sub = {}
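+
+        # A minimal construction sketch (illustrative; the path and frame
+        # names are assumptions, not defaults of this module):
+        #   detector = SimtrackModule('/path/to/kinbodies',
+        #                             detection_frame='/head/kinect2_rgb_optical_frame',
+        #                             destination_frame='/map')
+        #   bottle = detector.DetectObject(robot, 'fuze_bottle')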
 
     @staticmethod
     def _MsgToPose(msg):
@@ -65,39 +67,15 @@ def _MsgToPose(msg):
 
         return pose
 
-    def _LocalToWorld(self,pose):
-        """
-        Transform a pose from local frame to world frame
-        @param pose The 4x4 transformation matrix containing the pose to transform
-        @return The 4x4 transformation matrix describing the pose in world frame
-        """
-        import rospy
-        #Get pose w.r.t world frame
-        self.listener.waitForTransform(self.world_frame,self.detection_frame,
-                                       rospy.Time(),rospy.Duration(10))
-        t, r = self.listener.lookupTransform(self.world_frame,self.detection_frame,
-                                             rospy.Time(0))
-
-        #Get relative transform between frames
-        offset_to_world = numpy.matrix(transformations.quaternion_matrix(r))
-        offset_to_world[0,3] = t[0]
-        offset_to_world[1,3] = t[1]
-        offset_to_world[2,3] = t[2]
-
-        #Compose with pose to get pose in world frame
-        result = numpy.array(numpy.dot(offset_to_world, pose))
-
-        return result
-
-
     def _GetDetections(self, obj_names):
-        import simtrack_msgs.srv
         """
-        Calls the service to get a detection of a particular object.
+        Calls the service to get detections of the requested objects.
 
-        @param obj_name The name of the object to detect
-        @return A 4x4 transformation matrix describing the pose of the object
-                in world frame, None if the object is not detected
+        @param obj_names The names of the objects to detect
+        @return A list of (name, pose) tuples, where each pose is a 4x4
+                transformation matrix in the detection frame; empty if
+                nothing is detected
         """
+        import simtrack_msgs.srv, rospy
+
         #Call detection service for a particular object
         detect_simtrack = rospy.ServiceProxy(self.service_namespace+'/detect_objects',
                                              simtrack_msgs.srv.DetectObjects)
@@ -111,8 +89,6 @@ def _GetDetections(self, obj_names):
             obj_name = detect_resp.detected_models[i];
             obj_pose = detect_resp.detected_poses[i];
             obj_pose_tf = self._MsgToPose(obj_pose);
-            if (self.detection_frame is not None and self.world_frame is not None):
-                obj_pose_tf = self._LocalToWorld(obj_pose_tf)
             detections.append((obj_name, obj_pose_tf));
 
         return detections
@@ -133,19 +109,24 @@ def DetectObject(self, robot, obj_name, **kw_args):
             raise PerceptionException('The simtrack module cannot detect object %s', obj_name)
 
         query_name = self.kinbody_to_query_map[obj_name]
-        obj_poses = self._GetDetections(query_name)
-        if len(obj_poses is 0):
+        obj_poses = self._GetDetections([query_name])
+        if len(obj_poses) == 0:
             raise PerceptionException('Failed to detect object %s', obj_name)
 
         obj_pose = None
+        print query_name
         for (name, pose) in obj_poses:
-            if (name is query_name):
+            print name
+            if name == query_name:
                 obj_pose = pose
                 break;
 
         if (obj_pose is None):
             raise PerceptionException('Failed to detect object %s', obj_name)
+
+        if numpy.array_equal(obj_pose, numpy.eye(4)):
+            raise PerceptionException('Object %s detected but failed to compute pose', obj_name)
 
         env = robot.GetEnv()
         if env.GetKinBody(obj_name) is None:
@@ -156,8 +137,18 @@ def DetectObject(self, robot, obj_name, **kw_args):
                 obj_name,
                 os.path.join(self.kinbody_path, kinbody_file))
 
+        print 'Object pose: ', obj_pose
         body = env.GetKinBody(obj_name)
-        body.SetTransform(obj_pose)
+
+        # Apply a transform to the kinbody to put it in the
+        # desired location in OpenRAVE
+        from kinbody_helper import transform_to_or
+        transform_to_or(kinbody=body,
+                        detection_frame=self.detection_frame,
+                        destination_frame=self.destination_frame,
+                        reference_link=self.reference_link,
+                        pose=obj_pose)
+
         return body
 
     @PerceptionMethod
@@ -166,6 +157,8 @@ def DetectObjects(self, robot, **kw_args):
         Overridden method for detection_frame
         """
        from prpy.perception.base import PerceptionException
+        from kinbody_helper import transform_to_or
+
         env = robot.GetEnv()
         # Detecting empty list will detect all possible objects
         detections = self._GetDetections([])
@@ -175,7 +168,6 @@ def DetectObjects(self, robot, **kw_args):
                 continue
 
             kinbody_name = self.query_to_kinbody_map[obj_name]
-
             if env.GetKinBody(kinbody_name) is None:
                 from prpy.rave import add_object
                 kinbody_file = '%s.kinbody.xml' % kinbody_name
@@ -183,8 +175,111 @@ def DetectObjects(self, robot, **kw_args):
                     env,
                     kinbody_name,
                     os.path.join(self.kinbody_path, kinbody_file))
-            print kinbody_name
+
+
             body = env.GetKinBody(kinbody_name)
-            body.SetTransform(obj_pose)
+            transform_to_or(kinbody=body,
+                            detection_frame=self.detection_frame,
+                            destination_frame=self.destination_frame,
+                            reference_link=self.reference_link,
+                            pose=obj_pose)
+
+    def _UpdatePose(self, msg, kwargs):
+        """
+        Update the pose of a tracked object from a new pose message
+        """
+        pose_tf = self._MsgToPose(msg)
+
+        robot = kwargs['robot']
+        obj_name = kwargs['obj_name']
+        detection_in_destination = kwargs['detection_in_destination']
+
+        env = robot.GetEnv()
+        with env:
+            obj = env.GetKinBody(obj_name)
+            if obj is None:
+                from prpy.rave import add_object
+                kinbody_file = '%s.kinbody.xml' % obj_name
+                new_body = add_object(
+                    env,
+                    obj_name,
+                    os.path.join(self.kinbody_path, kinbody_file))
+
+                with env:
+                    obj = env.GetKinBody(obj_name)
+
+        from kinbody_helper import transform_to_or
+        if detection_in_destination is not None:
+            transform_to_or(kinbody=obj,
+                            detection_in_destination=detection_in_destination,
+                            reference_link=self.reference_link,
+                            pose=pose_tf)
+        else:
+            transform_to_or(kinbody=obj,
+                            detection_frame=self.detection_frame,
+                            destination_frame=self.destination_frame,
+                            reference_link=self.reference_link,
+                            pose=pose_tf)
+
+    @PerceptionMethod
+    def StartTrackObject(self, robot, obj_name, cache_transform=True, **kw_args):
+        """
+        Subscribe to the pose array for an object in order to track it.
+        @param robot The OpenRAVE robot
+        @param obj_name The name of the object to track
+        @param cache_transform If True, look up the transform from
+        detection_frame to destination_frame once, cache it and use it for
+        the duration of tracking. This speeds up tracking by removing the
+        need to perform a tf lookup in the callback.
+        """
+        import rospy
+        from geometry_msgs.msg import PoseStamped
+        from prpy.perception.base import PerceptionException
+
+        if obj_name not in self.kinbody_to_query_map:
+            raise PerceptionException('The simtrack module cannot track object %s' % obj_name)
+
+        query_name = self.kinbody_to_query_map[obj_name]
+        pose_topic = self.service_namespace + '/' + query_name
+
+        if obj_name in self.track_sub and self.track_sub[obj_name] is not None:
+            raise PerceptionException('The object %s is already being tracked' % obj_name)
+
+        # Caching this transform speeds up tracking by removing the need
+        # to do a tf lookup in the callback
+        detection_in_destination=None
+        if cache_transform:
+            import numpy, tf, rospy
+            listener = tf.TransformListener()
+
+            listener.waitForTransform(
+                self.detection_frame, self.destination_frame,
+                rospy.Time(),
+                rospy.Duration(10))
+
+            frame_trans, frame_rot = listener.lookupTransform(
+                self.destination_frame, self.detection_frame,
+                rospy.Time(0))
+
+            from tf.transformations import quaternion_matrix
+            detection_in_destination = numpy.array(numpy.matrix(quaternion_matrix(frame_rot)))
+            detection_in_destination[:3,3] = frame_trans
+
+        self.track_sub[obj_name] = rospy.Subscriber(pose_topic,
+                                                    PoseStamped,
+                                                    callback=self._UpdatePose,
+                                                    callback_args={
+                                                        'robot': robot,
+                                                        'obj_name': obj_name,
+                                                        'detection_in_destination': detection_in_destination},
+                                                    queue_size=1)
+
+    @PerceptionMethod
+    def StopTrackObject(self, robot, obj_name, **kw_args):
+        """
+        Unsubscribe from the pose array to end tracking
+        @param robot The OpenRAVE robot
+        @param obj_name The name of the object to stop tracking
+        """
+        if obj_name in self.track_sub and self.track_sub[obj_name] is not None:
+            self.track_sub[obj_name].unregister()
+            self.track_sub[obj_name] = None
diff --git a/src/prpy/perception/tracking/base.py b/src/prpy/perception/tracking/base.py
new file mode 100644
index 0000000..ae3a29f
--- /dev/null
+++ b/src/prpy/perception/tracking/base.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+import functools
+
+class TrackingException(Exception):
+    pass
+
+class TrackingMethod(object):
+
+    def __init__(self, func):
+        self.func = func
+
+    def __call__(self, instance, robot, *args, **kw_args):
+        return self.func(instance, robot, *args, **kw_args)
+
+    def __get__(self, instance, instancetype):
+        wrapper = functools.partial(self.__call__, instance)
+        functools.update_wrapper(wrapper, self.func)
+        wrapper.is_tracking_method = True
+        return wrapper
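+
+# A minimal sketch of how the decorator is meant to be consumed
+# (illustrative; MyTracker is an assumption, not part of this module):
+#
+#   class MyTracker(TrackingModule):
+#       @TrackingMethod
+#       def StartTrackObject(self, robot, obj_name):
+#           pass
+#
+#   MyTracker().has_tracking_method('StartTrackObject')  # -> True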
+
+class TrackingModule(object):
+    def has_tracking_method(self, method_name):
+        """
+        Check if this module has the desired TrackingMethod
+        """
+        if hasattr(self, method_name):
+            method = getattr(self, method_name)
+            if hasattr(method, 'is_tracking_method'):
+                return method.is_tracking_method
+            else:
+                return False
+        else:
+            return False
+
+    def get_tracking_method_names(self):
+        """
+        @return A list of all the TrackingMethod functions
+        defined for this module
+        """
+        return filter(lambda method_name: self.has_tracking_method(method_name), dir(self))
+
diff --git a/src/prpy/perception/tracking/point_cloud_tracker.py b/src/prpy/perception/tracking/point_cloud_tracker.py
new file mode 100644
index 0000000..5aaf106
--- /dev/null
+++ b/src/prpy/perception/tracking/point_cloud_tracker.py
@@ -0,0 +1,51 @@
+from base import TrackingModule, TrackingMethod, TrackingException
+import openravepy
+
+class PointCloudTracker(TrackingModule):
+
+    def __init__(self, env, depth_info='/head/kinect2/qhd/camera_info',
+                 pcl_topic='/head/kinect2/qhd/points'):
+        """
+        Initializes an OpenRAVE plugin that tracks the point cloud
+        corresponding to a provided KinBody.
+        @param env The OpenRAVE environment
+        @param depth_info The camera info topic of the depth camera
+        @param pcl_topic The point cloud topic to track against
+        """
+        self.env = env
+        self.depth_info = depth_info
+        self.pcl_topic = pcl_topic
+        self.tracked_objects = list()
+
+        #Create tracker module
+        self.tracker = openravepy.RaveCreateModule(env, "object_tracker")
+
+        #Initialize tracker
+        tracker_init_message = self.tracker.SendCommand(
+            'Initialize ' + self.depth_info + ' ' + self.pcl_topic)
+
+    @TrackingMethod
+    def StartTrackObject(self, obj_name, n_iters=300):
+        """
+        Begin tracking the OpenRAVE object. This call blocks while the
+        tracker runs and updates the kinbody pose.
+        @param obj_name The name of the kinbody to track
+        @param n_iters The number of tracking iterations to block for
+        """
+        if obj_name in self.tracked_objects:
+            raise TrackingException('The object %s is already being tracked' % obj_name)
+
+        self.tracked_objects.append(obj_name)
+        self.tracker.SendCommand("Track " + obj_name + ' ' + str(n_iters))
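+
+    # A minimal usage sketch (illustrative; the environment setup and body
+    # name are assumptions, and StartTrackObject blocks for n_iters
+    # iterations):
+    #   tracker = PointCloudTracker(env)
+    #   tracker.StartTrackObject('fuze_bottle', n_iters=300)
+    #   tracker.StopTrackObject('fuze_bottle')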
+
+    @TrackingMethod
+    def StopTrackObject(self, obj_name):
+        """
+        Stop tracking the given object. Currently this only updates
+        bookkeeping, because the PCL tracker stops on its own after the
+        requested number of iterations.
+        @param obj_name The name of the object to stop tracking
+        """
+        if obj_name not in self.tracked_objects:
+            print '{0} is not being tracked!'.format(obj_name)
+        else:
+            self.tracked_objects.remove(obj_name)
\ No newline at end of file
diff --git a/src/prpy/perception/tracking/sim_track_tracker.py b/src/prpy/perception/tracking/sim_track_tracker.py
new file mode 100644
index 0000000..4d21a9c
--- /dev/null
+++ b/src/prpy/perception/tracking/sim_track_tracker.py
@@ -0,0 +1,124 @@
+from base import TrackingModule, TrackingMethod, TrackingException
+
+class SimTracker(TrackingModule):
+
+    def __init__(self, env, detection_frame, destination_frame='/map',
+                 service_namespace='/simtrack',
+                 kinbody_path=None, reference_link=None):
+        """
+        A tracking module that subscribes to simtrack pose topics.
+        @param env The OpenRAVE environment
+        @param detection_frame The TF frame of the camera
+        @param destination_frame The tf frame the kinbody should be transformed to
+        @param service_namespace The namespace for the simtrack service
+        @param kinbody_path The path to the folder where kinbodies are stored
+        @param reference_link The OpenRAVE link corresponding to destination_frame
+        """
+        self.env = env
+        self.service_namespace = service_namespace
+        self.kinbody_path = kinbody_path
+        self.reference_link = reference_link
+
+        # A map of known objects that can be queried
+        self.kinbody_to_query_map = {'fuze_bottle': 'fuze_bottle_visual',
+                                     'pop_tarts': 'pop_tarts_visual'}
+        self.query_to_kinbody_map = {'fuze_bottle_visual': 'fuze_bottle',
+                                     'pop_tarts_visual': 'pop_tarts'}
+
+        # A dictionary of subscribers to topic - one for each body
+        self.track_sub = {}
+
+        self.detection_frame = detection_frame
+        self.destination_frame = destination_frame
+
+
+    @TrackingMethod
+    def StartTrackObject(self, robot, obj_name, cache_transform=True):
+        """
+        Subscribe to the pose array for an object in order to track it.
+        @param robot The OpenRAVE robot
+        @param obj_name The name of the object to track
+        @param cache_transform If True, look up the transform from
+        detection_frame to destination_frame once, cache it and use it for
+        the duration of tracking. This speeds up tracking by removing the
+        need to perform a tf lookup in the callback.
+        """
+        import rospy
+        from geometry_msgs.msg import PoseStamped
+
+        if obj_name not in self.kinbody_to_query_map:
+            raise TrackingException('The simtrack tracker cannot track object %s' % obj_name)
+
+        query_name = self.kinbody_to_query_map[obj_name]
+        pose_topic = self.service_namespace + '/' + query_name
+
+        if obj_name in self.track_sub and self.track_sub[obj_name] is not None:
+            raise TrackingException('The object %s is already being tracked' % obj_name)
+
+        detection_in_destination=None
+        if cache_transform:
+            import numpy, tf, rospy
+            listener = tf.TransformListener()
+
+            listener.waitForTransform(
+                self.detection_frame, self.destination_frame,
+                rospy.Time(),
+                rospy.Duration(10))
+
+            frame_trans, frame_rot = listener.lookupTransform(
+                self.destination_frame, self.detection_frame,
+                rospy.Time(0))
+
+            from tf.transformations import quaternion_matrix
+            detection_in_destination = numpy.array(numpy.matrix(quaternion_matrix(frame_rot)))
+            detection_in_destination[:3,3] = frame_trans
+
+        self.track_sub[obj_name] = rospy.Subscriber(pose_topic,
+                                                    PoseStamped,
+                                                    callback=self._UpdatePose,
+                                                    callback_args={
+                                                        'robot': robot,
+                                                        'obj_name': obj_name,
+                                                        'detection_in_destination': detection_in_destination},
+                                                    queue_size=1)
+
+
+    @TrackingMethod
+    def StopTrackObject(self, robot, obj_name, **kw_args):
+        """
+        Unsubscribe from the pose array to end tracking
+        @param robot The OpenRAVE robot
+        @param obj_name The name of the object to stop tracking
+        """
+        if obj_name in self.track_sub and self.track_sub[obj_name] is not None:
+            self.track_sub[obj_name].unregister()
+            self.track_sub[obj_name] = None
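+
+    # NOTE: _UpdatePose below calls self._MsgToPose, but no such helper is
+    # defined in this file. A minimal sketch is provided here, assuming the
+    # same PoseStamped-to-matrix conversion used by simtrack.py.
+    @staticmethod
+    def _MsgToPose(msg):
+        """
+        Convert a geometry_msgs/PoseStamped message to a 4x4 transform matrix.
+        """
+        from tf import transformations
+        p = msg.pose.position
+        q = msg.pose.orientation
+        # tf.transformations expects quaternions in (x, y, z, w) order
+        pose = transformations.quaternion_matrix([q.x, q.y, q.z, q.w])
+        pose[0, 3] = p.x
+        pose[1, 3] = p.y
+        pose[2, 3] = p.z
+        return pose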
+
+    def _UpdatePose(self, msg, kwargs):
+        """
+        Update the pose of a tracked object from a new pose message
+        """
+        pose_tf = self._MsgToPose(msg)
+
+        robot = kwargs['robot']
+        obj_name = kwargs['obj_name']
+        detection_in_destination = kwargs['detection_in_destination']
+
+        env = robot.GetEnv()
+        with env:
+            obj = env.GetKinBody(obj_name)
+            if obj is None:
+                import os
+                from prpy.rave import add_object
+                kinbody_file = '%s.kinbody.xml' % obj_name
+                new_body = add_object(
+                    env,
+                    obj_name,
+                    os.path.join(self.kinbody_path, kinbody_file))
+
+                with env:
+                    obj = env.GetKinBody(obj_name)
+
+        from kinbody_helper import transform_to_or
+        if detection_in_destination is not None:
+            transform_to_or(kinbody=obj,
+                            detection_in_destination=detection_in_destination,
+                            reference_link=self.reference_link,
+                            pose=pose_tf)
+        else:
+            transform_to_or(kinbody=obj,
+                            detection_frame=self.detection_frame,
+                            destination_frame=self.destination_frame,
+                            reference_link=self.reference_link,
+                            pose=pose_tf)
\ No newline at end of file
diff --git a/src/prpy/perception/vncc.py b/src/prpy/perception/vncc.py
index b28edba..a911304 100644
--- a/src/prpy/perception/vncc.py
+++ b/src/prpy/perception/vncc.py
@@ -12,14 +12,17 @@
 
 class VnccModule(PerceptionModule):
 
-    def __init__(self, kinbody_path, detection_frame, world_frame,
+    def __init__(self, kinbody_path, detection_frame,
+                 destination_frame=None, reference_link=None,
                  service_namespace=None):
         """
         This initializes a VNCC detector.
 
         @param kinbody_path The path to the folder where kinbodies are stored
         @param detection_frame The TF frame of the camera
-        @param world_frame The desired world TF frame
+        @param destination_frame The tf frame that the kinbody should be transformed to
+        @param reference_link The OpenRAVE link that corresponds to the tf frame
+            given by the destination_frame parameter
         @param service_namespace The namespace for the VNCC service (default: /vncc)
         """
 
@@ -39,7 +42,11 @@ def __init__(self, kinbody_path, detection_frame, world_frame,
             service_namespace='/vncc'
 
         self.detection_frame = detection_frame
-        self.world_frame = world_frame
+        if destination_frame is None:
+            destination_frame='/map'
+        self.destination_frame = destination_frame
+        self.reference_link = reference_link
+
         self.service_namespace = service_namespace
         self.kinbody_path = kinbody_path
 
@@ -65,30 +72,6 @@ def _MsgToPose(msg):
 
         return pose
 
-    def _LocalToWorld(self,pose):
-        """
-        Transform a pose from local frame to world frame
-        @param pose The 4x4 transformation matrix containing the pose to transform
-        @return The 4x4 transformation matrix describing the pose in world frame
-        """
-        #Get pose w.r.t world frame
-        self.listener.waitForTransform(self.world_frame,self.detection_frame,
-                                       rospy.Time(),rospy.Duration(10))
-        t, r = self.listener.lookupTransform(self.world_frame,self.detection_frame,
-                                             rospy.Time(0))
-
-        #Get relative transform between frames
-        offset_to_world = numpy.matrix(transformations.quaternion_matrix(r))
-        offset_to_world[0,3] = t[0]
-        offset_to_world[1,3] = t[1]
-        offset_to_world[2,3] = t[2]
-
-        #Compose with pose to get pose in world frame
-        result = numpy.array(numpy.dot(offset_to_world, pose))
-
-        return result
-
-
     def _GetDetection(self, obj_name):
         """
         Calls the service to get a detection of a particular object.
@@ -107,9 +90,8 @@ def _GetDetection(self, obj_name):
 
         #Assumes one instance of object
         result = self._MsgToPose(detect_resp.detections[0])
-        if (self.detection_frame is not None and self.world_frame is not None):
-            result = self._LocalToWorld(result)
         result[:3,:3] = numpy.eye(3)
+
         return result
 
     @PerceptionMethod
@@ -144,5 +126,12 @@ def DetectObject(self, robot, obj_name, **kw_args):
                 os.path.join(self.kinbody_path, kinbody_file))
 
         body = env.GetKinBody(obj_name)
-        body.SetTransform(obj_pose)
+
+        # Apply a transform to the kinbody to put it in the
+        # desired location in OpenRAVE
+        from kinbody_helper import transform_to_or
+        transform_to_or(kinbody=body,
+                        detection_frame=self.detection_frame,
+                        destination_frame=self.destination_frame,
+                        reference_link=self.reference_link,
+                        pose=obj_pose)
         return body
diff --git a/src/prpy/rave.py b/src/prpy/rave.py
index f39166b..be81376 100644
--- a/src/prpy/rave.py
+++ b/src/prpy/rave.py
@@ -109,15 +109,17 @@ def add_object(env, object_name, object_xml_path, object_pose = None):
 
             return None
 
+
+
 # render a trajectory in the environment
-def render_trajectory(env, manip, traj):
+def render_trajectory(env, manip, traj, obj_path):
     '''
     Renders a trajectory in the specified OpenRAVE environment
     @param env The openrave environment object
+    @param manip The manipulator whose end effector traces the trajectory
     @param traj The trajectory to render
+    @param obj_path The path to the folder containing the sphere kinbody
     '''
-    sphere_obj_path = 'objects/misc/smallsphere.kinbody.xml'
+    sphere_obj_path = obj_path + '/objects/smallsphere.kinbody.xml'
 
     numdofs = len(manip.GetArmIndices())
 
     with env:
@@ -132,12 +134,11 @@ def render_trajectory(env, manip, traj):
 
             # put the arm in that pose
             manip.SetActive()
-            manip.parent.SetActiveDOFValues(arm_config)
+            manip.GetRobot().SetActiveDOFValues(arm_config)
 
             # grab the end effector pose
             ee_pose = manip.GetEndEffectorTransform()
-
             # render a sphere here
             add_object(env, 'traj_sphere'+str(wid), sphere_obj_path, ee_pose)
 
 # Clear out any rendered trajectories
diff --git a/src/prpy/serialization.py b/src/prpy/serialization.py
index 139fc82..c837f15 100644
--- a/src/prpy/serialization.py
+++ b/src/prpy/serialization.py
@@ -558,7 +558,7 @@ def deserialize_transform(data):
     '_name': str_identity,
     '_type': (
         lambda x: x.name,
-        lambda x: openravepy.KinBody.JointType.names[x].encode()
+        lambda x: openravepy.KinBody.JointType.names[x]
     ),
     '_vanchor': numpy_identity,
     '_vaxes': (