How to Radar to Camera? #8570

Open · KJ-Chang opened this issue Jan 15, 2025 · 0 comments
[Version 0.9.15]
How do I correctly project the radar detections onto the image I get from the camera?
I want the same effect as world.debug.draw_point().
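For reference, this is the effect I am after, drawn in world space. A minimal sketch, following the radar visualization in CARLA's manual_control.py example (radar_data is the carla.RadarMeasurement received in the listen callback):

    # Draw every radar detection as a debug point in world space.
    current_rot = radar_data.transform.rotation
    for detect in radar_data:
        # Rotate a forward vector of length `depth` by the detection's
        # azimuth/altitude to get the hit location relative to the radar.
        fw_vec = carla.Vector3D(x=detect.depth - 0.25)  # small offset so the point is not clipped
        carla.Transform(
            carla.Location(),
            carla.Rotation(
                pitch=current_rot.pitch + math.degrees(detect.altitude),
                yaw=current_rot.yaw + math.degrees(detect.azimuth),
                roll=current_rot.roll)).transform(fw_vec)
        world.debug.draw_point(
            radar_data.transform.location + fw_vec,
            size=0.075,
            life_time=0.06,
            color=carla.Color(0, 255, 0))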

I tried adapting lidar_to_camera.py, but the result seems to be wrong (only very small green points appear).

[code]
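    # NOTE: this is an excerpt. It assumes the same setup as
    # lidar_to_camera.py: math, os, sys, numpy as np and carla are
    # imported, along with queue.Queue / queue.Empty and PIL.Image;
    # the client is connected with `world` in synchronous mode,
    # `bp_lib = world.get_blueprint_library()`, and `args` holds the
    # parsed command-line options (width, height, frames).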

    if not os.path.isdir('_out'):
        os.mkdir('_out')

    # Search for the desired blueprints
    vehicle_bp = bp_lib.filter("vehicle.lincoln.mkz_2017")[0]
    camera_bp = bp_lib.filter("sensor.camera.rgb")[0]
    radar_bp = bp_lib.filter('sensor.other.radar')[0]

    # Configure the blueprints
    camera_bp.set_attribute("image_size_x", str(args.width))
    camera_bp.set_attribute("image_size_y", str(args.height))

    radar_bp.set_attribute('horizontal_fov', str(90))
    radar_bp.set_attribute('points_per_second', str(2000))
    radar_bp.set_attribute('range', str(50))
    radar_bp.set_attribute('sensor_tick', str(0))
    radar_bp.set_attribute('vertical_fov', str(50))


    # Spawn the blueprints
    vehicle = world.spawn_actor(
        blueprint=vehicle_bp,
        transform=carla.Transform(carla.Location(x=-64, y=15.471010, z=0.05), carla.Rotation(pitch=0, yaw=0.159198, roll=0)))
    vehicle2 = world.spawn_actor(
        blueprint=vehicle_bp,
        transform=carla.Transform(carla.Location(x=-54, y=15.471010, z=0.05), carla.Rotation(pitch=0, yaw=0.159198, roll=0)))
    vehicle3 = world.spawn_actor(
        blueprint=vehicle_bp,
        transform=carla.Transform(carla.Location(x=-45, y=22.471010, z=0.05), carla.Rotation(pitch=0, yaw=0.159198, roll=0)))
    vehicle4 = world.spawn_actor(
        blueprint=vehicle_bp,
        transform=carla.Transform(carla.Location(x=-40, y=13.471010, z=0.05), carla.Rotation(pitch=0, yaw=0.159198, roll=0)))
    vehicle.set_autopilot(True)
    bound_x = 0.5 + vehicle.bounding_box.extent.x
    bound_y = 0.5 + vehicle.bounding_box.extent.y
    bound_z = 0.5 + vehicle.bounding_box.extent.z
    camera = world.spawn_actor(
        blueprint=camera_bp,
        transform=carla.Transform(carla.Location(x=+0.8*bound_x, y=0.0*bound_y, z=0.6*bound_z), carla.Rotation(pitch=0)),
        attach_to=vehicle,
        attachment_type = carla.AttachmentType.Rigid)
    radar = world.spawn_actor(
        blueprint=radar_bp,
        transform=carla.Transform(
            carla.Location(x=+0.8*bound_x, y=+0.0*bound_y, z=0.6*bound_z),
            carla.Rotation(pitch=0),
            ),
        attach_to=vehicle)

    # Build the K projection matrix:
    # K = [[Fx,  0, image_w/2],
    #      [ 0, Fy, image_h/2],
    #      [ 0,  0,         1]]
    image_w = camera_bp.get_attribute("image_size_x").as_int()
    image_h = camera_bp.get_attribute("image_size_y").as_int()
    fov = camera_bp.get_attribute("fov").as_float()

    # The fov attribute is the horizontal field of view. Since the pixel
    # aspect ratio is 1, Fx and Fy are the same single focal length;
    # deriving a separate fy from a linearly rescaled vertical fov is
    # wrong, because the field of view does not scale linearly with the
    # image size.
    focal = image_w / (2.0 * math.tan(math.radians(fov) / 2.0))
    print("focal:", focal)

    K = np.identity(3)
    K[0, 0] = K[1, 1] = focal
    K[0, 2] = image_w / 2.0
    K[1, 2] = image_h / 2.0
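    # A quick sanity check on K, assuming args.width=800, args.height=600
    # and the default 90 degree fov: focal = 800 / (2 * tan(45°)) = 400.0,
    # and the camera-space point (x=1, y=0, z=1), i.e. the 45 degree ray,
    # projects to (u, v) = (400*1/1 + 400, 400*0/1 + 300) = (800, 300),
    # exactly on the right image border at half height, as expected for a
    # 90 degree horizontal field of view.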
   

    # The sensor data will be saved in thread-safe Queues
    image_queue = Queue()
    radar_queue = Queue()

    camera.listen(lambda data: sensor_callback(data, image_queue))
    radar.listen(lambda data: sensor_callback(data, radar_queue))
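    # sensor_callback is the helper from lidar_to_camera.py; it only
    # stores the measurement on the thread-safe queue:
    #
    #     def sensor_callback(data, queue):
    #         queue.put(data)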

    for frame in range(args.frames):
        # Skipping the iteration before world.tick() means only every
        # 20th pass advances the simulation and produces sensor data.
        if frame % 20 != 0:
            continue
        world.tick()
        world_frame = world.get_snapshot().frame

        try:
            # Get the data once it's received.
            image_data = image_queue.get(True, 1.0)
            radar_data = radar_queue.get(True, 1.0)
        except Empty:
            print("[Warning] Some sensor data has been missed")
            continue

        assert image_data.frame == radar_data.frame == world_frame
        # At this point, we have the synchronized information from the 2 sensors.
        sys.stdout.write("\r(%d/%d) Simulation: %d Camera: %d radar: %d" %
            (frame, args.frames, world_frame, image_data.frame, radar_data.frame) + ' ')
        sys.stdout.flush()

        # Get the raw BGRA buffer and convert it to an array of RGB of
        # shape (image_data.height, image_data.width, 3).
        im_array = np.copy(np.frombuffer(image_data.raw_data, dtype=np.dtype("uint8")))
        im_array = np.reshape(im_array, (image_data.height, image_data.width, 4))
        im_array = im_array[:, :, :3][:, :, ::-1]

        # Get the radar data and convert it to a numpy array with one
        # row per detection; CARLA packs each RadarDetection as four
        # float32 values: [velocity, azimuth, altitude, depth].
        p_radar = np.copy(np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4')))
        p_radar = np.reshape(p_radar, (len(radar_data), 4))

        # The sensor poses are fixed relative to the vehicle, so one set
        # of matrices per frame is enough.
        radar_to_world = np.array(radar.get_transform().get_matrix())
        world_to_camera = np.array(camera.get_transform().get_inverse_matrix())

        for (_, azimuth, altitude, depth) in p_radar:  # velocity is unused here
            # Spherical to Cartesian in the radar frame
            # (UE4 convention: x forward, y right, z up).
            x = depth * math.cos(altitude) * math.cos(azimuth)
            y = depth * math.cos(altitude) * math.sin(azimuth)
            z = depth * math.sin(altitude)

            # Radar frame -> world -> camera frame, in homogeneous coordinates.
            rd_local = np.array([x, y, z, 1.0])
            rd_world = np.matmul(radar_to_world, rd_local)
            cam_local = np.matmul(world_to_camera, rd_world)
            # print("cam_world: ", cam_world)
            
            ue4_to_opencv = np.array([
                [0, 1, 0],
                [0, 0, -1],
                [1, 0, 0],
            ])
            
            cam_new_local = np.matmul(ue4_to_opencv, cam_local[:3])

            pixel = np.matmul(K, cam_new_local)
            
            u = pixel[0] / pixel[2]
            v = pixel[1] / pixel[2]
            print(f'(u, v) = ({u}, {v})')
            
            if (u<0) | (u>=args.height) | (v<0) | (v>=args.width):
                continue

            im_array[int(u), int(v)] = [0, 255, 0]

        # Save the image using the Pillow module.
        image = Image.fromarray(im_array)
        image.save("_out/%08d.png" % image_data.frame)