diff --git a/.github/workflows/apk.yaml b/.github/workflows/apk.yaml new file mode 100644 index 00000000..40100b33 --- /dev/null +++ b/.github/workflows/apk.yaml @@ -0,0 +1,71 @@ +# Build the ISAAC guest science APK + +name: Compile APK + +on: ['push', 'pull_request'] + +jobs: + install_dependencies: + runs-on: ubuntu-18.04 + + steps: + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ + + + - name: Build image isaac/astrobee:msgs-ubuntu16.04 + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' -t isaac/isaac:astrobee-msgs-ubuntu16.04 + - name: Build image isaac/isaac:msgs-ubuntu16.04 + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' -t isaac/isaac:msgs-ubuntu16.04 + - name: Build image isaac/isaac:latest-msgs-jar-ubuntu16.04 + run: docker build isaac -f isaac/scripts/docker/build_msgs_jar.Dockerfile --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' -t isaac/isaac:latest-msgs-jar-ubuntu16.04 + + - name: Copy jar files + run: | docker cp $(docker create --rm isaac/isaac:latest-msgs-jar-ubuntu16.04):/src/msgs/devel/share/maven/ . rm isaac/apks/isaac_gs_ros_bridge/app/libs/*msgs* find maven -name "*.jar" -print0 | xargs -0 cp -t isaac/apks/isaac_gs_ros_bridge/app/libs + - name: Install APK dependencies + run: | sudo apt-get install -y libc6-dev-i386 lib32z1 openjdk-8-jdk mkdir $HOME/android-sdk cd $HOME/android-sdk wget https://dl.google.com/android/repository/tools_r25.2.3-linux.zip java -version unzip tools_r25.2.3-linux.zip tools/bin/sdkmanager --update yes | tools/bin/sdkmanager "platforms;android-25" "build-tools;25.0.2" "extras;google;m2repository" "extras;android;m2repository" wget https://dl.google.com/android/repository/android-ndk-r22b-linux-x86_64.zip unzip android-ndk-r22b-linux-x86_64.zip mv android-ndk-r22b ndk-bundle cd ~/android-sdk/ndk-bundle/toolchains ln -s aarch64-linux-android-4.9 mips64el-linux-android ln -s arm-linux-androideabi-4.9 mipsel-linux-android - name: Build APK + run: | cd isaac/apks/isaac_gs_ros_bridge ANDROID_HOME=$HOME/android-sdk ANDROID_NDK_HOME=$HOME/android-sdk/ndk-bundle ./gradlew build diff --git a/.github/workflows/ci_pr.yml b/.github/workflows/ci_pr.yml index 5463516b..ddf03286 100644 --- a/.github/workflows/ci_pr.yml +++ b/.github/workflows/ci_pr.yml @@ -11,13 +11,21 @@ jobs: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ - name: Build code for isaac:astrobee Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' @@ -25,24 +33,44 @@ jobs: -t isaac/isaac:latest-astrobee-ubuntu16.04 - name: Build code for isaac:latest Ubuntu 16 - run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' -t isaac:latest-ubuntu16.04 + - name: Build messages image isaac/astrobee:msgs-ubuntu16.04 + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + -t isaac/isaac:astrobee-msgs-ubuntu16.04 + - name: Build messages image isaac/isaac:msgs-ubuntu16.04 + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + -t isaac/isaac:msgs-ubuntu16.04 build-bionic: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ - name: Build code for isaac:astrobee Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=18.04 --build-arg ROS_VERSION=melodic --build-arg PYTHON=3 @@ -50,24 +78,45 @@ jobs: -t isaac/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=18.04 --build-arg ROS_VERSION=melodic --build-arg PYTHON=3 -t isaac:latest-ubuntu18.04 + - name: Build messages image isaac/astrobee:msgs-ubuntu18.04 + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON='' + -t isaac/isaac:astrobee-msgs-ubuntu18.04 + + - name: Build messages image isaac/isaac:msgs-ubuntu18.04 + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON='' + -t isaac/isaac:msgs-ubuntu18.04 build-focal: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ - name: Build code for isaac:astrobee Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=20.04 --build-arg ROS_VERSION=noetic --build-arg PYTHON=3 @@ -75,8 +124,22 @@ jobs: -t isaac/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 - run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=20.04 --build-arg ROS_VERSION=noetic --build-arg PYTHON=3 -t isaac:latest-ubuntu20.04 + + - name: Build messages image isaac/astrobee:msgs-ubuntu20.04 + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + -t isaac/isaac:astrobee-msgs-ubuntu20.04 + + - name: Build messages image isaac/isaac:msgs-ubuntu20.04 + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + -t isaac/isaac:msgs-ubuntu20.04 \ No newline at end of file diff --git a/.github/workflows/ci_push.yml b/.github/workflows/ci_push.yml index 9ba53e2c..60b58a05 100644 --- a/.github/workflows/ci_push.yml +++ b/.github/workflows/ci_push.yml @@ -11,13 +11,21 @@ jobs: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ - name: Build code for isaac:astrobee Ubuntu 16 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' @@ -25,13 +33,28 @@ jobs: -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu16.04 - name: Build code for isaac:latest Ubuntu 16 - run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=16.04 --build-arg ROS_VERSION=kinetic --build-arg PYTHON='' --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu16.04 + - name: Build messages dockers for Ubuntu 16 (astrobee) + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu16.04 + + - name: Build messages dockers for Ubuntu 16 (isaac) + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=16.04 + --build-arg ROS_VERSION=kinetic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa + -t ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu16.04 + - name: Log in to registry run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.repository_owner }} --password-stdin @@ -39,19 +62,29 @@ jobs: run: | if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu16.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu16.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu16.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu16.04; fi; build-bionic: runs-on: ubuntu-18.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ - name: Build code for isaac:astrobee Ubuntu 18 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=18.04 --build-arg ROS_VERSION=melodic --build-arg PYTHON=3 @@ -59,13 +92,28 @@ jobs: -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu18.04 - name: Build code for isaac:latest Ubuntu 18 - run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=18.04 --build-arg ROS_VERSION=melodic --build-arg PYTHON=3 --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu18.04 + - name: Build messages dockers for Ubuntu 18 (astrobee) + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON='' + -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu18.04 + + - name: Build messages dockers for Ubuntu 18 (isaac) + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=18.04 + --build-arg ROS_VERSION=melodic + --build-arg PYTHON='' + --build-arg REMOTE=ghcr.io/nasa + -t ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu18.04 + - name: Log in to registry run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.repository_owner }} --password-stdin @@ -73,19 +121,29 @@ jobs: run: | if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu18.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu18.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu18.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu18.04; fi; build-focal: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 - - name: Checkout submodule - run: git submodule update --init --depth 1 isaac_msgs + - name: Checkout Astrobee + uses: actions/checkout@v2 + with: + repository: nasa/astrobee + path: astrobee/ + + - name: Checkout ISAAC + uses: actions/checkout@v2 + with: + submodules: recursive + path: isaac/ - name: Build code for isaac:astrobee Ubuntu 20 - run: docker build . -f ./scripts/docker/isaac_astrobee.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac_astrobee.Dockerfile --build-arg UBUNTU_VERSION=20.04 --build-arg ROS_VERSION=noetic --build-arg PYTHON=3 @@ -93,13 +151,28 @@ jobs: -t ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu20.04 - name: Build code for isaac:latest Ubuntu 20 - run: docker build . 
-f ./scripts/docker/isaac.Dockerfile + run: docker build isaac -f isaac/scripts/docker/isaac.Dockerfile --build-arg UBUNTU_VERSION=20.04 --build-arg ROS_VERSION=noetic --build-arg PYTHON=3 --build-arg REMOTE=ghcr.io/nasa -t ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu20.04 + - name: Build messages dockers for Ubuntu 20 (astrobee) + run: docker build astrobee -f isaac/scripts/docker/astrobee_msgs.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + -t ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu20.04 + + - name: Build messages dockers for Ubuntu 20 (isaac) + run: docker build isaac -f isaac/scripts/docker/isaac_msgs.Dockerfile + --build-arg UBUNTU_VERSION=20.04 + --build-arg ROS_VERSION=noetic + --build-arg PYTHON=3 + --build-arg REMOTE=ghcr.io/nasa + -t ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu20.04 + - name: Log in to registry run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.repository_owner }} --password-stdin @@ -107,3 +180,5 @@ jobs: run: | if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-astrobee-ubuntu20.04; fi; if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:latest-ubuntu20.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:astrobee-msgs-ubuntu20.04; fi; + if [ "${{ github.repository_owner }}" = "nasa" ]; then docker push ghcr.io/${{ github.repository_owner }}/isaac:msgs-ubuntu20.04; fi; diff --git a/INSTALL.md b/INSTALL.md index d51722b7..d8561607 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -33,7 +33,7 @@ At this point you need to decide where you'd like to put the ISAAC workspace and First, clone the flight software repository: - git clone --recursive ssh://git@babelfish.arc.nasa.gov:7999/isaac/isaac.git \ + git clone --recursive https://github.com/nasa/isaac.git \ --branch develop $ISAAC_WS/src/ Checkout the submodule: @@ -91,18 +91,18 @@ Cross-compiling isaac --------- -To cross-compile ISAAC, one must first cross compile the astobee code using the NASA_INSTALL instructions. +To cross-compile ISAAC, one must first cross-compile the astrobee code using the NASA_INSTALL instructions. Note that ASTROBEE_WS must be defined. A new catkin profile should be made to retain the configurations and easily switch between normal build. 
catkin profile add cross catkin profile set cross - catkin config --extend $ARMHF_CHROOT_DIR/opt/astrobee \ - --build-space $ARMHF_CHROOT_DIR/home/astrobee/isaac/build \ - --install-space $ARMHF_CHROOT_DIR/opt/isaac \ - --devel-space $ARMHF_CHROOT_DIR/home/astrobee/isaac/devel \ - --log-space $ARMHF_CHROOT_DIR/home/astrobee/isaac/logs \ - --whitelist isaac_astrobee_description isaac_util isaac_msgs inspection isaac_hw_msgs wifi isaac gs_action_helper \ + catkin config --extend $ASTROBEE_WS/armhf/opt/astrobee \ + --build-space armhf/build \ + --install-space armhf/opt/isaac \ + --devel-space armhf/devel \ + --log-space armhf/logs \ + --whitelist isaac_astrobee_description isaac_util isaac_msgs inspection cargo isaac_hw_msgs wifi isaac gs_action_helper \ --install \ --cmake-args -DCMAKE_TOOLCHAIN_FILE=$ISAAC_WS/src/scripts/build/isaac_cross.cmake \ -DARMHF_CHROOT_DIR=$ARMHF_CHROOT_DIR diff --git a/RELEASE.md b/RELEASE.md index 7e628ffd..55540d42 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,5 +1,11 @@ # Releases +## Release 0.2.4 + + * Inspection tool improvements + * Better CI + * Small bug fixes + ## Release 0.2.3 * Panorama picture taking mode supported diff --git a/apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar b/apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar deleted file mode 100644 index e04d1d37..00000000 Binary files a/apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar and /dev/null differ diff --git a/apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar b/apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar new file mode 120000 index 00000000..ac4e1ee9 --- /dev/null +++ b/apks/isaac_gs_ros_bridge/app/libs/isaac_hw_msgs-0.0.0.jar @@ -0,0 +1 @@ +../../../../../devel/share/maven/org/ros/rosjava_messages/isaac_hw_msgs/0.0.0/isaac_hw_msgs-0.0.0.jar \ No newline at end of file diff --git a/apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar b/apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar deleted file mode 100644 index f69d5784..00000000 Binary files a/apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar and /dev/null differ diff --git a/apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar b/apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar new file mode 120000 index 00000000..d8d21b57 --- /dev/null +++ b/apks/isaac_gs_ros_bridge/app/libs/isaac_msgs-0.0.0.jar @@ -0,0 +1 @@ +../../../../../devel/share/maven/org/ros/rosjava_messages/isaac_msgs/0.0.0/isaac_msgs-0.0.0.jar \ No newline at end of file diff --git a/apks/isaac_gs_ros_bridge/app/src/main/java/gov/nasa/arc/irg/astrobee/isaac_gs_ros_bridge/RosPubSub.java b/apks/isaac_gs_ros_bridge/app/src/main/java/gov/nasa/arc/irg/astrobee/isaac_gs_ros_bridge/RosPubSub.java index 6c32d646..e4916a90 100644 --- a/apks/isaac_gs_ros_bridge/app/src/main/java/gov/nasa/arc/irg/astrobee/isaac_gs_ros_bridge/RosPubSub.java +++ b/apks/isaac_gs_ros_bridge/app/src/main/java/gov/nasa/arc/irg/astrobee/isaac_gs_ros_bridge/RosPubSub.java @@ -155,6 +155,7 @@ public boolean handleGuestScienceCustomCmd(String command) { if (cmd_split[0].compareTo("iir") == 0) { // This is an image inspection result + // TODO(Katie) Need to add anomaly_result once it is added to the ground node Log.i(mBridgeService.ISAAC_GS_ROS_BRIDGE_TAG, "Got image inspection result"); ImageInspectionResult imageInspectionResultMsg = mImageInspectionResultPublisher.newMessage(); imageInspectionResultMsg.setResponse(Integer.parseInt(cmd_split[1])); @@ -170,7 +171,9 @@ public boolean handleGuestScienceCustomCmd(String command) { 
inspectionGoalMsg.setCommand(Byte.parseByte(cmd_split[1])); // Need to figure out how many poses we have, subtract 2 for gs command name // and inspection command arg - int num_args = cmd_split.length - 2; + // TODO(Katie) This extraction needs work. The inspection poses were changed from a + // TODO(Katie) list of poses to a pose array. + /*int num_args = cmd_split.length - 2; int num_poses = num_args/7; if (num_args % 7 == 0) { List inspectPoses = inspectionGoalMsg.getInspectPoses(); @@ -192,7 +195,7 @@ public boolean handleGuestScienceCustomCmd(String command) { inspectPoses.add(poseStamped); } inspectionGoalMsg.setInspectPoses(inspectPoses); - } + }*/ mInspectionGoalPublisher.publish(inspectionGoalMsg); } else { Log.e(mBridgeService.ISAAC_GS_ROS_BRIDGE_TAG, "Unknown custom guest science command."); @@ -235,20 +238,9 @@ public void inspectionFeedbackCallback(InspectionFeedback feedback) { public void inspectionResultCallback(InspectionResult result) { Log.i(mBridgeService.ISAAC_GS_ROS_BRIDGE_TAG, "Got inspection result"); // TODO(Katie) Extract header timestamp and frame id to be thorough + // TODO(Katie) Update to work with new inspection result String gs_data = ""; - ChannelBuffer vent_result = result.getVentResult(); - gs_data += vent_result.capacity() + "@"; - for (int i = 0; i < vent_result.capacity(); i++) { - gs_data += Byte.toString(vent_result.getByte(i)) + "@"; - } - - ChannelBuffer geometry_result = result.getGeometryResult(); - gs_data += geometry_result.capacity() + "@"; - for (int i = 0; i < geometry_result.capacity(); i++) { - gs_data += Byte.toString(geometry_result.getByte(i)) + "@"; - } - gs_data += result.getResponse() + "@"; gs_data += result.getFsmResult() + "\0"; diff --git a/astrobee/behaviors/cargo/src/cargo_node.cc b/astrobee/behaviors/cargo/src/cargo_node.cc index 960f608d..20e32568 100755 --- a/astrobee/behaviors/cargo/src/cargo_node.cc +++ b/astrobee/behaviors/cargo/src/cargo_node.cc @@ -321,7 +321,7 @@ class CargoNode : public ff_util::FreeFlyerNodelet { void Initialize(ros::NodeHandle* nh) { nh_ = nh; // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) cfg_.SetPath(path); // Grab some configuration parameters for this node from the LUA config reader diff --git a/astrobee/behaviors/inspection/include/inspection/inspection.h b/astrobee/behaviors/inspection/include/inspection/inspection.h index f833bdec..a19a33fe 100755 --- a/astrobee/behaviors/inspection/include/inspection/inspection.h +++ b/astrobee/behaviors/inspection/include/inspection/inspection.h @@ -133,7 +133,7 @@ class Inspection { tf2_ros::Buffer tf_buffer_; std::shared_ptr tf_listener_; ros::Publisher pub_; - tf2::Quaternion vent_to_scicam_rot_; + tf2::Quaternion target_to_scicam_rot_; geometry_msgs::PoseArray points_; // Vector containing inspection poses diff --git a/astrobee/behaviors/inspection/readme.md b/astrobee/behaviors/inspection/readme.md index e280c7d6..0fa674c5 100644 --- a/astrobee/behaviors/inspection/readme.md +++ b/astrobee/behaviors/inspection/readme.md @@ -57,4 +57,4 @@ The pictures will be published on the topic They will also show up in the sci cam window in RVIZ. -If requests to take a single picture come at a high rate, some of them will be dropped. \ No newline at end of file +If requests to take a single picture come at a high rate, some of them will be dropped. 
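The three resource files added below each appear to encode one inspection station per line: x y z position in meters followed by roll pitch yaw attitude in degrees, with # introducing a comment line. A minimal C++ sketch of a reader for that layout, assuming the six-column format holds throughout (ParseStationLine is an illustrative helper, not a function from the repository):

#include <array>
#include <iostream>
#include <sstream>
#include <string>

// Parse one station line of the form "x y z roll pitch yaw" (position in
// meters, attitude in degrees). Returns false for comments, blank lines,
// and malformed rows. Illustrative helper, not part of the repository.
bool ParseStationLine(const std::string& line, std::array<double, 6>* station) {
  if (line.empty() || line[0] == '#') return false;
  std::istringstream iss(line);
  for (double& v : *station)
    if (!(iss >> v)) return false;  // reject rows with fewer than 6 numbers
  return true;
}

int main() {
  std::array<double, 6> s;
  // First station of scicam_panorama_geometry.txt: pitched straight down.
  if (ParseStationLine("10.90 -9.00 4.88 0.00 -90.00 0.00", &s))
    std::cout << "z = " << s[2] << " m, pitch = " << s[4] << " deg\n";
  return 0;
}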
diff --git a/astrobee/behaviors/inspection/resources/scicam_panorama_geometry.txt b/astrobee/behaviors/inspection/resources/scicam_panorama_geometry.txt new file mode 100644 index 00000000..5cd5cd2b --- /dev/null +++ b/astrobee/behaviors/inspection/resources/scicam_panorama_geometry.txt @@ -0,0 +1,52 @@ +10.90 -9.00 4.88 0.00 -90.00 0.00 +10.86 -9.00 4.95 0.00 -60.00 0.00 +10.89 -9.08 4.95 0.00 -60.00 36.00 +10.96 -9.14 4.95 0.00 -60.00 72.00 +11.04 -9.14 4.95 0.00 -60.00 108.00 +11.11 -9.08 4.95 0.00 -60.00 144.00 +11.14 -9.00 4.95 0.00 -60.00 180.00 +11.12 -8.92 4.95 0.00 -60.00 -144.01 +11.04 -8.86 4.95 0.00 -60.00 -108.01 +10.96 -8.86 4.95 0.00 -60.00 -72.01 +10.89 -8.92 4.95 0.00 -60.00 -36.01 +10.85 -9.00 5.02 0.00 -30.00 0.00 +10.88 -9.09 5.02 0.00 -30.00 36.00 +10.95 -9.14 5.02 0.00 -30.00 72.00 +11.05 -9.14 5.02 0.00 -30.00 108.00 +11.12 -9.09 5.02 0.00 -30.00 144.00 +11.15 -9.00 5.02 0.00 -30.00 180.00 +11.12 -8.91 5.02 0.00 -30.00 -144.01 +11.05 -8.86 5.02 0.00 -30.00 -108.01 +10.95 -8.86 5.02 0.00 -30.00 -72.01 +10.88 -8.91 5.02 0.00 -30.00 -36.01 +10.88 -9.00 5.10 0.00 0.00 0.00 +10.90 -9.07 5.10 0.00 0.00 36.00 +10.96 -9.11 5.10 0.00 0.00 72.00 +11.04 -9.11 5.10 0.00 0.00 108.00 +11.10 -9.07 5.10 0.00 0.00 144.00 +11.12 -9.00 5.10 0.00 0.00 180.00 +11.10 -8.93 5.10 0.00 -0.00 -144.01 +11.04 -8.89 5.10 0.00 0.00 -108.01 +10.96 -8.89 5.10 0.00 0.00 -72.01 +10.90 -8.93 5.10 0.00 0.00 -36.01 +10.95 -9.00 5.14 0.00 30.00 0.00 +10.96 -9.03 5.14 0.00 30.00 36.00 +10.98 -9.05 5.14 0.00 30.00 72.00 +11.02 -9.05 5.14 0.00 30.00 108.00 +11.04 -9.03 5.14 0.00 30.00 144.00 +11.05 -9.00 5.14 0.00 30.00 180.00 +11.04 -8.97 5.14 0.00 30.00 -144.01 +11.02 -8.95 5.14 0.00 30.00 -108.01 +10.98 -8.95 5.14 0.00 30.00 -72.01 +10.96 -8.97 5.14 0.00 30.00 -36.01 +11.02 -9.00 5.15 0.00 60.00 0.00 +11.02 -8.99 5.15 0.00 60.00 36.00 +11.01 -8.98 5.15 0.00 60.00 72.00 +10.99 -8.98 5.15 0.00 60.00 108.00 +10.98 -8.99 5.15 0.00 60.00 144.00 +10.98 -9.00 5.15 0.00 60.00 180.00 +10.98 -9.01 5.15 0.00 60.00 -144.01 +10.99 -9.02 5.15 0.00 60.00 -108.01 +11.01 -9.02 5.15 0.00 60.00 -72.01 +11.02 -9.01 5.15 0.00 60.00 -36.01 +11.10 -9.00 5.12 0.00 90.00 0.00 diff --git a/astrobee/behaviors/inspection/resources/soundsee_background_noise.txt b/astrobee/behaviors/inspection/resources/soundsee_background_noise.txt new file mode 100644 index 00000000..8ce6e8d5 --- /dev/null +++ b/astrobee/behaviors/inspection/resources/soundsee_background_noise.txt @@ -0,0 +1,48 @@ +# Scicam stations +11.019643 -8.20 4.28 0.00 0.00 180.00 +11.019643 -7.70 4.28 0.00 0.00 180.00 +11.019643 -7.70 4.78 0.00 0.00 180.00 +11.019643 -8.20 4.78 0.00 0.00 180.00 +11.019643 -8.20 5.28 0.00 0.00 180.00 +11.019643 -7.70 5.28 0.00 0.00 180.00 +# Soundsee stations +10.38 -8.20 4.08 0.00 0.00 180.00 +10.38 -8.05 4.08 0.00 0.00 180.00 +10.38 -7.90 4.08 0.00 0.00 180.00 +10.38 -7.75 4.08 0.00 0.00 180.00 +10.38 -7.60 4.08 0.00 0.00 180.00 +10.38 -7.60 4.22 0.00 0.00 180.00 +10.38 -7.75 4.22 0.00 0.00 180.00 +10.38 -7.90 4.22 0.00 0.00 180.00 +10.38 -8.05 4.22 0.00 0.00 180.00 +10.38 -8.20 4.22 0.00 0.00 180.00 +10.38 -8.20 4.38 0.00 0.00 180.00 +10.38 -8.05 4.38 0.00 0.00 180.00 +10.38 -7.90 4.38 0.00 0.00 180.00 +10.38 -7.75 4.38 0.00 0.00 180.00 +10.38 -7.60 4.38 0.00 0.00 180.00 +10.38 -7.60 4.52 0.00 0.00 180.00 +10.38 -7.75 4.52 0.00 0.00 180.00 +10.38 -7.90 4.52 0.00 0.00 180.00 +10.38 -8.05 4.52 0.00 0.00 180.00 +10.38 -8.20 4.52 0.00 0.00 180.00 +10.38 -8.20 4.68 0.00 0.00 180.00 +10.38 -8.05 4.68 0.00 0.00 180.00 +10.38 -7.90 4.68 0.00 0.00 
180.00 +10.38 -7.75 4.68 0.00 0.00 180.00 +10.38 -7.60 4.68 0.00 0.00 180.00 +10.38 -7.60 4.82 0.00 0.00 180.00 +10.38 -7.75 4.82 0.00 0.00 180.00 +10.38 -7.90 4.82 0.00 0.00 180.00 +10.38 -8.05 4.82 0.00 0.00 180.00 +10.38 -8.20 4.82 0.00 0.00 180.00 +10.38 -8.20 4.98 0.00 0.00 180.00 +10.38 -8.05 4.98 0.00 0.00 180.00 +10.38 -7.90 4.98 0.00 0.00 180.00 +10.38 -7.75 4.98 0.00 0.00 180.00 +10.38 -7.60 4.98 0.00 0.00 180.00 +10.38 -7.60 5.12 0.00 0.00 180.00 +10.38 -7.75 5.12 0.00 0.00 180.00 +10.38 -7.90 5.12 0.00 0.00 180.00 +10.38 -8.05 5.12 0.00 0.00 180.00 +10.38 -8.20 5.12 0.00 0.00 180.00 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt b/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt deleted file mode 100644 index d3dd47a1..00000000 --- a/astrobee/behaviors/inspection/resources/soundsee_backround_noise.txt +++ /dev/null @@ -1,2 +0,0 @@ -# Panorama -11 -9 5.5 0 -90 0 \ No newline at end of file diff --git a/astrobee/behaviors/inspection/resources/soundsee_panorama_geometry.txt b/astrobee/behaviors/inspection/resources/soundsee_panorama_geometry.txt new file mode 100644 index 00000000..5b307f9c --- /dev/null +++ b/astrobee/behaviors/inspection/resources/soundsee_panorama_geometry.txt @@ -0,0 +1,16 @@ +11.16 -8.97 5.45 130.89 -48.59 -139.11 +11.16 -8.99 5.44 160.58 -58.53 -163.26 +11.16 -9.01 5.44 -160.58 -58.53 163.26 +11.16 -9.03 5.45 -130.89 -48.59 139.11 +11.13 -8.95 5.40 106.74 -58.53 -109.42 +11.13 -8.98 5.39 134.56 -75.90 -135.44 +11.13 -9.02 5.39 -134.56 -75.90 135.44 +11.13 -9.06 5.40 -106.74 -58.53 109.42 +11.08 -8.93 5.37 73.26 -58.53 -70.57 +11.08 -8.98 5.35 45.43 -75.89 -44.56 +11.08 -9.03 5.35 -45.43 -75.89 44.56 +11.08 -9.08 5.37 -73.26 -58.53 70.57 +11.03 -8.92 5.35 49.11 -48.59 -40.89 +11.03 -8.97 5.33 19.42 -58.53 -16.74 +11.03 -9.03 5.34 -19.42 -58.53 16.74 +11.03 -9.09 5.36 -49.11 -48.59 40.89 diff --git a/astrobee/behaviors/inspection/scripts/panorama_maker.sh b/astrobee/behaviors/inspection/scripts/panorama_maker.sh new file mode 100755 index 00000000..b0aa2a68 --- /dev/null +++ b/astrobee/behaviors/inspection/scripts/panorama_maker.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# + +# generate pto file with images +pto_gen -o panorama.pto -f 62.0 *.jpg + +# generate control points +cpfind --multirow -o panorama.pto panorama.pto + +# set optimization variables, optimize +pto_var --opt y,p,r,b -o panorama.pto panorama.pto +autooptimiser -n -o panorama.pto panorama.pto + +# clean outliers +cpclean --max-distance=3 -o panorama.pto panorama.pto + +# add translation to the optimization variables, re-optimize +pto_var --opt y,p,r,TrX,TrY,TrZ,b -o panorama.pto panorama.pto +autooptimiser -n -o panorama.pto panorama.pto + +# optimize photometric parameters +autooptimiser -m -o panorama.pto panorama.pto + +# configure image output +pano_modify -o panorama.pto --center --canvas=AUTO --projection=2 --fov=360x180 panorama.pto + +# generate panorama +hugin_executor --stitching --prefix=prefix panorama.pto \ No newline at end of file diff --git a/astrobee/behaviors/inspection/src/inspection.cc b/astrobee/behaviors/inspection/src/inspection.cc index 93678730..a29da70e 100644 --- a/astrobee/behaviors/inspection/src/inspection.cc +++ b/astrobee/behaviors/inspection/src/inspection.cc @@ -72,7 +72,6 @@ namespace inspection { void Inspection::ReadParam() { // Parameters Anomaly survey - opt_distance_ = cfg_->Get("optimal_distance"); dist_resolution_ = cfg_->Get("distance_resolution"); angle_resolution_ = cfg_->Get("angle_resolution"); max_angle_ = cfg_->Get("max_angle"); 
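The next hunk replaces the LUA-configured vent-to-sci-cam rotation with a TF lookup and rewrites the panorama survey as a serpentine sweep: the tilt sweep direction alternates on every other pan column, and the straight-up and straight-down poles are visited only once. A standalone C++ sketch of that enumeration, with illustrative names and no ROS dependencies (the real code additionally turns each pan/tilt pair into a quaternion via target_to_scicam_rot_):

#include <algorithm>
#include <cmath>
#include <iostream>
#include <utility>
#include <vector>

namespace {
constexpr double kEps = 1e-6;
constexpr double kPi = 3.14159265358979323846;
}

// Serpentine (zig-zag) pan/tilt enumeration: reverse the tilt order on odd
// pan columns and emit the +/-90 deg poles only once across the whole sweep.
std::vector<std::pair<double, double>> ZigZagPanTilt(
    double pan_min, double pan_max, double k_pan,
    double tilt_min, double tilt_max, double k_tilt) {
  std::vector<std::pair<double, double>> views;
  bool top_seen = false, bottom_seen = false;
  int col = 0;
  for (double pan = pan_min; pan <= pan_max + kEps; pan += k_pan, ++col) {
    std::vector<double> tilts;
    for (double t = tilt_min; t <= tilt_max + kEps; t += k_tilt) tilts.push_back(t);
    if (col % 2 != 0) std::reverse(tilts.begin(), tilts.end());  // zig-zag
    for (double tilt : tilts) {
      if (std::fabs(tilt + kPi / 2) < kEps) {         // bottom pole
        if (bottom_seen) continue;
        bottom_seen = true;
      } else if (std::fabs(tilt - kPi / 2) < kEps) {  // top pole
        if (top_seen) continue;
        top_seen = true;
      }
      views.emplace_back(pan, tilt);
    }
  }
  return views;
}

int main() {
  // Full sphere at 30 deg steps: each pole appears once, not once per column.
  auto views = ZigZagPanTilt(-kPi, kPi - 0.001, kPi / 6, -kPi / 2, kPi / 2, kPi / 6);
  std::cout << views.size() << " views\n";
  return 0;
}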
@@ -80,10 +79,20 @@ namespace inspection { min_distance_ = cfg_->Get("min_distance"); target_size_x_ = cfg_->Get("target_size_x"); target_size_y_ = cfg_->Get("target_size_y"); - vent_to_scicam_rot_ = tf2::Quaternion(cfg_->Get("vent_to_sci_cam_rotation_x"), - cfg_->Get("vent_to_sci_cam_rotation_y"), - cfg_->Get("vent_to_sci_cam_rotation_z"), - cfg_->Get("vent_to_sci_cam_rotation_w")); + + // Get transform from target to sci cam + try { + geometry_msgs::TransformStamped tf_target_to_sci_cam = tf_buffer_.lookupTransform("sci_cam", "target", + ros::Time(0)); + opt_distance_ = tf_target_to_sci_cam.transform.translation.z; + target_to_scicam_rot_ = tf2::Quaternion( + tf_target_to_sci_cam.transform.rotation.x, + tf_target_to_sci_cam.transform.rotation.y, + tf_target_to_sci_cam.transform.rotation.z, + tf_target_to_sci_cam.transform.rotation.w); + } catch (tf2::TransformException &ex) { + ROS_ERROR("ERROR getting target to sci_cam transform: %s", ex.what()); + } // Parameters Panorama survey pan_min_ = cfg_->Get("pan_min") * PI / 180.0; @@ -553,19 +562,54 @@ bool Inspection::PointInsideCuboid(geometry_msgs::Point const& x, if (pan_max_ - pan_min_ >= 2*PI) pan_max_-= 2 * EPS; // Go through all the points // Generate all the pan/tilt values - for (double tilt = tilt_min_; tilt <= tilt_max_ + EPS; tilt += k_tilt) { - for (double pan = pan_min_; pan <= pan_max_ + EPS; pan += k_pan) { - ROS_DEBUG_STREAM("pan:" << pan * 180 / PI << " tilt:" << tilt * 180 / PI); - panorama_rotation.setRPY(0, tilt, pan); - panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * vent_to_scicam_rot_; - point.orientation.x = panorama_rotation.x(); - point.orientation.y = panorama_rotation.y(); - point.orientation.z = panorama_rotation.z(); - point.orientation.w = panorama_rotation.w(); - panorama_relative.poses.push_back(point); - if ((tilt < -PI/2 + EPS && tilt > -PI/2 - EPS) || (tilt < PI/2 + EPS && tilt > PI/2 - EPS)) - break; + int i = 0; + bool top_view = false; + bool bottom_view = false; + for (double pan = pan_min_; pan <= pan_max_ + EPS; pan += k_pan) { + // zig zag through views + if (i%2 == 0) { + for (double tilt = tilt_min_; tilt <= tilt_max_ + EPS; tilt += k_tilt) { + // Avoid taking multiple pictures at top and bottom + if (tilt < -PI/2 + EPS && tilt > -PI/2 - EPS) { + if (bottom_view) {continue;} else {bottom_view = true;} + } + if (tilt < PI/2 + EPS && tilt > PI/2 - EPS) { + if (top_view) {continue;} else {top_view = true;} + } + + ROS_DEBUG_STREAM("pan:" << pan * 180 / PI << " tilt:" << tilt * 180 / PI); + panorama_rotation.setRPY(0, tilt, pan); + panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * target_to_scicam_rot_; + point.orientation.x = panorama_rotation.x(); + point.orientation.y = panorama_rotation.y(); + point.orientation.z = panorama_rotation.z(); + point.orientation.w = panorama_rotation.w(); + panorama_relative.poses.push_back(point); + } + } else { + for (double tilt = tilt_max_; tilt >= tilt_min_ - EPS; tilt -= k_tilt) { + // Avoid taking multiple pictures at top and bottom + if (tilt < -PI/2 + EPS && tilt > -PI/2 - EPS) { + if (bottom_view) {continue;} else {bottom_view = true;} + } + if (tilt < PI/2 + EPS && tilt > PI/2 - EPS) { + if (top_view) {continue;} else {top_view = true;} + } + + + ROS_DEBUG_STREAM("pan:" << pan * 180 / PI << " tilt:" << tilt * 180 / PI); + panorama_rotation.setRPY(0, tilt, pan); + panorama_rotation = panorama_rotation * tf2::Quaternion(0, 0, -1, 0) * target_to_scicam_rot_; + point.orientation.x = panorama_rotation.x(); + 
point.orientation.y = panorama_rotation.y(); + point.orientation.z = panorama_rotation.z(); + point.orientation.w = panorama_rotation.w(); + panorama_relative.poses.push_back(point); + // if ((tilt < -PI/2 + EPS && tilt > -PI/2 - EPS) || (tilt < PI/2 + EPS && tilt > PI/2 - EPS)) + // break; + } } + i++; } // Go through all the panorama points diff --git a/astrobee/behaviors/inspection/src/inspection_node.cc b/astrobee/behaviors/inspection/src/inspection_node.cc index 92bd993b..43e8417e 100644 --- a/astrobee/behaviors/inspection/src/inspection_node.cc +++ b/astrobee/behaviors/inspection/src/inspection_node.cc @@ -178,7 +178,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { } } // Inspection is over, return - Result(RESPONSE::SUCCESS); + Result(RESPONSE::SUCCESS, "Inspection Over"); return STATE::WAITING; }); // [5] @@ -192,7 +192,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { return STATE::VOL_INSPECTION; } // Inspection is over, return - Result(RESPONSE::SUCCESS); + Result(RESPONSE::SUCCESS, "Inspection Over"); Dock("DOCK"); return STATE::RETURN_INSPECTION; }); @@ -247,7 +247,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { void Initialize(ros::NodeHandle* nh) { nh_ = nh; // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) cfg_.SetPath(path); // Grab some configuration parameters for this node from the LUA config reader @@ -515,7 +515,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { } // Allow image to stabilize - ros::Duration(2.0).sleep(); + ros::Duration(cfg_.Get("station_time")).sleep(); // Signal an imminent sci cam image sci_cam_req_ = true; @@ -556,7 +556,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { // The sci cam image was received if (sci_cam_req_) { sci_cam_req_ = false; - result_.geometry_result.push_back(isaac_msgs::InspectionResult::PIC_ACQUIRED); + result_.inspection_result.push_back(isaac_msgs::InspectionResult::PIC_ACQUIRED); result_.picture_time.push_back(msg->header.stamp); if (goal_.command == isaac_msgs::InspectionGoal::ANOMALY && ground_active_) { @@ -584,7 +584,7 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { ROS_DEBUG_STREAM("IResultCallback()"); // Fill in the result message with the result if (result != nullptr) - result_.vent_result.push_back(result->response); + result_.anomaly_result.push_back(result->anomaly_result); else ROS_ERROR_STREAM("Invalid result received Image Analysis"); return fsm_.Update(NEXT_INSPECT); @@ -595,9 +595,13 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { // A new arm action has been called void GoalCallback(isaac_msgs::InspectionGoalConstPtr const& goal) { if (!goal_.inspect_poses.poses.empty()) { + isaac_msgs::InspectionResult result; switch (goal->command) { // Pause command case isaac_msgs::InspectionGoal::PAUSE: + result.fsm_result = "Pausing"; + result.response = RESPONSE::SUCCESS; + server_.SendResult(ff_util::FreeFlyerActionState::SUCCESS, result); return fsm_.Update(GOAL_PAUSE); // Resume command case isaac_msgs::InspectionGoal::RESUME: @@ -605,25 +609,24 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { // Skip command case isaac_msgs::InspectionGoal::SKIP: { - isaac_msgs::InspectionResult result; - if (!goal_.inspect_poses.poses.empty() && goal_counter_ < goal_.inspect_poses.poses.size()) { + if (!goal_.inspect_poses.poses.empty() && goal_counter_ < goal_.inspect_poses.poses.size() - 1) { // Skip the current pose goal_counter_++; - 
result.fsm_result = "Skipped pose"; result.response = RESPONSE::SUCCESS; server_.SendResult(ff_util::FreeFlyerActionState::SUCCESS, result); + return fsm_.Update(GOAL_PAUSE); } else { result.fsm_result = "Nothing to skip"; result.response = RESPONSE::INVALID_COMMAND; server_.SendResult(ff_util::FreeFlyerActionState::ABORTED, result); + return fsm_.Update(GOAL_PAUSE); } return; } // Repeat last executed step command case isaac_msgs::InspectionGoal::REPEAT: { - isaac_msgs::InspectionResult result; if (!goal_.inspect_poses.poses.empty() && goal_counter_ > 0) { // Go back to the last pose goal_counter_--; @@ -631,35 +634,49 @@ class InspectionNode : public ff_util::FreeFlyerNodelet { result.fsm_result = "Will repeat last pose"; result.response = RESPONSE::SUCCESS; server_.SendResult(ff_util::FreeFlyerActionState::SUCCESS, result); + return fsm_.Update(GOAL_PAUSE); } else { result.fsm_result = "Nothing to repeat"; result.response = RESPONSE::INVALID_COMMAND; server_.SendResult(ff_util::FreeFlyerActionState::ABORTED, result); + return fsm_.Update(GOAL_PAUSE); } return; } // Save command case isaac_msgs::InspectionGoal::SAVE: - std::ofstream myfile; - std::string path = ros::package::getPath("inspection") + "/resources/current.txt"; - myfile.open(path); - for (int i = 0; i < goal_.inspect_poses.poses.size(); i++) { - myfile << goal_.inspect_poses.poses[i].position.x << " " - << goal_.inspect_poses.poses[i].position.y << " " - << goal_.inspect_poses.poses[i].position.z << " " - << goal_.inspect_poses.poses[i].orientation.x << " " - << goal_.inspect_poses.poses[i].orientation.y << " " - << goal_.inspect_poses.poses[i].orientation.z << " " - << goal_.inspect_poses.poses[i].orientation.w<< "\n"; + if (!goal_.inspect_poses.poses.empty() && goal_counter_ < goal_.inspect_poses.poses.size()) { + std::ofstream myfile; + std::string path = ros::package::getPath("inspection") + "/resources/current.txt"; + myfile.open(path); + for (int i = goal_counter_; i < goal_.inspect_poses.poses.size(); i++) { + myfile << goal_.inspect_poses.poses[i].position.x << " " + << goal_.inspect_poses.poses[i].position.y << " " + << goal_.inspect_poses.poses[i].position.z << " " + << goal_.inspect_poses.poses[i].orientation.x << " " + << goal_.inspect_poses.poses[i].orientation.y << " " + << goal_.inspect_poses.poses[i].orientation.z << " " + << goal_.inspect_poses.poses[i].orientation.w << "\n"; + } + myfile.close(); + result.fsm_result = "Saved"; + result.response = RESPONSE::SUCCESS; + server_.SendResult(ff_util::FreeFlyerActionState::SUCCESS, result); + return fsm_.Update(GOAL_PAUSE); + } else { + NODELET_ERROR_STREAM("Nothing to save"); + result.fsm_result = "Nothing to save"; + result.response = RESPONSE::FAILED; + server_.SendResult(ff_util::FreeFlyerActionState::ABORTED, result); + return fsm_.Update(GOAL_PAUSE); } - myfile.close(); return; } } // Save new goal goal_ = *goal; - result_.vent_result.clear(); - result_.geometry_result.clear(); + result_.anomaly_result.clear(); + result_.inspection_result.clear(); result_.picture_time.clear(); goal_counter_ = 0; ROS_DEBUG_STREAM("RESET COUNTER"); diff --git a/astrobee/behaviors/inspection/tools/inspection_tool.cc b/astrobee/behaviors/inspection/tools/inspection_tool.cc index ac5ed55e..9b45b29c 100644 --- a/astrobee/behaviors/inspection/tools/inspection_tool.cc +++ b/astrobee/behaviors/inspection/tools/inspection_tool.cc @@ -41,6 +41,7 @@ #include // C++ STL includes +#include #include #include #include @@ -87,6 +88,7 @@ DEFINE_double(deadline, -1.0, "Action deadline 
timeout"); // Match the internal states and responses with the message definition using STATE = isaac_msgs::InspectionState; +bool stopflag_ = false; bool has_only_whitespace_or_comments(const std::string & str) { for (std::string::const_iterator it = str.begin(); it != str.end(); it++) { @@ -156,74 +158,75 @@ void ReadFile(std::string file, isaac_msgs::InspectionGoal &goal) { // Inspection action feedback void FeedbackCallback(isaac_msgs::InspectionFeedbackConstPtr const& feedback) { - std::cout << "\r " - << "\rFSM: " << feedback->state.fsm_event - << " -> " << feedback->state.fsm_state - << " (" << feedback->state.fsm_subevent - << " -> " << feedback->state.fsm_substate << ") " << std::flush; + std::string s; + s = feedback->state.fsm_event + + " -> " + feedback->state.fsm_state + + " (" + feedback->state.fsm_subevent + + " -> " + feedback->state.fsm_substate + ")"; + if (s.size() < 70) s.append(70 - s.size(), ' '); + std::cout << "\r" << s.substr(0, 70) << "|Input: " << std::flush; } // Inspection action result void ResultCallback(ff_util::FreeFlyerActionState::Enum code, isaac_msgs::InspectionResultConstPtr const& result) { - std::cout << std::endl << "Result: "; + std::string s; + s = "\nResult: "; // Print general response code switch (code) { case ff_util::FreeFlyerActionState::Enum::SUCCESS: - std::cout << "[SUCCESS] "; - if (FLAGS_anomaly) { - for (int i = 0; i < result->vent_result.size(); i++) { - switch (result->vent_result[i]) { - case isaac_msgs::InspectionResult::VENT_FREE: - std::cout << "Vent " << i <<" is free " << std::endl; break; - case isaac_msgs::InspectionResult::VENT_OBSTRUCTED: - std::cout << "Vent " << i <<" is obstructed " << std::endl; break; - case isaac_msgs::InspectionResult::INCONCLUSIVE: - std::cout << "Vent " << i <<" picture was inconclusive" << std::endl; break; - } - } - } - - if (FLAGS_geometry) { - for (int i = 0; i < result->geometry_result.size(); i++) { - switch (result->geometry_result[i]) { - case isaac_msgs::InspectionResult::PIC_ACQUIRED: - std::cout << "Pic " << i <<" was processed " << std::endl; break; - } - } - } + s += "[SUCCESS] "; break; case ff_util::FreeFlyerActionState::Enum::PREEMPTED: - std::cout << "[PREEMPT] "; break; + s += "[PREEMPT] "; break; case ff_util::FreeFlyerActionState::Enum::ABORTED: - std::cout << "[ABORTED] "; break; + s += "[ABORTED] "; break; case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_CONNECT: - std::cout << "Action timed out on connect"; goto teardown; + std::cout << "\nResult: Action timed out on connect"; goto teardown; case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_ACTIVE: - std::cout << "Action timed out on active"; goto teardown; + std::cout << "\nResult: Action timed out on active"; goto teardown; case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_RESPONSE: - std::cout << "Action timed out on response"; goto teardown; + std::cout << "\nResult: Action timed out on response"; goto teardown; case ff_util::FreeFlyerActionState::Enum::TIMEOUT_ON_DEADLINE: - std::cout << "Action timed out on deadline"; goto teardown; + std::cout << "\nResult: Action timed out on deadline"; goto teardown; + } + // Check that result var is valid + if (result != nullptr) { + // If we get there then we have some response data + s += result->fsm_result + " (Code " + std::to_string(result->response) + ")"; + } + // Limit line to the maximum amout of characters + if (s.size() < 71) s.append(71 - s.size(), ' '); + // In the case we continue + if (result->fsm_result != "Inspection Over") { + s += "|Input: "; + std::cout 
<< s << std::flush; + return; + } + std::cout << s << std::flush; + if (FLAGS_anomaly) { + for (int i = 0; i < result->anomaly_result.size(); i++) { + std::cout << "\n\tVent " << i <<" is " << result->anomaly_result[i].classifier_result; + } + } + + if (FLAGS_geometry) { + for (int i = 0; i < result->inspection_result.size(); i++) { + if (result->inspection_result[i] == isaac_msgs::InspectionResult::PIC_ACQUIRED) { + std::cout << "Pic " << i <<" was processed " << std::endl; break; + } + } } - // If we get there then we have some response data - std::cout << result->fsm_result - << " (Code " << result->response << ")" << std::endl; -teardown: - std::cout << std::endl; - // In all cases we must shutdown - ros::shutdown(); + + teardown: + std::cout << std::endl; + stopflag_ = true; + ros::shutdown(); } -// Ensure all clients are connected -void ConnectedCallback( - ff_util::FreeFlyerActionClient *client) { - // Check to see if connected - if (!client->IsConnected()) return; - // Print out a status message - std::cout << "\r " - << "\rState: CONNECTED" << std::flush; +// Send the inspection goal to the server +void SendGoal(ff_util::FreeFlyerActionClient *client) { // Prepare the goal isaac_msgs::InspectionGoal goal; std::string path = std::string(ros::package::getPath("inspection")); @@ -242,7 +245,7 @@ void ConnectedCallback( goal.command = isaac_msgs::InspectionGoal::ANOMALY; // Read file - std::cout << "Reading: " << FLAGS_anomaly_poses << std::endl; + // std::cout << "Reading: " << FLAGS_anomaly_poses << std::endl; ReadFile(path + FLAGS_anomaly_poses, goal); } else if (FLAGS_geometry) { @@ -250,7 +253,7 @@ void ConnectedCallback( goal.command = isaac_msgs::InspectionGoal::GEOMETRY; // Read file - std::cout << "Reading: " << FLAGS_geometry_poses << std::endl; + // std::cout << "Reading: " << FLAGS_geometry_poses << std::endl; ReadFile(path + FLAGS_geometry_poses, goal); } else if (FLAGS_panorama) { @@ -258,7 +261,7 @@ void ConnectedCallback( goal.command = isaac_msgs::InspectionGoal::PANORAMA; // Read file - std::cout << "Reading: " << FLAGS_panorama_poses << std::endl; + // std::cout << "Reading: " << FLAGS_panorama_poses << std::endl; ReadFile(path + FLAGS_panorama_poses, goal); } else if (FLAGS_volumetric) { @@ -266,13 +269,100 @@ void ConnectedCallback( goal.command = isaac_msgs::InspectionGoal::VOLUMETRIC; // Read file - std::cout << "Reading: " << FLAGS_volumetric_poses << std::endl; + // std::cout << "Reading: " << FLAGS_volumetric_poses << std::endl; ReadFile(path + FLAGS_volumetric_poses, goal); } client->SendGoal(goal); } +void GetInput(ff_util::FreeFlyerActionClient *client) { + while (!stopflag_ && ros::ok()) { + std::string line, val; + std::getline(std::cin, line); + std::string s; + try { + switch (std::stoi(line)) { + case 0: + FLAGS_pause = true; + SendGoal(client); + s = "\r Input: " + line + ") Exiting"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::endl; + stopflag_ = true; + break; + case 1: + FLAGS_pause = true; + SendGoal(client); + s = "\r Input: " + line + ") Pausing"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::flush; + break; + case 2: + FLAGS_pause = false; + FLAGS_resume = true; + SendGoal(client); + s = "\r Input: " + line + ") Resuming"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::endl; + break; + case 3: + FLAGS_pause = false; + FLAGS_resume = false; + FLAGS_repeat = true; + SendGoal(client); + s = "\r Input: " + line + ") Pausing and repeating pose (needs 
resume)"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::flush; + break; + case 4: + FLAGS_pause = false; + FLAGS_resume = false; + FLAGS_repeat = false; + FLAGS_skip = true; + SendGoal(client); + s = "\r Input: " + line + ") Pausing and skipping pose (needs resume)"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::flush; + break; + case 5: + FLAGS_pause = false; + FLAGS_resume = false; + FLAGS_repeat = false; + FLAGS_skip = false; + FLAGS_save = true; + SendGoal(client); + s = "\r Input: " + line + ") Pausing and saving (needs resume)"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::flush; + break; + default: + s = "\r Input: " + line + ") Invalid option"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::endl; + } + } catch (const std::invalid_argument&) { + if (line != "") { + s = "\r Input: " + line + ") Invalid option"; + if (s.size() < 80) s.append(80 - s.size(), ' '); + std::cout << s << std::endl; + } + } + } + return; +} + +// Ensure all clients are connected +void ConnectedCallback( + ff_util::FreeFlyerActionClient *client) { + // Check to see if connected + if (!client->IsConnected()) return; + // Print out a status message + std::cout << "\r " + << "\rState: CONNECTED" << std::flush; + SendGoal(client); +} + // Main entry point for application int main(int argc, char *argv[]) { // Initialize a ros node @@ -327,8 +417,26 @@ int main(int argc, char *argv[]) { ros::shutdown(); } } + +std::cout << "\r " + << "Available actions:\n" + << "0) Exit \n" + << "1) Pause \n" + << "2) Resume \n" + << "3) Repeat \n" + << "4) Skip \n" + << "5) Save \n" + << "Specify the number of the command to publish and hit 'enter'.\n"<< std::endl; + + // Start input thread + boost::thread inp(GetInput, &client); // Synchronous mode - ros::spin(); + while (!stopflag_) { + ros::spinOnce(); + } + // Wait for thread to exit + inp.join(); + // Finish commandline flags google::ShutDownCommandLineFlags(); // Make for great success diff --git a/astrobee/behaviors/inspection/tools/sci_cam_tool.cc b/astrobee/behaviors/inspection/tools/sci_cam_tool.cc index 28515bb8..0e5336b2 100644 --- a/astrobee/behaviors/inspection/tools/sci_cam_tool.cc +++ b/astrobee/behaviors/inspection/tools/sci_cam_tool.cc @@ -60,7 +60,7 @@ void publish_cmd(ros::NodeHandle & nh, ros::Publisher & cmd_pub, arg.data_type = ff_msgs::CommandArg::DATA_TYPE_STRING; arg.s = "{\"name\": \"" + cmdName + "\""; if (cmdVal != "") - arg.s += ", \"value\": \"" + cmdVal + "\""; + arg.s += ", " + cmdVal; arg.s += "}"; cmd_args.push_back(arg); @@ -86,15 +86,15 @@ int main(int argc, char *argv[]) { // Accepted commands std::vector cmd_names, cmd_vals; - cmd_names.push_back("takeSinglePicture"); cmd_vals.push_back(""); - cmd_names.push_back("turnOnContinuousPictureTaking"); cmd_vals.push_back(""); - cmd_names.push_back("turnOffContinuousPictureTaking"); cmd_vals.push_back(""); - cmd_names.push_back("turnOnSavingPicturesToDisk"); cmd_vals.push_back(""); - cmd_names.push_back("turnOffSavingPicturesToDisk"); cmd_vals.push_back(""); - cmd_names.push_back("setPreviewImageWidth"); cmd_vals.push_back("640"); - cmd_names.push_back("setPreviewImageType"); cmd_vals.push_back("color"); - cmd_names.push_back("setFocusDistance"); cmd_vals.push_back("0.39"); - cmd_names.push_back("setFocusMode"); cmd_vals.push_back("manual"); + cmd_names.push_back("takePicture"); cmd_vals.push_back(""); + cmd_names.push_back("setAutoExposure"); cmd_vals.push_back("\"auto\": true"); 
+ cmd_names.push_back("setContinuousPictureTaking"); cmd_vals.push_back("\"continuous\": true"); + cmd_names.push_back("setFocusDistance"); cmd_vals.push_back("\"distance\": 0.39"); + cmd_names.push_back("setFocusMode"); cmd_vals.push_back("\"mode\": \"manual\""); + cmd_names.push_back("setPublishImage"); cmd_vals.push_back("\"publish\": true"); + cmd_names.push_back("setPublishedImageSize"); cmd_vals.push_back("\"width\": 640, \"height\": 480"); + cmd_names.push_back("setPublishedImageType"); cmd_vals.push_back("\"type\": \"color\""); + cmd_names.push_back("setSavePicturesToDisk"); cmd_vals.push_back("\"save\": true"); // Initialize a ros node ros::init(argc, argv, "sci_cam_tool", ros::init_options::AnonymousName); diff --git a/astrobee/hardware/wifi/src/wifi_node.cc b/astrobee/hardware/wifi/src/wifi_node.cc index c84b41a8..fb1ff813 100644 --- a/astrobee/hardware/wifi/src/wifi_node.cc +++ b/astrobee/hardware/wifi/src/wifi_node.cc @@ -53,7 +53,7 @@ class WifiNode : public ff_util::FreeFlyerNodelet { // Read the configuration config_reader::ConfigReader config_params; // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) config_params.SetPath(path); config_params.AddFile("hw/wifi.config"); diff --git a/astrobee/hardware/wifi/tools/wifi_tool.cc b/astrobee/hardware/wifi/tools/wifi_tool.cc index f15cf2ea..5928591c 100644 --- a/astrobee/hardware/wifi/tools/wifi_tool.cc +++ b/astrobee/hardware/wifi/tools/wifi_tool.cc @@ -56,7 +56,7 @@ class WifiTool { // Read the configuration config_reader::ConfigReader config_params; // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) config_params.SetPath(path); config_params.AddFile("hw/wifi.config"); diff --git a/astrobee/simulation/acoustics_cam/readme.md b/astrobee/simulation/acoustics_cam/readme.md index a57fc13e..24660f9d 100644 --- a/astrobee/simulation/acoustics_cam/readme.md +++ b/astrobee/simulation/acoustics_cam/readme.md @@ -23,25 +23,29 @@ The acoustics camera depends on the pyroomacoustics package. This package can be installed together with its dependencies in a Python 2.7 environment using the command: - pip install numpy==1.15.4 scipy==0.18 pillow==6 PyWavelets==0.4.0 \ - networkx==1.8 matplotlib==2.0.0 scikit-image==0.14 \ - pyroomacoustics==0.3.1 + pip install numpy==1.15.4 scipy==0.18 pillow==6 PyWavelets==0.4.0 \ + networkx==1.8 matplotlib==2.0.0 scikit-image==0.14 \ + pyroomacoustics==0.3.1 It would normally install itself in: - $HOME/.local/lib/python2.7/site-packages/pyroomacoustics + $HOME/.local/lib/python2.7/site-packages/pyroomacoustics ### Running the acoustics camera -The acoustics camera ROS node can be run as part of the simulator as: +The acoustics camera ROS node can be run as part of the simulator. For that, +first set up the environment along the lines of: - source ~/astrobee_build/native/devel/setup.zsh - source ~/projects/isaac/devel/setup.zsh - roslaunch isaac sim.launch world:=iss rviz:=true acoustics_cam:=true \ - pose:="11.2 -7.72 5.0 0 0 0 0 1" + export ASTROBEE_SOURCE_PATH=$HOME/astrobee/src + export ASTROBEE_BUILD_PATH=$HOME/astrobee + export ISAAC_WS=$HOME/isaac + source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ISAAC_WS/devel/setup.bash -Here we assume that the astrobee build is in `~/astrobee_build` and that -the ISAAC build is in `~/projects/isaac`. 
+then run: + + roslaunch isaac sim.launch world:=iss rviz:=true acoustics_cam:=true \ + pose:="11.2 -7.72 5.0 0 0 0 0 1" One must ensure that the "DEBUG: AcousticsCam" image checkbox is checked in RViz upon launch, to be able to visualize this image. If it @@ -50,17 +54,17 @@ should be saved from the RViz menu, and the simulation should be restarted. It is also possible to start the simulator without this camera, -and then launch it separately via: +and then launch this camera separately as: - source ~/astrobee_build/native/devel/setup.zsh - source ~/projects/isaac/devel/setup.zsh - roslaunch acoustics_cam acoustics_cam.launch output:=screen + source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ISAAC_WS/devel/setup.bash + roslaunch acoustics_cam acoustics_cam.launch output:=screen The acoustics camera can be run without ROS as: - ~/projects/isaac/src/isaac/hardware/acoustics_cam/nodes/acoustics_cam debug_mode + $ISAAC_WS/src/astrobee/simulation/acoustics_cam/nodes/acoustics_cam debug_mode -In that case, it assumes that the robot pose is the value set in the +In that case it assumes that the robot pose is the value set in the field "debug_robot_pose" in acoustics_cam.json (see below). In this mode it will only create a plot of the acoustics cam image. The sources of sounds will be represented as crosses in this plot, and the @@ -70,20 +74,20 @@ camera (microphone) position will be shown as a star. The acoustics camera subscribes to - /loc/truth/pose + /loc/truth/pose to get the robot pose. It publishes its image, camera pose, and camera intrinsics on topics: - /hw/cam_acoustics - /sim/acoustics_cam/pose - /sim/acoustics_cam/info + /hw/cam_acoustics + /sim/acoustics_cam/pose + /sim/acoustics_cam/info By default, the camera takes pictures as often as it can (see the configuration below), which is rarely, in fact, as it is slow. It -listens however to the topic +listens however to the topic: - /comm/dds/command + /comm/dds/command for guest science commands that may tell it to take a single picture at a specific time, or to take pictures continuously. Such a command @@ -93,9 +97,9 @@ processed. ### Configuration -The behavior of this camera is described in +The behavior of this camera is described in: - isaac/hardware/acoustics_cam/acoustics_cam.json + $ISAAC_WS/src/astrobee/simulation/acoustics_cam/acoustics_cam.json It has the following entries: diff --git a/astrobee/simulation/isaac_gazebo/readme.md b/astrobee/simulation/isaac_gazebo/readme.md index 79661374..4a9fd09f 100644 --- a/astrobee/simulation/isaac_gazebo/readme.md +++ b/astrobee/simulation/isaac_gazebo/readme.md @@ -52,10 +52,17 @@ This camera publishes the RGB heat image, its pose and intrinsics, on topics: /sim/heat_cam/pose /sim/heat_cam/info -To launch the simulator and see the heat map, one can do: +To launch the simulator and see the heat map, first set up the environment. +The paths below may need to be adjusted for your system. 
+ + export ASTROBEE_SOURCE_PATH=$HOME/astrobee/src + export ASTROBEE_BUILD_PATH=$HOME/astrobee + export ISAAC_WS=$HOME/isaac + source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ISAAC_WS/devel/setup.bash + +Then run: - source ~/freeflyer_build/native/devel/setup.zsh - source ~/projects/isaac/devel/setup.zsh roslaunch isaac sim.launch world:=iss rviz:=true \ pose:="11.2 -7.72 5.0 0 0 0 0 1" diff --git a/communications/ros_gs_bridge/src/ros_gs_bridge.cc b/communications/ros_gs_bridge/src/ros_gs_bridge.cc index 19b8847f..3e4deb87 100644 --- a/communications/ros_gs_bridge/src/ros_gs_bridge.cc +++ b/communications/ros_gs_bridge/src/ros_gs_bridge.cc @@ -165,55 +165,13 @@ void RosGsBridge::GuestScienceDataCallback( // TODO(Katie) } else if (data->topic == "ir") { // Data contains the inspection result + // TODO(Katie) Inspection result has been changed, need to fix this code isaac_msgs::InspectionResult result; - int vent_size, geometry_size, response; - - // Get vent array - if (delim_index == std::string::npos) { - ROS_ERROR_STREAM("ros gs bridge: Error parsing vent size out of " << - "inspection result. Data string is " << data_str); - return; - } - - vent_size = std::stoi(data_str.substr(0, delim_index)); - data_str.erase(0, (delim_index + 1)); - for (int i = 0; i < vent_size; i++) { - delim_index = data_str.find("@"); - if (delim_index == std::string::npos) { - ROS_ERROR_STREAM("ros gs bridge: Error parsing vent array out of " << - "inspection result. Data string is " << data_str); - return; - } - result.vent_result.push_back(std::stoi(data_str.substr(0, delim_index))); - data_str.erase(0, (delim_index + 1)); - } - - // Get geometry array - delim_index = data_str.find("@"); - if (delim_index == std::string::npos) { - ROS_ERROR_STREAM("ros gs bridge: Error parsing geometry size out of " << - "inspection result. Data string is " << data_str); - return; - } - - geometry_size = std::stoi(data_str.substr(0, delim_index)); - data_str.erase(0, (delim_index + 1)); - for (int i = 0; i < geometry_size; i++) { - delim_index = data_str.find("@"); - if (delim_index == std::string::npos) { - ROS_ERROR_STREAM("ros gs bridge: Error parsing geometry array out of" << - " inspection result. Data string is " << data_str); - return; - } - result.geometry_result.push_back(std::stoi(data_str.substr(0, - delim_index))); - data_str.erase(0, (delim_index + 1)); - } + int response; // Get response - delim_index = data_str.find("@"); if (delim_index == std::string::npos) { - ROS_ERROR_STREAM("ros gs bridge: Error parsing response out of " << + ROS_ERROR_STREAM("ros gs bridge: Error parsing vent size out of " << "inspection result. 
Data string is " << data_str); return; } @@ -339,6 +297,7 @@ bool RosGsBridge::GrabControlService(std_srvs::Empty::Request& req, void RosGsBridge::ImageInspectionResultCallback( ff_util::FreeFlyerActionState::Enum const& state, isaac_msgs::ImageInspectionResultConstPtr const& result) { + // TODO(Katie) Need to add the anomaly_result // Time stamp in the result header is not used so we don't need to package it // TODO(Katie) to be thorough, it would be good to package up the timestamp // TODO(Katie) change string to be json diff --git a/debian/changelog b/debian/changelog index 1ddfd3e3..c7b1fd46 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,4 +1,12 @@ -isaac (0.2.3) UNRELEASED; urgency=medium +isaac (0.2.4) testing; urgency=medium + + * Inspection tool improvements + * Better CI + * Small bug fixes + + -- Marina Moreira Fri, 11 Feb 2022 13:57:38 -0800 + +isaac (0.2.3) testing; urgency=medium * Panorama picture taking mode supported * Cargo transport simulation support diff --git a/debian/isaac0.postinst b/debian/isaac0.postinst index d0671567..2233df9d 100755 --- a/debian/isaac0.postinst +++ b/debian/isaac0.postinst @@ -2,7 +2,9 @@ if [ "$1" = "configure" ]; then chmod -R g+rwx /opt/isaac/env_wrapper.sh + chmod -R g+rwx /opt/isaac/share/inspection/resources if [ $(getent group users) ]; then chgrp -R users /opt/isaac/env_wrapper.sh + chgrp -R users /opt/isaac/share/inspection/resources fi fi diff --git a/debian/rules b/debian/rules index eb689b40..8a19d854 100755 --- a/debian/rules +++ b/debian/rules @@ -41,26 +41,24 @@ override_dh_auto_configure: catkin profile add debian --force && \ catkin profile set debian && \ catkin config \ - --extend $(ARMHF_CHROOT_DIR)/opt/astrobee \ - --build-space $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp/build \ - --install-space $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp/opt/isaac \ - --devel-space $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp/devel \ - --log-space $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp/logs \ - --whitelist isaac_astrobee_description isaac_util isaac_msgs inspection isaac_hw_msgs wifi isaac gs_action_helper \ + --extend $(ASTROBEE_WS)/armhf/opt/astrobee \ + --build-space debian/build \ + --install-space src/debian/tmp/opt/isaac \ + --devel-space debian/devel \ + --log-space debian/logs \ + --whitelist isaac_astrobee_description isaac_util isaac_msgs inspection cargo isaac_hw_msgs wifi isaac gs_action_helper \ --install \ --cmake-args \ -DCMAKE_TOOLCHAIN_FILE=$(CMAKE_TOOLCHAIN_FILE) \ -DARMHF_CHROOT_DIR=$(ARMHF_CHROOT_DIR) \ - -DARMHF_TOOLCHAIN=$(ARMHF_TOOLCHAIN) && \ - catkin clean -y --force + -DARMHF_TOOLCHAIN=$(ARMHF_TOOLCHAIN) override_dh_auto_build: ; # the install command is already invoked in the build override_dh_auto_install: cd .. 
&& \ - catkin build && \ - rsync --remove-source-files -azh $(ARMHF_CHROOT_DIR)/home/astrobee/isaac/tmp src/debian + catkin build # we don't test override_dh_auto_test: ; diff --git a/dense_map/geometry_mapper/CMakeLists.txt b/dense_map/geometry_mapper/CMakeLists.txt index 6823fdb6..18c12e73 100644 --- a/dense_map/geometry_mapper/CMakeLists.txt +++ b/dense_map/geometry_mapper/CMakeLists.txt @@ -91,10 +91,11 @@ set(TEXRECON_DIR ${CMAKE_BINARY_DIR}/texrecon) ExternalProject_Add(texrecon PREFIX ${TEXRECON_DIR} GIT_REPOSITORY https://github.com/oleg-alexandrov/mvs-texturing.git - GIT_TAG b6bac86 + GIT_TAG 015a2ac CMAKE_ARGS -DCMAKE_VERBOSE_MAKEFILE=TRUE -DCMAKE_CXX_FLAGS=-fPIC - BUILD_COMMAND $(MAKE) # Per: https://cmake.org/pipermail/cmake/2011-April/043772.html - # Set some options below to not have it always recreate CMakeLists files + BUILD_COMMAND $(MAKE) + # Per: https://cmake.org/pipermail/cmake/2011-April/043772.html, + # set some options below to not have it always recreate CMakeLists files BUILD_ALWAYS 0 UPDATE_COMMAND "" INSTALL_COMMAND cmake -E echo "Skipping install step for texrecon." @@ -175,8 +176,8 @@ add_executable(camera_refiner tools/camera_refiner.cc) target_link_libraries(camera_refiner geometry_mapper_lib gflags glog) -add_executable(camera_refiner2 tools/camera_refiner2.cc) -target_link_libraries(camera_refiner2 +add_executable(camera_refiner_old tools/camera_refiner_old.cc) +target_link_libraries(camera_refiner_old geometry_mapper_lib gflags glog) add_executable(extract_pc_from_bag tools/extract_pc_from_bag.cc) @@ -187,8 +188,8 @@ add_executable(add_sci_cam_to_bag tools/add_sci_cam_to_bag.cc) target_link_libraries(add_sci_cam_to_bag geometry_mapper_lib gflags glog) -add_executable(process_bag tools/process_bag.cc) -target_link_libraries(process_bag +add_executable(scale_bag tools/scale_bag.cc) +target_link_libraries(scale_bag geometry_mapper_lib gflags glog) add_executable(colorize_bag_images tools/colorize_bag_images.cc) @@ -207,6 +208,14 @@ add_executable(orthoproject tools/orthoproject.cc) target_link_libraries(orthoproject geometry_mapper_lib gflags glog) +add_executable(test_texture_gen tools/test_texture_gen.cc) +target_link_libraries(test_texture_gen + geometry_mapper_lib gflags glog) + +add_executable(old_orthoproject tools/old_orthoproject.cc) +target_link_libraries(old_orthoproject + geometry_mapper_lib gflags glog) + add_executable(streaming_mapper tools/streaming_mapper.cc) target_link_libraries(streaming_mapper geometry_mapper_lib gflags glog) diff --git a/dense_map/geometry_mapper/include/camera_image.h b/dense_map/geometry_mapper/include/camera_image.h new file mode 100644 index 00000000..b3568460 --- /dev/null +++ b/dense_map/geometry_mapper/include/camera_image.h @@ -0,0 +1,62 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. + * + * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking + * platform" software is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +#ifndef CAMERA_IMAGE_H_ +#define CAMERA_IMAGE_H_ + +#include + +namespace dense_map { + +// A class to encompass all known information about a camera +struct cameraImage { + // An index to look up the type of camera. This will equal the + // value ref_camera_type if and only if this is a reference + // camera. + int camera_type; + + // The timestamp for this camera (in floating point seconds since epoch) + // measured by the clock/cpu which is particular to this camera. + double timestamp; + + // The timestamp with an adjustment added to it to be in + // reference camera time + double ref_timestamp; + + // The timestamp of the closest cloud for this image, measured + // with the same clock as the 'timestamp' value. + double cloud_timestamp; + + // Indices to look up the reference cameras bracketing this camera + // in time. The two indices will have same value if and only if + // this is a reference camera. + int beg_ref_index; + int end_ref_index; + + // The image for this camera, in grayscale + cv::Mat image; + + // The corresponding depth cloud, for an image + depth camera. + // Will be empty and uninitialized for a camera lacking depth. + cv::Mat depth_cloud; +}; + +} // namespace dense_map + +#endif // CAMERA_IMAGE_H_ diff --git a/dense_map/geometry_mapper/include/dense_map_ros_utils.h b/dense_map/geometry_mapper/include/dense_map_ros_utils.h index bd89a9c0..c9c1bf9a 100644 --- a/dense_map/geometry_mapper/include/dense_map_ros_utils.h +++ b/dense_map/geometry_mapper/include/dense_map_ros_utils.h @@ -40,6 +40,13 @@ #include #include +// Forward declaration +namespace pcl { + class PointXYZ; + template + class PointCloud; +} + namespace dense_map { // Publish a given pose @@ -72,8 +79,13 @@ bool lookupImage(double desired_time, std::vector const // repeated calls to this function we always travel forward in time, // and we keep track of where we are in the bag using the variable // bag_pos that we update as we go. -bool lookupCloud(double desired_time, std::vector const& bag_msgs, double max_time_diff, - cv::Mat& cloud, int& bag_pos, double& found_time); +bool lookupCloud(double desired_time, std::vector const& bag_msgs, + double max_time_diff, cv::Mat& cloud, int& bag_pos, double& found_time); + +// A wrapper around a function in pcl_ros/point_cloud.h to avoid +// including that header all over the place as it creates an annoying +// warning. +void msgToPcl(sensor_msgs::PointCloud2::ConstPtr pc_msg, pcl::PointCloud & pc); // Read the list of topics in a bag while avoiding repetitions void readTopicsInBag(std::string const& bag_file, std::vector& topics); @@ -81,12 +93,13 @@ void readTopicsInBag(std::string const& bag_file, std::vector& topi // A small struct in which to store an opened ROS bag and the vector of its messages // that we will use later to quickly navigate through it while going forward in time. 
struct RosBagHandle { + RosBagHandle() = delete; // The rosbag API prevents anything else than initialization RosBagHandle(std::string const& bag_file, std::string const& topic) { + bag_msgs.clear(); bag.open(bag_file, rosbag::bagmode::Read); std::vector topics; topics.push_back(topic); view = boost::shared_ptr(new rosbag::View(bag, rosbag::TopicQuery(topics))); - bag_msgs.clear(); for (rosbag::MessageInstance const m : *view) bag_msgs.push_back(m); } rosbag::Bag bag; diff --git a/dense_map/geometry_mapper/include/dense_map_utils.h b/dense_map/geometry_mapper/include/dense_map_utils.h index 24b03d0c..4deefd77 100644 --- a/dense_map/geometry_mapper/include/dense_map_utils.h +++ b/dense_map/geometry_mapper/include/dense_map_utils.h @@ -41,15 +41,25 @@ namespace dense_map { -const int NUM_SCALAR_PARAMS = 1; -const int NUM_OPT_CTR_PARAMS = 2; // optical center in x and y -const int NUM_RESIDUALS = 2; // Same as pixel size -const int NUM_XYZ_PARAMS = 3; -const int NUM_RIGID_PARAMS = 7; // (quaternion (4) + translation (3)) +const int NUM_SCALAR_PARAMS = 1; // Used to float single-value params // NOLINT +const int NUM_OPT_CTR_PARAMS = 2; // optical center in x and y // NOLINT +const int NUM_PIX_PARAMS = 2; // NOLINT +const int NUM_XYZ_PARAMS = 3; // NOLINT +const int NUM_RIGID_PARAMS = 7; // quaternion (4) + translation (3) // NOLINT +const int NUM_AFFINE_PARAMS = 12; // 3x3 matrix (9) + translation (3) // NOLINT // A function to split a string like 'optical_center focal_length' into // its two constituents. -void parse_intrinsics_to_float(std::string const& intrinsics_to_float, std::set& intrinsics_to_float_set); +void parse_intrinsics_to_float(std::string const& intrinsics_to_float, + std::set& intrinsics_to_float_set); + +// A function to split a string like 'haz_cam sci_cam' into +// its two constituents and validate against the list of known cameras. +void parse_extrinsics_to_float(std::vector const& cam_names, + std::string const& ref_cam_name, + std::string const& depth_to_image_name, + std::string const& extrinsics_to_float, + std::set& extrinsics_to_float_set); // Extract a rigid transform to an array of length NUM_RIGID_PARAMS void rigid_transform_to_array(Eigen::Affine3d const& aff, double* arr); @@ -58,6 +68,12 @@ void rigid_transform_to_array(Eigen::Affine3d const& aff, double* arr); // transform. Normalize the quaternion to make it into a rotation. void array_to_rigid_transform(Eigen::Affine3d& aff, const double* arr); +void affine_transform_to_array(Eigen::Affine3d const& aff, double* arr); +void array_to_affine_transform(Eigen::Affine3d& aff, const double* arr); + +// Convert a string of values separated by spaces to a vector of doubles. 
+std::vector string_to_vector(std::string const& str); + // Read a 4x4 pose matrix of doubles from disk void readPoseMatrix(cv::Mat& pose, std::string const& filename); @@ -74,26 +90,45 @@ std::string matType(cv::Mat const& mat); // Read the transform from depth to given camera void readCameraTransform(config_reader::ConfigReader& config, std::string const transform_str, - Eigen::MatrixXd& transform); - -// Read a bunch of transforms from the robot calibration file -void readConfigFile(std::string const& navcam_to_hazcam_timestamp_offset_str, - std::string const& scicam_to_hazcam_timestamp_offset_str, - std::string const& hazcam_to_navcam_transform_str, - std::string const& scicam_to_hazcam_transform_str, std::string const& navcam_to_body_transform_str, - std::string const& hazcam_depth_to_image_transform_str, double& navcam_to_hazcam_timestamp_offset, - double& scicam_to_hazcam_timestamp_offset, Eigen::MatrixXd& hazcam_to_navcam_trans, - Eigen::MatrixXd& scicam_to_hazcam_trans, Eigen::MatrixXd& navcam_to_body_trans, - Eigen::Affine3d& hazcam_depth_to_image_transform, camera::CameraParameters& nav_cam_params, - camera::CameraParameters& haz_cam_params, camera::CameraParameters& sci_cam_params); + Eigen::Affine3d& transform); + +// Read some transforms from the robot calibration file +void readConfigFile // NOLINT +(// Inputs // NOLINT + std::vector const& cam_names, // NOLINT + std::string const& nav_cam_to_body_trans_str, // NOLINT + std::string const& haz_cam_depth_to_image_trans_str, // NOLINT + // Outputs // NOLINT + std::vector & cam_params, // NOLINT + std::vector & nav_to_cam_trans, // NOLINT + std::vector & nav_to_cam_timestamp_offset, // NOLINT + Eigen::Affine3d & nav_cam_to_body_trans, // NOLINT + Eigen::Affine3d & haz_cam_depth_to_image_trans); // NOLINT + +// Save some transforms from the robot calibration file. This has some very fragile +// logic and cannot handle comments in the config file. +void updateConfigFile // NOLINT +(std::vector const& cam_names, // NOLINT + std::string const& haz_cam_depth_to_image_trans_str, // NOLINT + std::vector const& cam_params, // NOLINT + std::vector const& nav_to_cam_trans, // NOLINT + std::vector const& nav_to_cam_timestamp_offset, // NOLINT + Eigen::Affine3d const& haz_cam_depth_to_image_trans); // NOLINT // Given two poses aff0 and aff1, and 0 <= alpha <= 1, do linear interpolation. -Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, Eigen::Affine3d const& aff1); +Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, + Eigen::Affine3d const& aff1); // Given a set of poses indexed by timestamp in an std::map, find the // interpolated pose at desired timestamp. This is efficient // only for very small maps. Else use the StampedPoseStorage class. -bool findInterpPose(double desired_time, std::map const& poses, Eigen::Affine3d& interp_pose); +bool findInterpPose(double desired_time, std::map const& poses, + Eigen::Affine3d& interp_pose); + +// Implement some heuristic to find the maximum rotation angle that can result +// from applying the given transform. It is assumed that the transform is not +// too different from the identity. +double maxRotationAngle(Eigen::Affine3d const& T); // A class to store timestamped poses, implementing O(log(n)) linear // interpolation at a desired timestamp. 
For fast access, keep the @@ -138,15 +173,6 @@ double fileNameToTimestamp(std::string const& file_name); // Create a directory unless it exists already void createDir(std::string const& dir); -// Modify in-place the robot config file -void update_config_file(bool update_cam1, std::string const& cam1_name, - boost::shared_ptr cam1_params, bool update_cam2, - std::string const& cam2_name, boost::shared_ptr cam2_params, - bool update_depth_to_image_transform, Eigen::Affine3d const& depth_to_image_transform, - bool update_extrinsics, Eigen::Affine3d const& cam2_to_cam1_transform, - bool update_timestamp_offset, std::string const& cam1_to_cam2_timestamp_offset_str, - double cam1_to_cam2_timestamp_offset); - // A little holding structure for nav, sci, and haz poses struct CameraPoses { std::map haz_depth_to_image_timestamps; @@ -190,16 +216,6 @@ void pickTimestampsInBounds(std::vector const& timestamps, double left_b // Must always have NUM_EXIF the last. enum ExifData { TIMESTAMP = 0, EXPOSURE_TIME, ISO, APERTURE, FOCAL_LENGTH, NUM_EXIF }; -// Triangulate two rays emanating from given undistorted and centered pixels -Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, Eigen::Affine3d const& world_to_cam1, - Eigen::Affine3d const& world_to_cam2, Eigen::Vector2d const& pix1, - Eigen::Vector2d const& pix2); - -// Triangulate n rays emanating from given undistorted and centered pixels -Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, - std::vector const& world_to_cam_vec, - std::vector const& pix_vec); - // A utility for saving a camera in a format ASP understands. // TODO(oalexan1): Expose the sci cam intrinsics rather than having // them hard-coded. diff --git a/dense_map/geometry_mapper/include/interest_point.h b/dense_map/geometry_mapper/include/interest_point.h index d505d573..45ea3b4f 100644 --- a/dense_map/geometry_mapper/include/interest_point.h +++ b/dense_map/geometry_mapper/include/interest_point.h @@ -21,16 +21,19 @@ #define INTEREST_POINT_H_ #include +#include #include #include + #include #include #include #include +#include #include #include @@ -146,6 +149,7 @@ struct InterestPoint { }; // End class InterestPoint typedef std::pair, std::vector > MATCH_PAIR; +typedef std::map, dense_map::MATCH_PAIR> MATCH_MAP; void detectFeatures(const cv::Mat& image, bool verbose, // Outputs @@ -166,6 +170,30 @@ void saveImagesAndMatches(std::string const& left_prefix, std::string const& rig std::pair const& index_pair, MATCH_PAIR const& match_pair, std::vector const& images); +// Triangulate two rays emanating from given undistorted and centered pixels +Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, Eigen::Affine3d const& world_to_cam1, + Eigen::Affine3d const& world_to_cam2, Eigen::Vector2d const& pix1, + Eigen::Vector2d const& pix2); + +// Triangulate n rays emanating from given undistorted and centered pixels +Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, + std::vector const& world_to_cam_vec, + std::vector const& pix_vec); + +struct cameraImage; + +void detectMatchFeatures( // Inputs + std::vector const& cams, + std::vector const& cam_names, + std::vector const& cam_params, + std::vector const& world_to_cam, int num_overlaps, + int initial_max_reprojection_error, int num_match_threads, + bool verbose, + // Outputs + std::vector>>& keypoint_vec, + std::vector>& pid_to_cid_fid, + std::vector & image_files); + } // namespace dense_map #endif // INTEREST_POINT_H_ diff --git 
a/dense_map/geometry_mapper/include/texture_processing.h b/dense_map/geometry_mapper/include/texture_processing.h index 65395966..4bf09af0 100644 --- a/dense_map/geometry_mapper/include/texture_processing.h +++ b/dense_map/geometry_mapper/include/texture_processing.h @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -65,31 +66,46 @@ namespace dense_map { // and the number of samples needed (with given pixel size) to // sample that face (with a small padding on both sides). // Also store the transform from that plane to the face itself. + +// We use 'int64_t' values instead of 'int', as the latter is 32-bit and +// the area of some images we encounter can then overflow. + struct FaceInfo { double x, min_y, min_z; - Eigen::Affine3d YZPlaneToTriangleFace; - // We will have [min_y, min_y + width * pixel_size] x [min_z, min_z + height * pixel_size] // contain the face transformed in that plane with the normal pointing along the z axis. - int width, height; + int64_t width, height; - int padding; // The padding to apply to each face bounding box before - // sampling it + // The padding to apply to each face bounding box before sampling it + int64_t face_info_padding; // The pixel at (x, min_y, min_z) in the plane will end up at location (shift_u, shift_v) // in the texture. - int shift_u, shift_v; + int64_t shift_u, shift_v; + + // Used to flag a valid face + bool valid; + + // The transform which makes a version of the face in the y-z plane to the actual triangle + Eigen::Affine3d YZPlaneToTriangleFace; // The vertices of a face after being transformed to a plane with x constant std::vector TransformedVertices; - // Keep track of the fact that sometimes not all faces have their info - // initialized + // Initialize all members. Invalid or unprocessed faces will have + // shift_u == invalid_shift_u. FaceInfo() { - int max_int = std::numeric_limits::max(); - shift_u = max_int; - shift_v = max_int; + x = 0.0; + min_y = 0.0; + min_z = 0.0; + width = 0L; + height = 0L; + face_info_padding = 1L; + valid = true; + shift_u = std::numeric_limits::max(); + shift_v = 0L; + YZPlaneToTriangleFace = Eigen::Affine3d::Identity(); TransformedVertices.resize(3); } }; @@ -104,46 +120,49 @@ class IsaacTexturePatch { typedef std::shared_ptr Ptr; typedef std::shared_ptr ConstPtr; typedef std::vector Faces; - typedef std::vector Texcoords; + typedef std::vector Texcoords; private: - int label; + int64_t label; Faces faces; Texcoords texcoords; - int width, height; + int64_t width, height; public: /** Constructs a texture patch. 
*/ - IsaacTexturePatch(int _label, std::vector const& faces, std::vector const& texcoords, - int width, int height); + IsaacTexturePatch(int64_t label, std::vector const& faces, + std::vector const& texcoords, + int64_t width, int64_t height); IsaacTexturePatch(IsaacTexturePatch const& texture_patch); static IsaacTexturePatch::Ptr create(IsaacTexturePatch::ConstPtr texture_patch); - static IsaacTexturePatch::Ptr create(int label, std::vector const& faces, - std::vector const& texcoords, int width, int height); + static IsaacTexturePatch::Ptr create(int64_t label, std::vector const& faces, + std::vector const& texcoords, + int64_t width, int64_t height); IsaacTexturePatch::Ptr duplicate(void); std::vector& get_faces(void); std::vector const& get_faces(void) const; - std::vector& get_texcoords(void); - std::vector const& get_texcoords(void) const; + std::vector & get_texcoords(void); + std::vector const& get_texcoords(void) const; - int get_label(void) const; - int get_width(void) const; - int get_height(void) const; - int get_size(void) const; + int64_t get_label(void) const; + int64_t get_width(void) const; + int64_t get_height(void) const; + int64_t get_size(void) const; }; -inline IsaacTexturePatch::IsaacTexturePatch(int label, std::vector const& faces, - std::vector const& texcoords, int width, int height) +inline IsaacTexturePatch::IsaacTexturePatch(int64_t label, std::vector const& faces, + std::vector const& texcoords, + int64_t width, int64_t height) : label(label), faces(faces), texcoords(texcoords), width(width), height(height) {} IsaacTexturePatch::IsaacTexturePatch(IsaacTexturePatch const& texture_patch) { label = texture_patch.label; faces = std::vector(texture_patch.faces); - texcoords = std::vector(texture_patch.texcoords); + texcoords = std::vector(texture_patch.texcoords); width = texture_patch.width; height = texture_patch.height; } @@ -152,32 +171,39 @@ inline IsaacTexturePatch::Ptr IsaacTexturePatch::create(IsaacTexturePatch::Const return std::make_shared(*texture_patch); } -inline IsaacTexturePatch::Ptr IsaacTexturePatch::create(int label, std::vector const& faces, - std::vector const& texcoords, int width, - int height) { +inline IsaacTexturePatch::Ptr IsaacTexturePatch::create( + int64_t label, std::vector const& faces, + std::vector const& texcoords, int64_t width, int64_t height) { return std::make_shared(label, faces, texcoords, width, height); } -inline IsaacTexturePatch::Ptr IsaacTexturePatch::duplicate(void) { return Ptr(new IsaacTexturePatch(*this)); } +inline IsaacTexturePatch::Ptr IsaacTexturePatch::duplicate(void) { + return Ptr(new IsaacTexturePatch(*this)); +} -inline int IsaacTexturePatch::get_label(void) const { return label; } +inline int64_t IsaacTexturePatch::get_label(void) const { return label; } -inline int IsaacTexturePatch::get_width(void) const { return width; } +inline int64_t IsaacTexturePatch::get_width(void) const { return width; } -inline int IsaacTexturePatch::get_height(void) const { return height; } +inline int64_t IsaacTexturePatch::get_height(void) const { return height; } -inline std::vector& IsaacTexturePatch::get_texcoords(void) { return texcoords; } +inline std::vector& IsaacTexturePatch::get_texcoords(void) { + return texcoords; +} inline std::vector& IsaacTexturePatch::get_faces(void) { return faces; } -inline std::vector const& IsaacTexturePatch::get_texcoords(void) const { return texcoords; } +inline std::vector const& IsaacTexturePatch::get_texcoords(void) const { + return texcoords; +} inline std::vector const& 
IsaacTexturePatch::get_faces(void) const { return faces; } -inline int IsaacTexturePatch::get_size(void) const { return get_width() * get_height(); } +inline int64_t IsaacTexturePatch::get_size(void) const { return get_width() * get_height(); } /** - * Class representing a texture atlas. + * Class representing a texture atlas. Have to duplicate the code from texrecon + * as there are custom changes for ISAAC. */ class IsaacTextureAtlas { public: @@ -185,14 +211,13 @@ class IsaacTextureAtlas { typedef std::vector Faces; typedef std::vector TexcoordIds; - typedef std::vector Texcoords; + typedef std::vector Texcoords; - unsigned int get_width(); - unsigned int get_height(); + int64_t get_width(); + int64_t get_height(); private: - unsigned int width, height, determined_height; - unsigned int const padding; + int64_t width, height, determined_height; bool finalized; Faces faces; @@ -206,9 +231,9 @@ class IsaacTextureAtlas { void resize_atlas(void); public: - IsaacTextureAtlas(unsigned int width, unsigned int height); + IsaacTextureAtlas(int64_t width, int64_t height); - static IsaacTextureAtlas::Ptr create(unsigned int width, unsigned int height); + static IsaacTextureAtlas::Ptr create(int64_t width, int64_t height); Faces& get_faces(void); TexcoordIds& get_texcoord_ids(void); @@ -224,57 +249,160 @@ class IsaacTextureAtlas { void finalize(void); }; -inline IsaacTextureAtlas::Ptr IsaacTextureAtlas::create(unsigned int width, unsigned int height) { +inline IsaacTextureAtlas::Ptr IsaacTextureAtlas::create(int64_t width, int64_t height) { return Ptr(new IsaacTextureAtlas(width, height)); } inline IsaacTextureAtlas::Faces& IsaacTextureAtlas::get_faces(void) { return faces; } -inline IsaacTextureAtlas::TexcoordIds& IsaacTextureAtlas::get_texcoord_ids(void) { return texcoord_ids; } +inline IsaacTextureAtlas::TexcoordIds& IsaacTextureAtlas::get_texcoord_ids(void) { + return texcoord_ids; +} inline IsaacTextureAtlas::Texcoords& IsaacTextureAtlas::get_texcoords(void) { return texcoords; } inline mve::ByteImage::Ptr& IsaacTextureAtlas::get_image(void) { return image; } -inline unsigned int IsaacTextureAtlas::get_width() { return width; } +inline int64_t IsaacTextureAtlas::get_width() { return width; } + +inline int64_t IsaacTextureAtlas::get_height() { return height; } + -inline unsigned int IsaacTextureAtlas::get_height() { return height; } +/** + * Class representing a obj model. Have to duplicate the texrecon structure, as + * we use double-precision texcoords. + */ +class IsaacObjModel { + public: + struct Face { + std::size_t vertex_ids[3]; + std::size_t texcoord_ids[3]; + std::size_t normal_ids[3]; + }; + + struct Group { + std::string material_name; + std::vector faces; + }; + + typedef std::vector Vertices; + typedef std::vector TexCoords; + typedef std::vector Normals; + typedef std::vector Groups; + + private: + Vertices vertices; + TexCoords texcoords; + Normals normals; + Groups groups; + MaterialLib material_lib; + + public: + /** Saves the obj model to an .obj file, its material lib and the + materials with the given prefix. 
*/ + void save_to_files(std::string const & prefix) const; + + MaterialLib & get_material_lib(void); + Vertices & get_vertices(void); + TexCoords & get_texcoords(void); + Normals & get_normals(void); + Groups & get_groups(void); + + static void save(IsaacObjModel const & model, std::string const & prefix); +}; + +inline +MaterialLib & +IsaacObjModel::get_material_lib(void) { + return material_lib; +} + +inline +IsaacObjModel::Vertices & +IsaacObjModel::get_vertices(void) { + return vertices; +} + +inline +IsaacObjModel::TexCoords & +IsaacObjModel::get_texcoords(void) { + return texcoords; +} + +inline +IsaacObjModel::Normals & +IsaacObjModel::get_normals(void) { + return normals; +} + +inline +IsaacObjModel::Groups & +IsaacObjModel::get_groups(void) { + return groups; +} // Load and prepare a mesh void loadMeshBuildTree(std::string const& mesh_file, mve::TriangleMesh::Ptr& mesh, - std::shared_ptr& mesh_info, std::shared_ptr& graph, + std::shared_ptr& mesh_info, + std::shared_ptr& graph, std::shared_ptr& bvh_tree); -void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_threads, +void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_threads, // outputs - std::vector& face_projection_info, std::vector& texture_atlases, - tex::Model& model); + std::vector& face_projection_info, + std::vector& texture_atlases, + IsaacObjModel& model); // Put an textured mesh obj file in a string -void formObj(tex::Model& texture_model, std::string const& out_prefix, std::string& obj_str); +void formObj(IsaacObjModel& texture_model, std::string const& out_prefix, std::string& obj_str); // Put an textured mesh obj file in a string void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vector const& face_vec, - std::map const& uv_map, std::string const& out_prefix, std::string& obj_str); + std::map const& uv_map, + std::string const& out_prefix, std::string& obj_str); void formMtl(std::string const& out_prefix, std::string& mtl_str); +// The images from the bag may need to be resized to be the same +// size as in the calibration file. 
+void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image); + // Project texture and find the UV coordinates -void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, cv::Mat const& image, - camera::CameraModel const& cam, double num_exclude_boundary_pixels, +void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, + cv::Mat const& image, camera::CameraModel const& cam, + double num_exclude_boundary_pixels, // outputs - std::vector& smallest_cost_per_face, std::vector& face_vec, + std::vector& smallest_cost_per_face, + std::vector& face_vec, std::map& uv_map); // Project texture on a texture model that was pre-filled already, so // only the texture pixel values need to be computed -void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, cv::Mat const& image, - camera::CameraModel const& cam, std::vector& smallest_cost_per_face, double pixel_size, - int num_threads, std::vector const& face_projection_info, - std::vector& texture_atlases, tex::Model& model, cv::Mat& out_texture); +void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, + cv::Mat const& image, camera::CameraModel const& cam, + std::vector& smallest_cost_per_face, double pixel_size, + int64_t num_threads, std::vector const& face_projection_info, + std::vector& texture_atlases, + IsaacObjModel& model, cv::Mat& out_texture); + +// Find where ray emanating from a distorted pixel intersects a mesh. Return true +// on success. +bool ray_mesh_intersect(Eigen::Vector2d const& dist_pix, + camera::CameraParameters const& cam_params, + Eigen::Affine3d const& world_to_cam, + mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + double min_ray_dist, double max_ray_dist, + // Output + Eigen::Vector3d& intersection); + +void meshProject(mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, + cv::Mat const& image, + Eigen::Affine3d const& world_to_cam, camera::CameraParameters const& cam_params, + int64_t num_exclude_boundary_pixels, std::string const& out_prefix); // Save a model -void isaac_save_model(ObjModel* obj_model, std::string const& prefix); +void isaac_save_model(IsaacObjModel* obj_model, std::string const& prefix); } // namespace dense_map diff --git a/dense_map/geometry_mapper/readme.md b/dense_map/geometry_mapper/readme.md index 84dfa173..2f37cb46 100644 --- a/dense_map/geometry_mapper/readme.md +++ b/dense_map/geometry_mapper/readme.md @@ -19,8 +19,8 @@ coming from an Astrobee robot. The two main tools are: The following environmental variables should be set up (please adjust them for your particular configuration): - export ASTROBEE_SOURCE_PATH=$HOME/astrobee/src - export ASTROBEE_BUILD_PATH=$HOME/astrobee + export ASTROBEE_WS=$HOME/astrobee + export ASTROBEE_SOURCE_PATH=$ASTROBEE_WS/src export ISAAC_WS=$HOME/isaac ## Robot sensors @@ -28,9 +28,9 @@ them for your particular configuration): This software makes use of three sensors that are mounted on the front face of the robot: - - haz cam: A low-resolution depth and intensity camera - - nav cam: A wide field of view medium-resolution navigation camera - - sci cam: A high-resolution narrow field of view "science" camera + - haz cam: A low-resolution depth and intensity camera + - nav cam: A wide field of view medium-resolution navigation camera + - sci cam: A high-resolution narrow field of view "science" camera The nav cam is used to determine robot's position and orientation as it explores its environment. 
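+
+As a quick sanity check that a recorded bag indeed contains data from
+these sensors, one can list the bag's topics and look for the camera
+topics discussed further down this document (the bag name below is a
+placeholder):
+
+    rosbag info mydata.bag | grep -E '/hw/|/mgt/'
+
+Topics such as ``/hw/depth_haz/points`` and ``/hw/cam_sci/compressed``
+should show up.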
@@ -72,27 +72,97 @@ have different invocations for each case.
The reader is advised to pay close attention to this, and we will make
it clear at every step which of the two paradigms one refers to.

-## Additional sensors present only in simulation
+## Sensors present in simulation

-A number of robot sensors are present only as part of the simulator.
-Those are the heat camera, described in:
+The simulator supports the ``nav_cam``, ``sci_cam``, ``haz_cam``
+cameras, which are analogous to the ones on the real robot, and also
+the ``heat_cam`` and ``acoustics_cam`` cameras which exist only in
+simulation. All these have been tested with the geometry mapper and
+streaming mapper.

-    $ISAAC_WS/src/astrobee/simulation/gazebo/readme.md
+The ``sci_cam`` and ``haz_cam`` cameras are not enabled by default in
+the simulator. To enable them, edit the simulation configuration, in

-and the acoustics camera, documented at
+    $ASTROBEE_SOURCE_PATH/astrobee/config/simulation/simulation.config

-    $ISAAC_WS/src/astrobee/simulation/acoustics_cam/readme.md
+and set:

-These tools are implemented and documented in different places
-in the source tree because the first one is a gazebo plugin
-while the second one is a Python node.
+    haz_cam_rate = 1.0;
+    sci_cam_rate = 1.0;
+    sci_cam_continuous_picture_taking = true;

-See also
+The latter will make sure sci cam pictures are taken automatically. If
+custom behavior is desired, see:
+
+    $ISAAC_WS/src/astrobee/behaviors/inspection/readme.md
+
+More information about the simulated nav_cam, haz_cam, and sci_cam is
+at:

     $ASTROBEE_SOURCE_PATH/simulation/readme.md

-for information about the simulated nav_cam, haz_cam, and sci_cam.
-
+The heat camera is described in:
+
+    $ISAAC_WS/src/astrobee/simulation/isaac_gazebo/readme.md
+
+The acoustics camera and how to enable it are documented at:
+
+    $ISAAC_WS/src/astrobee/simulation/acoustics_cam/readme.md
+
+## Adding support for new cameras
+
+For an actual camera, rather than a simulated one, the files:
+
+    $ASTROBEE_SOURCE_PATH/astrobee/config/cameras.config
+    $ASTROBEE_SOURCE_PATH/astrobee/config/geometry.config
+    $ASTROBEE_SOURCE_PATH/astrobee/config/transforms.config
+    $ASTROBEE_SOURCE_PATH/astrobee/config/robots/robotName.config
+
+should be modified and entities added, mirroring ``sci_cam``, with
+fields for intrinsics, extrinsics (nav cam to given cam transform),
+timestamp offset (by default it should be 0), and body to given camera
+transform.
+
+Note that the first three config files above are used by any robot,
+while the last one refers only to the robot on which the new camera is
+to be used. The Lua-based processing of config files is tolerant of
+the fact that a different robot config file lacks a new camera's
+settings even if the shared config files define them. For a camera
+that is expected to be present on all robots, each robot config file
+should be changed, but otherwise just the desired robot's file can be
+modified, in addition to the shared files.
+
+For a simulated camera, the above is not necessary. Instead, that
+camera must define the topics:
+
+    /sim/some_cam/pose
+    /sim/some_cam/info
+
+where the desired camera is named ``some_cam``. These carry the camera
+poses and camera intrinsics info, respectively. It is suggested to
+follow the example of ``heat_cam`` or ``sci_cam`` when creating
+such a simulated camera.
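+
+While the simulator is running with such a camera enabled, one can
+verify that its pose and intrinsics are indeed being published. For
+example, using the existing ``sci_cam`` as an illustration:
+
+    rostopic echo -n 1 /sim/sci_cam/pose
+    rostopic echo -n 1 /sim/sci_cam/info
+
+A newly added ``some_cam`` can be checked the same way via its own
+``/sim/some_cam`` topics.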
+
+Then, whether the camera is real or simulated, a topic is expected
+on which its images will be published, for example, named:
+
+    /hw/cam_some
+
+which then needs to be passed to the geometry and streaming mappers,
+per the documentation for these tools.
+
+To visualize images published by your camera in ``rviz``, appropriate
+entities must be added in ``iss.rviz``, etc.
+
+Uncompressed or compressed images are supported, but for the latter
+adjustments must be made, mirroring ``sci_cam``. For example, the
+image topic should be:
+
+    /hw/cam_some/compressed
+
+except in ``iss.rviz``, where the suffix ``/compressed`` is not
+needed, but instead one sets ``Transport Hint: compressed``.
+
## Compiling the software

It is assumed that by now the Astrobee and ISAAC software is compiled.

@@ -149,10 +219,12 @@ experimental remeshing tool.

### With real data

-Acquire a bag of data on the bot as follows.
+Acquire a bag of data on the bot. The current approach is to use a
+recording profile. A step-by-step procedure is outlined below if a
+recording profile has not been set up.

First give the bot the ability to acquire intensity data with the
-depth camera (haz_cam). For that, connect to the MLP processor of the
+depth camera (haz_cam). For that, connect to the MLP processor of the
bot. Edit the file:

    /opt/astrobee/config/cameras.config

@@ -209,43 +281,52 @@ Copy the resulting bag off the robot.

### With simulated data

-Edit the simulation configuration, in
+The astrobee simulator supports a handful of cameras, mentioned
+earlier in the text.

-    $ASTROBEE_SOURCE_PATH/astrobee/config/simulation/simulation.config
+#### Recording simulated data

-and set:
+Start the simulator, such as:

-    haz_cam_rate = 1.0;
-    sci_cam_rate = 1.0;
-    sci_cam_continuous_picture_taking = true;
+    source $ASTROBEE_WS/devel/setup.bash
+    source $ISAAC_WS/devel/setup.bash
+    roslaunch isaac sim.launch rviz:=true \
+      pose:="11.0 -7.0 5.0 0 0 0 1" world:=iss

-The later will make sure sci cam pictures are taken automatically. If
-custom behavior is desired, see
+In rviz turn on visualizing the desired cameras from the
+Panels menu, as otherwise the needed camera topics may not be
+published.

-    $ISAAC_WS/src/astrobee/behaviors/inspection/readme.md
+Adjust the 3D view in rviz with the mouse so that the robot, which is
+present in the middle of the module, can be seen.

-Start the simulator, such as:
+When recording simulated data for a given camera, for example, for
+``sci_cam``, so that later it can be used with the geometry and
+streaming mapper, one needs to record, in addition to depth clouds and
+camera images, also the camera poses and intrinsics information, which
+is done as follows:

-    source $ASTROBEE_BUILD_PATH/devel/setup.bash
-    source $ISAAC_WS/devel/setup.bash
-    roslaunch isaac sim.launch rviz:=true pose:="11.0 -7.0 5.0 0 0 0 1" world:=iss
+    rosbag record /hw/depth_haz/points \
+      /hw/cam_sci/compressed /sim/sci_cam/pose /sim/sci_cam/info
+
+It is good to check for the precise name of the camera image topic.
+For haz cam the image topic will be instead:

-In RVIZ turn on visualizing these cameras from the Panels menu.
+    /hw/depth_haz/extended/amplitude_int

-When recording simulated data, need to also save the camera poses and
-camera information, which is different than how we do for real data,
-when these quantities must be estimated by algorithms.
Hence the -record command to run is: +For nav cam, the image topic will be: - rosbag record /hw/depth_haz/points \ - /hw/depth_haz/extended/amplitude_int /hw/cam_sci \ - /sim/haz_cam/pose /sim/sci_cam/pose /sim/sci_cam/info + /hw/cam_nav -Note that we record no nav cam data at all since that one is needed -only for computing camera poses that are pre-computed in -simulation. Also the sci cam topic changed, compared to the earlier -record command, as the simulator returns uncompressed images (they are -grayscale as well, unlike for the real sci cam). +and the same convention is followed for the rest of the cameras. + +If desired to record data for many cameras, these topics must +be specified for each of them. For example, for ``heat_cam`` add +the lines: + + /hw/cam_heat /sim/heat_cam/pose /sim/heat_cam/info + +to the ``rosbag record`` command. The robot can be told to move around by either running a plan, or by sending it a move command, such as: @@ -261,7 +342,7 @@ This applies only to real data. If the recorded data is split into many small bags, as it often happens on the ISS, those bags should be first merged as documented in: - $ASTROBEE_SOURCE_PATH/localization/sparse_mapping/readme.md + $ASTROBEE_SOURCE_PATH/localization/sparse_mapping/readme.md In order to save bandwidth, the sci cam images are published at a reduced resolution (usually 640x480 pixels), while the full-resolution @@ -284,8 +365,8 @@ aperture, and focal length) will be saved in the bag file as well. (Normally the sci cam data will be downloaded from the HLP of ISS robots using some established infrastructure. Alternatively, one can -use `adb pull`. After this tool is used, the data can be manually -deleted from HLP by first connecting to it with `adb shell`.) +use ``adb pull``. After this tool is used, the data can be manually +deleted from HLP by first connecting to it with ``adb shell``.) In order to run camera_calibrator, the sci cam data in the bag needs to be decompressed, resized to 1/4 of the resolution, and made to be @@ -302,14 +383,18 @@ the fact that the calibration was done at reduced resolution. To accomplish this processing, once the sci cam data is integrated into the bag, one can do the following: - $ISAAC_WS/devel/lib/geometry_mapper/process_bag -input_bag input.bag \ - -output_bag output.bag --image_type grayscale --scale 0.25 + $ISAAC_WS/devel/lib/geometry_mapper/scale_bag --input_bag input.bag \ + --output_bag output.bag --image_type grayscale --scale 0.25 Note that the processed sci cam images will be now on topic -`/hw/cam_sci2`. +``/hw/cam_sci2``. ## Camera calibration +Currently the calibrator solution is not that accurate. It is suggested +to use instead camera_refiner (see further down) on a bag acquired +without a calibration target. + Camera calibration is an advanced topic. Likely your robot's cameras have been calibrated by now, and then this step can be skipped. @@ -342,12 +427,18 @@ builds upon the instructions used in the doc referenced right above.) 
source $KALIBR_WS/devel/setup.bash
rosrun kalibr kalibr_calibrate_cameras \
-      --topics /hw/cam_nav /hw/depth_haz/extended/amplitude_int /hw/cam_sci2 \
+      --topics /mgt/img_sampler/nav_cam/image_record \
+        /hw/depth_haz/extended/amplitude_int \
+        /hw/cam_sci2 \
       --models pinhole-fov pinhole-radtan pinhole-radtan \
       --target $ASTROBEE_SOURCE_PATH/scripts/calibrate/config/granite_april_tag.yaml \
       --bag calibration.bag --dont-show-report --verbose \
       --target_corners_dirs calib_nav calib_haz calib_sci

+Note that above we assume that the image sampler was used to collect a
+subset of the nav cam images. Otherwise the nav cam topic would be
+``/hw/cam_nav``.
+
This will create three directories with the corners extracted from the
nav, haz, and sci cameras.

@@ -373,7 +464,7 @@ The calibrator program is at:

It can be used to calibrate the intrinsics of the nav and haz camera
pair, and then of the sci and haz pair. These are referred to as
-`cam1` and `cam2` below.
+``cam1`` and ``cam2`` below.

It is important to remember that the haz cam records both an image
intensity and a depth point cloud.

@@ -504,20 +595,20 @@ refiner can refine the sci cam intrinsics
and will likely do a better job.

It was found experimentally that the depth to image transform
-which is updated by `--update_depth_to_image_transform`
+which is updated by ``--update_depth_to_image_transform``
depends very much on how far the calibration target is from the
-camera. The value of `hazcam_depth_to_image_transform` already in the
+camera. The value of ``hazcam_depth_to_image_transform`` already in the
robot config file, which shows roughly a scale transform with a factor
of 0.95, is good enough.

One also has to be careful with the option
-`--timestamp_offset_sampling`, and even avoid using it in a first
+``--timestamp_offset_sampling``, and even avoid using it in a first
pass.

Notice that here we chose to not update the intrinsics of cam1
(nav_cam). That is because this camera was calibrated with Kalibr
a while ago and it is known to be accurate. If desired to calibrate
it, one can add the option
-`--update_cam1`.
+``--update_cam1``.

Only if after using the geometry mapper one notices visible
registration errors (which manifest themselves as artifacts in the

@@ -537,11 +628,11 @@ can go as follows:

       --update_extrinsics \
       --timestamp_offset_sampling '-0.35 -0.15 11'

-As before, one better not use the option `--timestamp_offset_sampling`
+As before, one had better not use the option ``--timestamp_offset_sampling``
unless one is sure it is necessary.

Note that this time we optimize the intrinsics of cam1 (sci_cam)
-and we do not use `--update_depth_to_image_transform` or optimize
+and we do not use ``--update_depth_to_image_transform`` or optimize
the intrinsics of cam2 (haz_cam) as this was already done earlier. We
do not optimize the distortion of cam1 as that can result in incorrect
values if there are not enough measurements at image periphery.

@@ -556,25 +647,21 @@ down this document, in the section on camera refinement.

Nav cam images can be extracted from a bag as follows:

-    $ASTROBEE_BUILD_PATH/devel/lib/localization_node/extract_image_bag \
-      mydata.bag -image_topic /hw/cam_nav -output_directory nav_data \
-      -use_timestamp_as_image_name
+    $ASTROBEE_WS/devel/lib/localization_node/extract_image_bag \
+      mydata.bag -image_topic /mgt/img_sampler/nav_cam/image_record \
+      -output_directory nav_data -use_timestamp_as_image_name

-The last option, `-use_timestamp_as_image_name`, must not be missed.
+The last option, ``-use_timestamp_as_image_name``, must not be missed.
It makes it easy to look up the image acquisition based on image name,
and this is used by the geometry mapper.

-Note that bags acquired on the ISS usually have the nav cam image topic
-as:
-
-    /mgt/img_sampler/nav_cam/image_record
-
-Then the command above needs to be adjusted accordingly.
+One should check beforehand if the nav cam topic is correct. If the image
+sampler was not used, the nav cam topic would be /hw/cam_nav.

To extract the sci cam data, if necessary, do:

-    $ASTROBEE_BUILD_PATH/devel/lib/localization_node/extract_image_bag \
-      mydata.bag -image_topic /hw/cam_sci/compressed \
+    $ASTROBEE_WS/devel/lib/localization_node/extract_image_bag \
+      mydata.bag -image_topic /hw/cam_sci/compressed \
       -output_directory sci_data -use_timestamp_as_image_name

To extract the depth clouds, which may be useful for debugging purposes,
do:

@@ -587,24 +674,27 @@ do:

## Map building and registration

Build and register a SURF sparse map with the nav cam images. (This is
-needed only with real data.)
+needed only with real data.) See the
+[sparse mapping](https://nasa.github.io/astrobee/html/sparsemapping.html)
+documentation in the Astrobee repository, with more details given in
+the [map building](https://nasa.github.io/astrobee/html/map_building.html)
+page.

-An example for how to collect images and build a map is shown later in
-this doc, in the section about camera refinement. See also the
-reference documentation in
+If the map to be built is large, consider using the Theia SfM
+software. See the [Theia documentation](https://nasa.github.io/astrobee/html/theia_map.html)
+for how to use this package to create Astrobee sparse maps.

-    $ASTROBEE_SOURCE_PATH/localization/sparse_mapping/readme.md
-
-and also in build_map.md in that repository.
+An example for how to build a map in the context of calibration is
+also given further down this document.

This SURF map will be used with the geometry mapper. Rebuild it with
BRISK features, to be used with the streaming mapper. Examine the
obtained BRISK map. If it does not have enough features, rebuild it
-with a lower value of -default_brisk_threshold and
--max_brisk_threshold (For example, use 70 instead of the default of
+with a lower value of ``--default_brisk_threshold`` and
+``--max_brisk_threshold`` (for example, use 70 instead of the default of
90. This may make the sparse map bigger.)

-It is suggested to not use the `--histogram_equalization` flag for the
+It is suggested to not use the ``--histogram_equalization`` flag for the
SURF map, but to use it with the BRISK map.

Don't forget to set:

@@ -625,44 +715,48 @@ sci cam images with the geometry mapper.

## When using real data

-The geometry mapper can handle both color and grayscale images, and
-both at full and reduced resolution. (With the sci cam topic name
-being different at reduced resolution.)
+The geometry mapper fuses the depth cloud data and creates textures
+from the image cameras.
+
+Any image camera is supported, as long as it is present in the robot
+configuration file and a topic for it is in the bag file (see more
+details further down). The geometry mapper can handle both color and
+grayscale images, and, for sci cam, both full and reduced resolution.

-Ensure that the bot name is correct below. Set `ASTROBEE_SOURCE_PATH`,
-`ASTROBEE_BUILD_PATH`, and `ISAAC_WS` as earlier. Run:
+Ensure that the bot name is correct below. Set ``ASTROBEE_SOURCE_PATH``,
+``ASTROBEE_WS``, and ``ISAAC_WS`` as earlier.
Run: export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config export ASTROBEE_WORLD=iss export ASTROBEE_ROBOT=bsharp2 - source $ASTROBEE_BUILD_PATH/devel/setup.bash + source $ASTROBEE_WS/devel/setup.bash source $ISAAC_WS/devel/setup.bash python $ISAAC_WS/src/dense_map/geometry_mapper/tools/geometry_mapper.py \ --ros_bag data.bag \ --sparse_map nav_data.map \ - --nav_cam_topic /hw/cam_nav \ + --camera_types "sci_cam nav_cam haz_cam" \ + --camera_topics "/hw/cam_sci/compressed /mgt/img_sampler/nav_cam/image_record /hw/depth_haz/extended/amplitude_int"\ + --undistorted_crop_wins "sci_cam,1250,1000 nav_cam,1100,776 haz_cam,250,200" \ --haz_cam_points_topic /hw/depth_haz/points \ - --haz_cam_intensity_topic /hw/depth_haz/extended/amplitude_int \ - --sci_cam_topic /hw/cam_sci/compressed \ - --camera_type all \ --start 0 \ --duration 1e+10 \ --sampling_spacing_seconds 5 \ --dist_between_processed_cams 0.1 \ + --angle_between_processed_cams 5.0 \ --depth_exclude_columns 0 \ --depth_exclude_rows 0 \ --foreshortening_delta 5.0 \ --median_filters "7 0.1 25 0.1" \ --reliability_weight_exponent 2 \ --voxblox_integrator merged \ - --depth_hole_fill_diameter 30.0 \ + --depth_hole_fill_diameter 0 \ --max_ray_length 2.5 \ --voxel_size 0.01 \ --max_iso_times_exposure 5.1 \ --smoothing_time 5e-6 \ --max_num_hole_edges 8000 \ - --max_hole_diameter 1.8 \ + --max_hole_diameter 0.3 \ --num_min_faces_in_component 100 \ --num_components_to_keep 100 \ --edge_keep_ratio 0.2 \ @@ -671,21 +765,31 @@ Ensure that the bot name is correct below. Set `ASTROBEE_SOURCE_PATH`, Parameters: - --ros_bag: A ROS bag with recorded nav_cam, haz_cam, and sci_cam data. + --ros_bag: A ROS bag with recorded image and point cloud data. --sparse_map: A registered SURF sparse map of a subset of the nav_cam data. - --nav_cam_topic: The nav cam image topic in the bag file. - --haz_cam_points_topic: The haz cam point cloud topic in the bag file. - --haz_cam_intensity_topic: The haz cam intensity topic in the bag file. - --sci_cam_topic: The sci cam image topic in the bag file. - --camera_type: Specify which cameras to process. Options: all (default), - nav_cam, and sci_cam (with simulated data only sci_cam is supported). + --camera_types: Specify the cameras to use for the textures, as a list + in quotes. Default: "sci_cam nav_cam haz_cam". With simulated + data only ``sci_cam`` is supported. + --camera_topics: Specify the bag topics for the cameras to texture + (in the same order as in ``--camera_types``). Use a list in quotes. + The default is in the usage above. + --undistorted_crop_wins: The central region to keep after + undistorting an image and before texturing. For sci_cam the + numbers are at 1/4th of the full resolution (resolution of + calibration) and will be adjusted for the actual input image + dimensions. Use a list in quotes. The default is + "sci_cam,1250,1000 nav_cam,1100,776 haz_cam,250,200". + --haz_cam_points_topic: The depth point cloud topic in the bag file. --start: How many seconds into the bag to start processing the data. --duration: For how many seconds to do the processing. --sampling_spacing_seconds: How frequently to sample the sci and haz cameras, in seconds. The default is 2. - --dist_between_processed_cams: Once an image or depth image is processed, - how far the camera should move (in meters) before it should process - more data. The default is 0.1 meters. 
+    --dist_between_processed_cams: Once an image or depth cloud is processed,
+      process a new one whenever either the camera moves by more than this
+      distance, in meters, or the angle changes by more than
+      --angle_between_processed_cams, in degrees. The default is 0.1.
+    --angle_between_processed_cams: See --dist_between_processed_cams. The
+      default is 5.0.
     --sci_cam_timestamps: Process only these sci cam timestamps (rather than
       any in the bag using --dist_between_processed_cams, etc.). Must be a
       file with one timestamp per line.

@@ -697,17 +801,19 @@ Parameters:
       The default is 0.
     --foreshortening_delta: A smaller value here will result in holes
       in depth images being filled more aggressively but potentially
-      with more artifacts in foreshortened regions.
+      with more artifacts in foreshortened regions. Works only with
+      positive --depth_hole_fill_diameter.
     --median_filters: Given a list "w1 d1 w2 d2 ... ", remove a depth
       image point if it differs, in the Manhattan norm, from the median
       of cloud points in the pixel window of size wi centered at it by
-      more than di. This removes points sticking out for each such i. The
-      default is "7 0.1 25 0.1".
+      more than di. This removes points sticking out for each such
+      i. The default is "7 0.1 25 0.1".
     --depth_hole_fill_diameter: Fill holes in the depth point clouds
-      with this diameter, in pixels. This happens before the clouds
-      are fused. It is suggested to not make this too big, as more
-      hole-filling happens on the fused mesh later (see
-      --max_hole_diameter). The default is 30.
+      with this diameter, in pixels. This happens before the clouds are
+      fused. If set to a positive value it can fill really big holes but
+      may introduce artifacts. It is better to leave the hole-filling
+      for later, once the mesh is fused (see --max_hole_diameter).
+      The default is 0.
     --reliability_weight_exponent: A larger value will give more
       weight to depth points corresponding to pixels closer to depth
       image center, which are considered more reliable. The default is

@@ -718,18 +824,23 @@ Parameters:
     --voxblox_integrator: When fusing the depth point clouds use
       this VoxBlox method. Options are: "merged", "simple", and
      "fast". The default is "merged".
-    --voxel_size is the grid size used for binning the points, in meters.
+    --voxel_size: The grid size used for binning depth cloud points and
+      creating the mesh. Measured in meters.
     --max_iso_times_exposure: Apply the inverse gamma transform to
      images, multiply them by max_iso_times_exposure/ISO/exposure_time
      to adjust for lighting differences, then apply the gamma
      transform back. This value should be set to the maximum observed
      ISO * exposure_time. The default is 5.1. Not used with simulated
      data.
-    --smoothing_time: A larger value will result in a smoother mesh. The default
-      is 0.00005.
-    --max_num_hole_edges: Close holes in the mesh which have no more than this
-      many edges. The default is 1000.
-    --max_hole_diameter: The diameter (in meters) of the largest hole in the
-      mesh to fill. The default is 0.3.
+    --smoothing_time: A larger value will result in a smoother mesh. The
+      default is 0.00005.
+    --no_boundary_erosion: Do not erode the boundary when smoothing
+      the mesh. Erosion may help with making the mesh more regular and
+      easier to hole-fill, but may be undesirable in regions which
+      don't get to be hole-filled.
+    --max_num_hole_edges: Close holes in the mesh which have no more
+      than this many edges. The default is 1000.
+ --max_hole_diameter: The diameter (in meters) of the largest hole + in the mesh to fill. The default is 0.3. --num_min_faces_in_component: Keep only connected mesh components with at least this many faces. --num_components_to_keep: How many of the largest connected components @@ -750,9 +861,6 @@ Parameters: reliable. In this case one must specify carefully the range of times in the bag to use as it will no longer be constrained by the timestamps in the map. - --sci_cam_timestamps: Process only these sci cam timestamps (rather than - any in the bag using --dist_between_processed_cams, etc.). Must be a - file with one timestamp per line. --start_step: Start processing at this step. Useful for resuming work. Values: 0 (determine poses), 1 (fuse meshes), 2 (smoothe mesh), 3 (fill holes), 4 (clean mesh and rm small connected @@ -767,10 +875,13 @@ Parameters: registered map. --external_mesh: Use this mesh to texture the images, rather than creating one from depth data in the current bag. - --scicam_to_hazcam_timestamp_offset_override_value: Override the - value of scicam_to_hazcam_timestamp_offset from the robot config + --nav_cam_to_sci_cam_offset_override_value: Override the + value of nav_cam_to_sci_cam_timestamp_offset from the robot config file with this value. --verbose: If specified, echo all output in the terminal. + --texture_individual_images: If specified, in addition to a joint texture + of all images create individual textures for each image and camera. Does + not work with simulated cameras. For debugging. --save_debug_data: If specified, save many intermediate datasets for debugging. @@ -782,9 +893,15 @@ with Meshlab: - data_dir/hole_filled_mesh.ply: A mesh obtained by filling holes in the fused mesh. - data_dir/clean_mesh.ply: The mesh with small connected components removed. - data_dir/smooth_mesh2.ply: A further smoothed version of the mesh. + - data_dir/hole_filled_mesh2.ply: A further hole-filled mesh. - data_dir/simplified_mesh.ply: The simplified mesh. - - data_dir/nav_cam_texture/run.obj: The mesh overlayed with the nav cam texture. + - data_dir/smooth_mesh3.ply: A further smoothed version of the mesh. - data_dir/sci_cam_texture/run.obj: The mesh overlayed with the sci cam texture. + - data_dir/nav_cam_texture/run.obj: The mesh overlayed with the nav cam texture. + - data_dir/haz_cam_texture/run.obj: The mesh overlayed with the haz cam texture. + +(Several passes of smoothing and hole-filling, as done above, appear +necessary from experiments.) Intermediate products are the undistorted nav cam and sci cam images. It is suggested to review those in an image viewer, such as 'eog' and @@ -793,21 +910,21 @@ of the pipeline (invocation of the texrecon tool) can be redone. To run this tool it is suggested to pick a portion of the bag where the images face the wall as much as possible, so one may need to -change the `--start` and `--duration` values. +change the ``--start`` and ``--duration`` values. -Unless the flag `--use_brisk_map` is set, the data processing will be +Unless the flag ``--use_brisk_map`` is set, the data processing will be restricted to the range of timestamps contained within the sparse map -(this is another restriction, in addition to `--start` and -`--duration`). +(this is another restriction, in addition to ``--start`` and +`--duration``). -If this tool is too slow, or if localization fails, consider tweaking -the --localization_options above. 
-If this tool is too slow, or if localization fails, consider tweaking
-the --localization_options above. For example, to make localization
+If this tool is too slow, or if localization fails, consider adjusting
+the ``--localization_options`` above. For example, to make localization
work better (which will make the program slower) decrease the value of
---default_surf_threshold, and increase --early_break_landmarks,
---min_surf_features, and --max_surf_features. To make it faster,
+``--default_surf_threshold``, and increase ``--early_break_landmarks``,
+``--min_surf_features``, and ``--max_surf_features``. To make it faster,
do the opposite.

-The values of --depth_exclude_columns and --depth_exclude_rows
+The values of ``--depth_exclude_columns`` and ``--depth_exclude_rows``
can be adjusted to remove rows and columns at the
margins which may result in a nicer mesh. If, for
example, the bot moves upwards or downwards, there is little loss in removing
@@ -817,46 +934,80 @@ with good lateral overlap, removing some columns won't result in much
loss but may remove some noise.

If it is desired to use only a precise subset of the sci cam images,
-specify those with the option --sci_cam_timestamps.
+specify those with the option ``--sci_cam_timestamps``.

If several acquisitions were performed, and the geometry mapper was
run with each of them, those can be merged by invoking the
-geometry mapper with the option --merge_maps.
+geometry mapper with the option ``--merge_maps``.

The geometry mapper can run with a previously created mesh if invoked
-with the option --external_mesh.
+with the option ``--external_mesh``.

The most time-consuming part of the geometry mapper is computing the
initial poses, which is the earliest step, or step 0. To resume the
-geometry mapper at any step, use the option '--start_step num', where
-0 <= num and num <= 7. For example, one may want to apply further
-smoothing to the mesh or more hole-filling, before resuming with the
-next steps.
+geometry mapper at any step, use the option ``--start_step num``. For
+example, one may want to apply further smoothing to the mesh or more
+hole-filling, before resuming with the next steps.
+
+For a given camera type to be textured it must have entries in
+``cameras.config`` and the robot config file (such as
+``bumble.config``), which are analogous to existing
+``nav_cam_to_sci_cam_timestamp_offset``,
+``nav_cam_to_sci_cam_transform``, and ``sci_cam`` intrinsics, with
+"sci" replaced by your camera name. The geometry mapper arguments
+``--camera_types``, ``--camera_topics``, and
+``--undistorted_crop_wins`` must be populated accordingly, with some
+careful choice to be made for the last one. Images for the desired
+camera must be present in the bag file at the specified topic.

## With simulated data

-When working with simulated data, the flag
+The geometry mapper works with any simulated camera that has no
+distortion. It was tested to work with simulated images for
+``sci_cam``, ``haz_cam``, ``heat_cam``, and ``acoustics_cam``. It does
+not work with ``nav_cam``, which has distortion.

-    --simulated_data
+The flag:

-should be passed to this tool. Also, the sci cam image topic option
-should be:
-
-    --sci_cam_topic /hw/cam_sci
+    --simulated_data

-It is not necessary to produce or pass in a sparse map with simulated
-data. The nav cam and the camera information provided by the value of
-`ASTROBEE_ROBOT` will be ignored. Instead, camera poses and
-information will be read from
+should be passed to the geometry mapper. The sparse map is not
+necessary, no localization will take place, and intrinsics
+information, camera transform, and timestamp offset will not be read
+from the robot configuration file. Instead, it is expected that each
+simulated camera, for example ``sci_cam``, will provide, in addition to an
+image topic, the topics

-    /sim/haz_cam/pose
     /sim/sci_cam/pose
     /sim/sci_cam/info

-For this operation it is suggested to pick a portion of the bag where
-the images face the wall as much as possible, so one may need to
-change the `-start` and `-duration` values. That will result in the
-best possible output.
+having the pose and intrinsics of each camera image. These should be
+recorded in the bag (see more on recording earlier in the document).
+
+It is assumed that the simulated images are not distorted. In particular,
+``nav_cam``, which has fisheye lens distortion, is not supported.
+
+The simulated haz_cam is required to be among the topics being recorded
+and read in, because its pose is needed to process the depth clouds.
+
+Example of running the geometry mapper with simulated data:
+
+    sci_topic=/hw/cam_sci/compressed
+    haz_topic=/hw/depth_haz/extended/amplitude_int
+    python $ISAAC_WS/src/dense_map/geometry_mapper/tools/geometry_mapper.py \
+      --simulated_data \
+      --ros_bag data.bag \
+      --camera_types "sci_cam haz_cam" \
+      --camera_topics "$sci_topic $haz_topic" \
+      --haz_cam_points_topic /hw/depth_haz/points \
+      --output_dir data_dir \
+      --sampling_spacing_seconds 2 \
+      --dist_between_processed_cams 0.1 \
+      --angle_between_processed_cams 5.0 \
+      --verbose
+
+It is important to check that the correct names for the camera image
+topics are passed to ``--camera_topics``.

## Running the streaming mapper

@@ -872,9 +1023,11 @@ obtained textured model to be visualized.

### Running with real data

+#### When the robot (or nav cam) poses are known
+
To run the streaming mapper with real data for the given bot, do:

-    source $ASTROBEE_BUILD_PATH/devel/setup.bash
+    source $ASTROBEE_WS/devel/setup.bash
     source $ISAAC_WS/devel/setup.bash
     export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources
     export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config
@@ -883,40 +1036,176 @@ To run the streaming mapper with real data for the given bot, do:
     roslaunch isaac isaac_astrobee.launch llp:=disabled nodes:=framestore \
       streaming_mapper:=true output:=screen

-This will load the mesh produced by the geometry mapper from
+Wait until it finishes forming the texture model, which may take
+30 seconds, or up to 3 minutes for very large meshes.
+
+Ensure that the ``ASTROBEE_ROBOT`` name is correct above.
+
+This node will load the mesh produced by the geometry mapper from

    $ISAAC_WS/src/isaac/resources/geometry/${ASTROBEE_WORLD}.ply

-It will listen to the robot body pose being published at:
+If an updated geometry mapper mesh exists, it should be copied to that
+location first.
+
+The tool will read the configuration options from:
+
+    $ISAAC_WS/src/isaac/config/dense_map/streaming_mapper.config
+
+By default, as specified in that file, it will listen to the robot
+body pose being published at:

    /gnc/ekf

-and the sci cam image data at:
+That config file specifies the camera image to texture, which, by
+default, is ``sci_cam``. Its image topic must also be set; for
+``sci_cam`` it is normally

    /hw/cam_sci/compressed

+while for ``haz_cam`` it is:
+
+    /hw/depth_haz/extended/amplitude_int
+
+For ``nav_cam`` the image topic is either
+
+    /mgt/img_sampler/nav_cam/image_record
+
+if the images are played from a bag which recorded the images produced
+with the image sampler, or
+
+    /hw/cam_nav
+
+if no image sampler was used. The rest of the cameras usually follow
+the convention on the line above, for example, the ``heat_cam`` topic
+is ``/hw/cam_heat``.
+
+For the sci cam, the streaming mapper also expects image exposure data
+on the topic:
+
    /hw/sci_cam_exif

-It will publish the the texture on the topics:
+to do exposure correction, unless in simulation mode or
+``sci_cam_exposure_correction`` is set to false. This is not needed
+for other cameras.
+
+In order for the streaming mapper to produce texture it should receive
+input on its needed topics (robot or nav_cam pose and
+the image to texture), either from a bag or from the robot in real
+time. A data bag can be played with the usual command:
+
+    rosbag play data.bag
+
+See the note further down if it is desired to rename the topics
+on which some bag data is published.
+
+The streaming mapper will publish several topics having texture
+information, which for ``sci_cam`` are:

    /ism/sci_cam/img
    /ism/sci_cam/obj
    /ism/sci_cam/mtl

-(when the texture type is sci_cam, and this is customizable, see
-below).
+and an analogous convention is followed for other cameras.

The header.stamp value for each published message will be the same as
-the header.stamp for the corresponding input sci cam image.
+the header.stamp for the corresponding input camera image.

-Next one can play a bag having the input topics expected by the robot.
-(See the note below on perhaps redirecting the nav cam topic if the
-localization node is necessary.)
+The data produced by the streaming mapper can be recorded (for
+sci_cam, and analogously for other cameras) with:

-Additional configuration option for this tool can be specified in
+    rosbag record /ism/sci_cam/img /ism/sci_cam/obj /ism/sci_cam/mtl \
+      -b 10000
+
+The recording should start before the input bag is played. The ``-b``
+option tells ROS to increase its recording buffer size, as sometimes
+the streaming mapper can publish giant meshes.

-    $ISAAC_WS/src/isaac/config/dense_map/streaming_mapper.config
+The robot pose that the streaming mapper needs assumes a very accurate
+calibration of the IMU sensor in addition to the nav, haz, and sci cam
+sensors, and very accurate knowledge of the pose of these sensors on
+the robot body. If that is not the case, it is suggested to use the
+nav cam pose via the ``nav_cam_pose_topic`` field in
+streaming_mapper.config (set it to ``/loc/ml/features``), for which
+only accurate calibration of the nav, sci, and haz cameras among each
+other is assumed, while the ``ekf_pose_topic`` must be set to an empty
+string.
+
+The input texture can be in color or grayscale, at full or reduced
+resolution, and compressed or not.
+
+#### Running with no robot or nav cam pose information
+
+If no robot body or nav cam pose information is present, for example,
+if the EKF or localization node was not running when the image data was
+acquired, or this data was not recorded or was not reliable, the
+localization node can be started together with the streaming mapper,
+and this node will provide updated pose information.
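+As a quick check before deciding whether the localization node is
+needed, one can verify whether the bag already carries usable pose
+topics. A minimal sketch (the bag name is a placeholder):
+
+    rosbag info data.bag | grep -E "/gnc/ekf|/loc/ml/features"
+
+If neither topic shows up, or the recorded poses are known to be
+unreliable, proceed as described below.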
-which has the following fields:
+Edit ``streaming_mapper.config`` and set ``nav_cam_pose_topic`` to
+``/loc/ml/features`` and let ``ekf_state_topic`` be empty.
+
+The localization node will make use of a registered sparse
+map with BRISK features, histogram equalization, and a vocabulary
+database to find the nav cam poses. The command for building such a
+BRISK map from a registered SURF map is:
+
+    cp surf_map.map brisk_map.map
+    $ASTROBEE_WS/devel/lib/sparse_mapping/build_map \
+      --output_map brisk_map.map --rebuild --histogram_equalization \
+      --vocab_db
+
+See:
+
+    $ASTROBEE_SOURCE_PATH/localization/sparse_mapping/readme.md
+
+for more information.
+
+If running on a local machine, after the map is rebuilt it should be
+copied to:
+
+    $ASTROBEE_RESOURCE_DIR/maps/${ASTROBEE_WORLD}.map
+
+(The ``maps`` directory needs to be created if missing.)
+
+Also see a note earlier in the text for how to reduce the BRISK
+thresholds if the map has too few features. For more details on what
+the localization node expects, see build_map.md, in the section about
+running this node on a local machine.
+
+Ensure the same environment as before is present, including the robot name,
+and run:
+
+    roslaunch isaac isaac_astrobee.launch mlp:=local llp:=disabled \
+      nodes:=framestore,localization_node streaming_mapper:=true \
+      output:=screen
+
+Wait until the textured model is created, which can take a minute.
+
+Then, in a different terminal, play the bag. The localization node will
+expect the nav cam images to be published on topic ``/hw/cam_nav``. If
+they arrive on a different topic, such as
+``/mgt/img_sampler/nav_cam/image_record``, that topic needs to be redirected
+to this one when playing the bag, such as:
+
+    rosbag play --clock data.bag \
+      /mgt/img_sampler/nav_cam/image_record:=/hw/cam_nav \
+      /loc/ml/features:=/tmp/features /gnc/ekf:=/tmp/ekf
+
+Above, the /loc/ml/features and /gnc/ekf topics, which may exist in the
+bag, are redirected to temporary topics, since the newly started
+localization node will create new camera pose information.
+
+Do not omit the ``--clock`` option.
+
+Then enable the localization node by running in a separate
+terminal:
+
+    rosservice call /loc/ml/enable true
+
+#### The streaming mapper configuration file
+
+The ``streaming_mapper.config`` file has the following fields:

 - mesh_file: Override the location of the mesh described earlier.
 - ekf_state_topic: The topic to listen to for robot pose information.
@@ -928,134 +1217,97 @@ which has the following fields:
   /loc/ml/features. It is published by the localization node.
 - texture_cam_type: The camera that creates the texture images
   (can be nav_cam, sci_cam, haz_cam, and in simulation also
-  heat_cam, and acoustics_cam). The default is sci_cam. This
+  heat_cam and acoustics_cam). The default is sci_cam. This
   field affects the name of the topics on which the streaming
   mapper publishes its output.
- - texture_cam_topic: The topic having the texture to overlay.
-   The default value is /hw/cam_sci/compressed.
+ - texture_cam_topic: The topic having the images (texture) to
+   overlay. The default value is /hw/cam_sci/compressed; see the
+   note in the text for other cameras.
 - dist_between_processed_cams: Once an image is textured and
-  published, how far the camera should move (in meters) before
-  it should process another texture (any images arriving
-  in between will be ignored). The default is 0.1 meters.
+  published, process a new one whenever either the camera moves by
+  more than this distance, in meters, or the angle changes by more
+  than angle_between_processed_cams, in degrees. The default is
+  0.1.
+ - angle_between_processed_cams: See dist_between_processed_cams.
+   The default is 5.0.
 - max_iso_times_exposure: Apply the inverse gamma transform to
   images, multiply them by max_iso_times_exposure/ISO/exposure_time
   to adjust for lighting differences, then apply the gamma
   transform back. This value should be set to the maximum observed
   ISO * exposure_time. The default is 5.1. Not used with simulated
-  data.
-  - use_single_texture: Use a single texture buffer. Sample
-   the images by picking points on each triangle face with spacing
-   pixel_size. This can take a couple of minutes to form the
-   necessary structures to be able to stream the texture.
+  data, when sci_cam_exposure_correction is false, or for cameras
+  other than sci_cam.
+ - sci_cam_exposure_correction: If set to 'true', correct the
+   exposure of sci_cam images. Read exposures from /hw/sci_cam_exif.
+ - use_single_texture: If set to 'true', use a single texture
+   buffer. Sample the images by picking points on each triangle face
+   with spacing pixel_size. This can take a couple of minutes to form
+   the necessary structures to be able to stream the texture.
 - pixel_size: The pixel size to use with use_single_texture.
   The default is 0.001 meters.
 - num_threads: Number of threads to use. The default is 48.
- - save_to_disk: If to save to disk an .obj file for each topic
-   published, to be debugged in Meshlab. These will be saved in
+ - save_to_disk: If set to 'true', save to disk an .obj file for each
+   topic published, to be debugged in Meshlab. These will be saved in
   ~/.ros. The default is 'false'.

-If no pose information is present, for example, if the EKF or
-localization node was not running when the data was acquired, or this
-data was not recorded, the localization node can be started by
-modifying the above `roslaunch` command as follows:
-
-    roslaunch isaac isaac_astrobee.launch llp:=disabled \
-      nodes:=framestore,localization_node \
-      streaming_mapper:=true output:=screen
-
-Alternatively, the streaming mapper can be started first,
-without localization, such as:
-
-    roslaunch streaming_mapper.launch sim_mode:=false output:=screen
-
-(the full path to streaming_mapper.launch needs to be specified if not
-found).
-
-Ensure then that nav_cam_pose_topic is set to /loc/ml/features in
-streaming_mapper.config and that ekf_state_topic is empty.
-
-Then the localization node can be started separately, as:
-
-    roslaunch astrobee astrobee.launch llp:=disabled \
-      nodes:=framestore,localization_node output:=screen
-
-and then enabled by running in a separate terminal:
-
-    rosservice call /loc/ml/enable true
-
-This node will expect the nav cam pose to be published on topic
-`/hw/cam_nav`. If it is on a different topic, such as
-`/mgt/img_sampler/nav_cam/image_record`, it needs to be redirected to
-this one when playing the bag, such as:
-
-    rosbag play data.bag \
-      /mgt/img_sampler/nav_cam/image_record:=/hw/cam_nav
-
-The localization node, if needed, will make use of a registered sparse
-map with BRISK features, histogram equalization, and a vocabulary
-database to find the nav cam poses. See build_map.md in the astrobee
-repository how such a map can be created and where it should be
-copied. If running on a local machine, it should go to:
-
-    $ASTROBEE_RESOURCE_DIR/maps/${ASTROBEE_WORLD}.map
+### Running with simulated data

-Also see a note earlier in the text for how to reduce the BRISK
-thresholds if the map has too few features. For more details on what
-the localization node expects, see build_map.md, in the section about
-running this node on a local machine.
+For simulated data the usage is somewhat different. First,
+simulation.config needs to be edited as described earlier in the
+document to turn on the simulated sci cam, haz cam, or other desired
+camera.

-The data produced by the streaming mapper can be recorded with:
+When working with ISS data, more specifically the JPM module, do:

-    rosbag record /ism/sci_cam/img /ism/sci_cam/obj /ism/sci_cam/mtl \
-      -b 10000
-
-The recording should start before the input bag is played. The `-b`
-option tells ROS to increase its recording buffer size, as sometimes
-the streaming mapper can publish giant meshes.
+    export ASTROBEE_WORLD=iss
+    /bin/cp -fv $ISAAC_WS/src/isaac/resources/geometry/iss_sim.ply \
+      $ISAAC_WS/src/isaac/resources/geometry/${ASTROBEE_WORLD}.ply

-The robot pose that the streaming mapper needs assumes a very accurate
-calibration of the IMU sensor in addition to the nav, haz, and sci cam
-sensors, and very accurate knowledge of the pose of these sensors on
-the robot body. If that is not the case, it is suggested to use the nav
-cam pose via the nav_cam_pose_topic field, for which only accurate
-calibration of the nav, sci, and haz cameras among each other is assumed.
+to use the simulated mesh for this module.

-The input texture can be in color or grayscale, at full or reduced
-resolution, and compressed or not.
+Edit ``streaming_mapper.config`` and set:

-## Running with simulated data
+    use_single_texture = false;

-For simulated data the usage is somewhat different. First,
-simulation.config needs to be edited as described earlier in the
-document to turn on the simulated sci cam, haz cam, or other desired
-camera.
+Otherwise, the existing simulated mesh has so many triangles that it
+will overwhelm the size of the single buffer which is meant to fit all
+textures.

-To launch the streaming mapper, one can do the following:
+To launch the streaming mapper, do:

-    source $ASTROBEE_BUILD_PATH/devel/setup.bash
+    source $ASTROBEE_WS/devel/setup.bash
     source $ISAAC_WS/devel/setup.bash
     export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources
     export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config
     export ASTROBEE_WORLD=iss
-    roslaunch isaac sim.launch world:=iss rviz:=true streaming_mapper:=true \
-      pose:="11.0 -7.0 5.0 0 0 0 1"
+    roslaunch isaac sim.launch world:=iss rviz:=true \
+      streaming_mapper:=true pose:="11.0 -7.0 5.0 0 0 0 1" \
+      output:=screen
+
+It is suggested to read the documentation above for working with the
+simulator in the context of the geometry mapper for more details, such
+as how to ensure the desired camera images are seen in rviz.
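+Before moving the robot it can be worth confirming that the simulated
+camera topics are actually being published. A small sketch, assuming
+the simulated sci_cam was enabled as described earlier:
+
+    rostopic list | grep /sim
+    rostopic hz /sim/sci_cam/pose
+
+If the pose and info topics are missing, revisit simulation.config.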
-and then the robot can be moved in order to create images to texture as:
+Then, the robot can be moved in order to create images to texture as:

    rosrun mobility teleop -move -pos "11.0 -5.0 5.0" -tolerance_pos 0.0001 \
      -att "0 0 0 1"

With simulated data the pose and intrinsics for each camera are
-received directly from the simulator, on topics that, for example for
+received directly from the simulator, on topics that, for example, for
sci_cam, are:

    /sim/sci_cam/pose
    /sim/sci_cam/info

-Hence, the parameters ekf_state_topic, ekf_pose_topic, and
-nav_cam_pose_topic are ignored.
+Hence, the parameters ``ekf_state_topic``, ``ekf_pose_topic``, and
+``nav_cam_pose_topic`` are ignored.
+
+The streaming mapper will publish its results on topics mentioned
+earlier in the text.

-Note that value of `ASTROBEE_ROBOT` is not needed in this case.
+Note that the value of ``ASTROBEE_ROBOT`` is not needed in this case. Any
+user-set value will be overwritten with the robot name ``sim``.

## Camera refinement

@@ -1067,8 +1319,12 @@ products, and between the nav cam and sci cam textures.

Once a dataset of the robot flying around and performing inspections
is acquired, so in realistic conditions, rather than with a
calibration target, it can be used to further refine the camera
-calibration file. This refinement will change the transforms between
-the cameras and the intrinsics of the sci cam.
+calibration file, including the intrinsics and extrinsics.
+
+The calibration step above can be avoided altogether, and the
+transforms to be refined for this robot can be initialized with values from a
+different robot or with the placeholder values already present in a
+given robot's config file.

To avoid issues with accuracy of the timestamps of the images, we
assume that the robot is paused, or otherwise moves very slowly,
@@ -1077,47 +1333,52 @@ following approach should be taken.

### Image selection

-Select a set of nav cam images around 1 second before and after each
-sci cam image using the tool:
+Select a set of nav cam images shortly before and after each sci cam
+image using the image_picker tool:

    export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources
    export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config
    export ASTROBEE_WORLD=iss
    export ASTROBEE_ROBOT=bsharp2
-   source $ASTROBEE_BUILD_PATH/devel/setup.bash
+   source $ASTROBEE_WS/devel/setup.bash
    source $ISAAC_WS/devel/setup.bash

    $ISAAC_WS/devel/lib/geometry_mapper/image_picker \
      --ros_bag mybag.bag \
      --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \
      --sci_cam_topic /hw/cam_sci/compressed \
      --haz_cam_intensity_topic /hw/depth_haz/extended/amplitude_int \
-     --bracket_len 2.0 --output_nav_cam_dir nav_images
+     --bracket_len 0.6 --output_nav_cam_dir nav_images

-Setting up the correct robot name above is very important.
+Setting up the correct robot name for ASTROBEE_ROBOT is very important.

The --bracket_len option brackets sci cam images by nav cam images so
the length of time between these two nav cam images is at most this
value. These nav cam images are saved. Later in camera_refiner sci cam
and haz cam images are picked in each such nav cam interval. (A
-camera's time is first adjusted for the time offset before any of this
-happens.) One may consider using a bracket length of 1.0 seconds if
-the bot is known to move quickly right after taking a sci cam picture.
+camera's time is first adjusted for the timestamp offset between the
+cameras.)
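+As a quick sanity check of the selection, count and inspect the saved
+images (a sketch; it assumes, as with other tools in this document,
+that the saved images are named by timestamp):
+
+    ls nav_images/*.jpg | wc -l
+    ls nav_images/*.jpg | head -n 4
+
+Consecutive pairs of timestamps should tightly bracket the time of
+each sci cam image.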
+
+It is important to note that the bracket length can affect the accuracy
+of calibration later, and hence it should be rather tight. Yet a tight
+bracket does not allow for wiggle room if later it is desired to tweak
+the timestamp offsets a little while still staying within the bounds,
+and it may prevent bracketing all the sci cam images and enough haz
+cam images.

-The option --scicam_to_hazcam_timestamp_offset_override_value can be
+The option --nav_cam_to_sci_cam_offset_override_value can be
used if the given bag is suspected to have a different value of this
offset. Such an option can be passed also to the camera
refiner below and to the geometry mapper.

-Examine these images. Since they are used for bracketing,
-there will be pairs of very similar images. Yet, it is good
-that there should be no excessive overlap otherwise, so the images
-in the first pair better have about 3/4 or 4/5 overlap with
-images from other pairs.
+Examine these images. Since they are used for bracketing, there will
+be pairs of very similar images. Yet, it is good that there should be
+no excessive overlap otherwise, so the images in one pair should
+have about 4/5 overlap with images from other pairs.

-If necessary, add more intermediate images manually, or re-running
-this tool with
+If necessary, add more intermediate images by re-running this tool
+with:

-    --max_time_between_images
+    --max_time_between_images

It is good to not allow too many images or excessive overlap, but, if
removing excessive images, ensure that each sci cam image is still
@@ -1141,7 +1402,7 @@ above:

    dir=nav_images
    images=$(ls $dir/*jpg)
    surf_map=${dir}_surf.map
-   $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/build_map \
+   $ASTROBEE_WS/devel/lib/sparse_mapping/build_map \
      --output_map $surf_map --feature_detection --feature_matching \
      --track_building --incremental_ba --bundle_adjustment \
      --min_valid_angle 1.0 --num_subsequent_images 20 $images
@@ -1152,6 +1413,10 @@ will prevent having too many features which result in small
convergence angles between the rays, which may make map-building less
stable.

+If the map makes a closed loop, and, for example, image 80 becomes
+similar to image 0, one should increase --num_subsequent_images to
+perhaps 90. This would result in increased runtime but a better map.
+
Register the map. That can be done, for example, by merging this map
with a registered map, bundle-adjusting the obtained map,
re-registering it, then extracting the submap corresponding to the
@@ -1164,13 +1429,13 @@ map, say showing all the walls as our map, and merge and register our
map to this registered submap. That goes as follows.

Examine the existing registered map in nvm_visualize and record in a
-list named `list.txt` the images which are similar to the ones in the
+list named ``list.txt`` the images which are similar to the ones in the
map we want to register, one per line (those are printed on the screen
as one navigates through the map in the viewer). A submap of the
registered map can then be extracted, without bundle-adjustment (to
not affect the registration) as:

-    $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/extract_submap \
+    $ASTROBEE_WS/devel/lib/sparse_mapping/extract_submap \
      --skip_bundle_adjustment --input_map registered_map.map \
      --output_map registered_submap.map \
@@ -1188,23 +1453,23 @@ registration.)

Our map can be merged into this map without modifying the first map,
and hence keeping its registration, as:

-    $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/merge_maps \
-      --fix_first_map \
-      --num_image_overlaps_at_endpoints 200 \
-      --min_valid_angle 1.0 \
-      registered_submap.map $surf_map \
+    $ASTROBEE_WS/devel/lib/sparse_mapping/merge_maps \
+      --fix_first_map \
+      --num_image_overlaps_at_endpoints 200 \
+      --min_valid_angle 1.0 \
+      registered_submap.map $surf_map \
      --output_map merged.map

The desired now-registered map can then be extracted as:

-    $ASTROBEE_BUILD_PATH/devel/lib/sparse_mapping/extract_submap \
-      --skip_bundle_adjustment \
-      --input_map merged.map --output_map ${dir}_surf_reg.map \
+    $ASTROBEE_WS/devel/lib/sparse_mapping/extract_submap \
+      --skip_bundle_adjustment \
+      --input_map merged.map --output_map ${dir}_surf_reg.map \
      ${dir}/*jpg

Here, $dir points to nav_images as earlier in the document.

-### Running the refiner
+### Running camera_refiner

Next, the refiner tool is run, as shown below. This will overwrite the
camera calibration file, so it may be prudent to start by copying the
existing one to a new location, and set ASTROBEE_CONFIG_DIR to
point to that.

    export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config
    export ASTROBEE_WORLD=iss
    export ASTROBEE_ROBOT=bsharp2
-   source $ASTROBEE_BUILD_PATH/devel/setup.bash
+   source $ASTROBEE_WS/devel/setup.bash
    source $ISAAC_WS/devel/setup.bash
+
+   float="optical_center focal_length distortion"
    $ISAAC_WS/devel/lib/geometry_mapper/camera_refiner \
-     --ros_bag mybag.bag --sparse_map mymap.map \
-     --num_iterations 20 --bracket_len 2.0 \
-     --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \
+     --ros_bag mybag.bag \
+     --sparse_map input_map.map \
+     --mesh mesh.ply \
      --output_map output_map.map \
-     --fix_map --skip_registration --float_scale \
-     --timestamp_interpolation --robust_threshold 3 \
-     --sci_cam_intrinsics_to_float \
-       'focal_length optical_center distortion' \
-     --mesh mesh.ply --mesh_weight 25 \
-     --mesh_robust_threshold 3
-
-Note how we used the same bracket length as in the image picker.
-
-This tool will print some statistics showing the reprojection
-residuals for all camera combinations. This can be helpful
-in figuring out if the value of scicam_to_hazcam_timestamp_offset
-is correct. If this value is not known well, this tool can be
-run with zero or more iterations and various values of
-
-    --scicam_to_hazcam_timestamp_offset_override_value
-
-to see which value gives the smallest residuals. The geometry mapper
-cam be run with various obtained calibrated files, and see which
-causes least amount of registration errors and most agreement between
-the nav_cam and sci_cam textures.
+     --bracket_len 0.6 \
+     --depth_tri_weight 1000 \
+     --mesh_tri_weight 0 \
+     --depth_mesh_weight 0 \
+     --sci_cam_intrinsics_to_float "$float" \
+     --nav_cam_intrinsics_to_float "$float" \
+     --haz_cam_intrinsics_to_float "$float" \
+     --affine_depth_to_image \
+     --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \
+     --num_overlaps 10 \
+     --out_texture_dir out_texture_dir
+
+Here it was chosen to pass in a mesh from a previous invocation of the
+geometry mapper for this robot and the given registered sparse map (for
+example, the ``fused.ply`` mesh can be used). That is optional, and it
+is needed only if one adds to camera_refiner the constraints that the
+triangulated points and haz cam clouds stay close to the mesh, in which
+case positive values should be used for ``--mesh_tri_weight`` and
+``--depth_mesh_weight``, or if it is desired to use the option
+``--out_texture_dir``.
+
+Sometimes such a mesh can help with convergence, but should not be
+used in a first attempt at calibration. It was not used for the final
+bumble robot calibration, for which the weights were as above, but was
+used for the bsharp2 robot, when all three weights above were set to 25.
+
+Note that sometimes it is desired to not change the nav_cam
+intrinsics, such as for the bumble robot, as existing ISS maps depend
+on it, and then one should set `--nav_cam_intrinsics_to_float ""`
+above.
+
+We used the same bracket length as in the image picker. It is very
+important to note that a tight bracket should be used, as, if the
+robot moves fast and the bracket value is big, it can affect the
+accuracy of calibration. Yet a tight bracket does not allow for wiggle
+room if it is decided to vary the timestamp offset (see further down)
+while staying within the bounds given by the bracket.
+
+This tool will print some statistics showing the residual errors
+before and after each optimization pass (before outlier removal at the
+end of the pass), as follows:
+
+    The 25, 50, 75, and 100th percentile residual stats after opt
+    depth_mesh_x_m: 0.0015756 0.004064 0.0099792 1.1882 (3566 residuals)
+    depth_mesh_y_m: 0.0015745 0.0036734 0.0088576 1.8041 (3566 residuals)
+    depth_mesh_z_m: 0.00091998 0.0019874 0.0038565 0.37353 (3566 residuals)
+    depth_tri_x_m: 0.00066995 0.0021602 0.0065155 2.7389 (3578 residuals)
+    depth_tri_y_m: 0.00069529 0.0022702 0.0069424 2.6967 (3578 residuals)
+    depth_tri_z_m: 0.00051406 0.0016069 0.0044358 1.1036 (3578 residuals)
+    haz_cam_pix_x: 0.23413 0.51717 1.0606 10353 (3723 residuals)
+    haz_cam_pix_y: 0.14605 0.33521 0.67331 1040.7 (3723 residuals)
+    mesh_tri_x_m: 0.0030461 0.0097438 0.023452 4.7647 (24231 residuals)
+    mesh_tri_y_m: 0.0027831 0.0085864 0.020538 3.1248 (24231 residuals)
+    mesh_tri_z_m: 0.00088227 0.0026434 0.0063236 1.1076 (24231 residuals)
+    nav_cam_pix_x: 0.055205 0.15801 0.37068 36.164 (206561 residuals)
+    nav_cam_pix_y: 0.096944 0.23234 0.49844 495.14 (206561 residuals)
+    sci_cam_pix_x: 0.020412 0.10584 0.29013 38.499 (635 residuals)
+    sci_cam_pix_y: 0.1585 0.34267 0.71541 30.158 (635 residuals)
+
+These can be helpful in figuring out if the calibration result is good.
+The errors whose name ends in "_m" are in meters and measure
+the absolute differences between the depth clouds and mesh
+(depth_mesh), between depth clouds and triangulated points
+(depth_tri), and between mesh points and triangulated points
+(mesh_tri), in x, y, and z, respectively. The ``mesh`` residuals will
+be printed only if a mesh is passed on input and if the mesh-related
+weights are positive. Some outliers are unavoidable, hence some of
+these numbers can be big even if the calibration overall does well
+(the robust threshold does not allow outliers to dominate).
+
+A source of errors (apart from inaccurate intrinsics, extrinsics, or
+insufficiently good modeling of the cameras) can be the
+nav_cam_to_sci_cam_timestamp_offset, which can be non-zero if the HLP
+and MLP/LLP processors are not synchronized (the sci_cam pictures are
+acquired with the HLP and nav_cam with MLP/LLP). If this value is not
+known well, this tool can be run with zero or more iterations and
+various values of:
+
+    --nav_cam_to_sci_cam_offset_override_value
+
+to see which value gives the smallest residuals.
+
+If the ``--out_texture_dir`` option is specified, the tool will create
+textured meshes for each image and optimized camera at the
+end. Ideally those textured meshes will agree with each other.
+
+### The algorithm
+
+See camera_refiner.cc for a lengthy explanation of the algorithm.
+
+### Camera refiner options
+
This program's options are:

-    --ros_bag: A ROS bag with recorded nav_cam, haz_cam, and
-     full-resolution sci_cam data.
-    --sparse_map: A registered SURF sparse map made with some of
-     the ROS bag data, and including nav cam images closely
-     bracketing the sci cam images.
-    --nav_cam_topic: The nav cam image topic in the bag file.
-    --haz_cam_points_topic: The haz cam point cloud topic in the bag file.
-    --haz_cam_intensity_topic: The haz cam intensity topic in the bag file.
-    --sci_cam_topic: The sci cam image topic in the bag file.
-    --start: How many seconds into the bag to start processing the data.
-    --duration: For how many seconds to do the processing.
-    --max_haz_cam_image_to_depth_timestamp_diff: Use depth
-     haz cam clouds that are within this distance in time from the
-     nearest haz cam intensity image. The default is 0.2.
-    --robust_threshold: Pixel errors much larger than this will be
-     exponentially attenuated to affect less the cost
-     function. The default is 1.0.
-    --num_cam_iterations: How many solver iterations to perform to
-     solve for cam1 intrinsics. The default is 20.
-    --bracket_len: Lookup sci and haz cam images only between
-     consecutive nav cam images whose distance in time is no more than
-     this (in seconds). It is assumed the robot moves slowly and
-     uniformly during this time.
-    --sci_cam_timestamps: Use only these sci cam timestamps. Must be
-     a file with one timestamp per line.
-    --float_scale: If to optimize the scale of the clouds (use it if
-     the sparse map is kept fixed).
-    --fix_map: Do not optimize the sparse map, hence only the camera
-     params.
-    --sci_cam_intrinsics_to_float: Refine 0 or more of the following
-     intrinsics for sci_cam: focal_length, optical_center,
-     distortion. Specify as a quoted list. For example:
-     'focal_length optical_center distortion'.
-    --timestamp_interpolation: If true, interpolate between timestamps.
-     May give better results if the robot is known to move uniformly, and
-     perhaps less so for stop-and-go robot motion.
-    --skip_registration: If true, do not re-register the sparse map.
-     Then the hugin and xyz file options need not be provided.
-     This may result in the scale not being correct if the sparse map
-     is not fixed.
-    --hugin_file: The path to the hugin .pto file used for sparse map
+
+    --ros_bag (string, default = "")
+      A ROS bag with recorded nav_cam, haz_cam intensity,
+      full-resolution sci_cam, and haz_cam depth clouds.
+
+    --sparse_map (string, default = "")
+      A registered SURF sparse map made with some of the ROS bag data,
+      including nav cam images closely bracketing the sci cam
+      images.
+
+    --output_map (string, default = "")
+      Output file containing the updated map.
+
+    --nav_cam_topic (string, default = "/mgt/img_sampler/nav_cam/image_record")
+      The nav cam topic in the bag file.
+
+    --haz_cam_intensity_topic (string, default = "/hw/depth_haz/extended/amplitude_int")
+      The depth camera intensity topic in the bag file.
+
+    --sci_cam_topic (string, default = "/hw/cam_sci/compressed")
+      The sci cam topic in the bag file.
+
+    --haz_cam_points_topic (string, default = "/hw/depth_haz/points")
+      The depth point cloud topic in the bag file.
+
+    --num_overlaps (int32, default = 10)
+      How many images (of all camera types) close and forward in time
+      to match to a given image.
+
+    --max_haz_cam_image_to_depth_timestamp_diff (double, default = 0.2)
+      Use a haz cam depth cloud only if it is within this distance in
+      time from a given haz cam intensity image.
+
+    --robust_threshold (double, default = 3.0)
+      Residual pixel errors and 3D point residuals (the latter multiplied
+      by corresponding weight) much larger than this will be
+      exponentially attenuated to affect the cost function less.
+
+    --num_iterations (int32, default = 20)
+      How many solver iterations to perform in calibration.
+
+    --bracket_len (double, default = 0.6)
+      Lookup sci and haz cam images only between consecutive nav cam
+      images whose distance in time is no more than this (in seconds),
+      after adjusting for the timestamp offset between these
+      cameras. It is assumed the robot moves slowly and uniformly
+      during this time. A large value here will make the refiner
+      compute a poor solution but a small value will prevent enough
+      sci_cam images from being bracketed.
+
+    --nav_cam_intrinsics_to_float (string, default = "")
+      Refine given nav_cam intrinsics. Specify as a quoted list. For
+      example: 'focal_length optical_center distortion'.
+
+    --haz_cam_intrinsics_to_float (string, default = "")
+      Refine given haz_cam intrinsics. Specify as a quoted list. For
+      example: 'focal_length optical_center distortion'.
+
+    --sci_cam_intrinsics_to_float (string, default = "")
+      Refine given sci_cam intrinsics. Specify as a quoted list. For
+      example: 'focal_length optical_center distortion'.
+
+    --extrinsics_to_float (string, default = "haz_cam sci_cam depth_to_image")
+      Specify the cameras whose extrinsics to float, relative to
+      nav_cam. Also consider whether to float the haz_cam depth_to_image
+      transform.
+
+    --float_scale (bool, false unless specified)
+      Whether to optimize the scale of the clouds, part of
+      haz_cam_depth_to_image_transform (use it if the sparse map is
+      kept fixed, or else rescaling and registration of the map and
+      extrinsics is needed). This parameter should not be used with
+      --affine_depth_to_image when the transform is affine, rather
+      than rigid and a scale. See also --extrinsics_to_float.
+
+    --float_sparse_map (bool, false unless specified)
+      Optimize the sparse map. This should be avoided as it can
+      invalidate the scales of the extrinsics and the registration. It
+      should at least be used with big mesh weights to attenuate those
+      effects. See also: --registration.
+
+    --float_timestamp_offsets (bool, false unless specified)
+      Whether to optimize the timestamp offsets among the cameras.
+
+    --nav_cam_num_exclude_boundary_pixels (int32, default = 0)
+      Flag as outliers nav cam pixels closer than this to the
+      boundary, and ignore that boundary region when texturing with
+      the --out_texture_dir option. This may improve the calibration
+      accuracy, especially if switching from fisheye to radtan
+      distortion for nav_cam. See also the geometry_mapper
+      --undistorted_crop_wins option.
+
+    --timestamp_offsets_max_change (double, default = 1.0)
+      If floating the timestamp offsets, do not let them change by
+      more than this (measured in seconds). Existing image bracketing
+      acts as an additional constraint.
+
+    --nav_cam_to_sci_cam_offset_override_value (double, default = NaN)
+      Override the value of nav_cam_to_sci_cam_timestamp_offset from
+      the robot config file with this value.
+
+    --sci_cam_timestamps (string, default = "")
+      Use only these sci cam timestamps. Must be a file with one
+      timestamp per line.
+
+    --depth_tri_weight (double, default = 1000.0)
+      The weight to give to the constraint that depth measurements
+      agree with triangulated points. Use a bigger number as depth
+      errors are usually on the order of 0.01 meters while
+      reprojection errors are on the order of 1 pixel.
+
+    --mesh (string, default = "")
+      Use this geometry mapper mesh from a previous geometry mapper
+      run to help constrain the calibration. E.g., use fused_mesh.ply.
+
+    --mesh_tri_weight (double, default = 0.0)
+      A larger value will give more weight to the constraint that
+      triangulated points stay close to a preexisting mesh. Not
+      suggested by default.
+
+    --depth_mesh_weight (double, default = 0.0)
+      A larger value will give more weight to the constraint that the
+      depth clouds stay close to the mesh. Not suggested by default.
+
+    --affine_depth_to_image (bool, false unless specified)
+      Assume that the depth_to_image_transform for each depth + image
+      camera is an arbitrary affine transform rather than a rotation
+      times a scale.
+
+    --refiner_num_passes (int32, default = 2)
+      How many passes of optimization to do. Outliers will be removed
+      after every pass. Each pass will start with the previously
+      optimized solution as an initial guess. Mesh intersections (if
+      applicable) and ray triangulation will be recomputed before each
+      pass.
+
+    --initial_max_reprojection_error (double, default = 300.0)
+      If filtering outliers, remove interest points for which the
+      reprojection error (in pixels) is larger than this. This
+      filtering happens when matches are created (before cameras are
+      optimized), and a big value should be used if the initial cameras
+      are not trusted.
+
+    --max_reprojection_error (double, default = 25.0)
+      If filtering outliers, remove interest points for which the
+      reprojection error (in pixels) is larger than this. This
+      filtering happens after each optimization pass finishes, unless
+      disabled. It is better to not filter too aggressively unless
+      confident in the solution.
+
+    --refiner_min_angle (double, default = 0.5)
+      If filtering outliers, remove triangulated points for which all
+      rays converging to them make an angle (in degrees) less than this.
+      Note that some cameras in the rig may be very close to each
+      other relative to the triangulated points, so care is needed
+      here.
+
+    --out_texture_dir (string, default = "")
+      If non-empty and if an input mesh was provided, project the
+      camera images using the optimized poses onto the mesh and write
+      the obtained .obj files in the given directory.
+
+    --min_ray_dist (double, default = 0.0)
+      The minimum search distance from a starting point along a ray
+      when intersecting the ray with a mesh, in meters (if
+      applicable).
+
+    --max_ray_dist (double, default = 100.0)
+      The maximum search distance from a starting point along a ray
+      when intersecting the ray with a mesh, in meters (if
+      applicable).
+
+    --nav_cam_distortion_replacement (string, default = "")
+      Replace nav_cam's distortion coefficients with this list after
+      the initial determination of triangulated points, and then
+      continue with distortion optimization. A quoted list of four or
+      five values is expected, separated by spaces, as the replacement
+      distortion model will have radial and tangential
+      coefficients. Set a positive
+      --nav_cam_num_exclude_boundary_pixels.
+
+    --registration (bool, false unless specified)
+      If true, and registration control points for the sparse map
+      exist and are specified by --hugin_file and --xyz_file,
+      re-register the sparse map at the end. All the extrinsics,
+      including depth_to_image_transform, will be scaled accordingly.
+      This is not needed if the nav_cam intrinsics and the sparse map
+      do not change.
+
+    --hugin_file (string, default = "")
+      The path to the hugin .pto file used for sparse map
-     registration.
-    --xyz_file: The path to the xyz file used for sparse map registration.
-    --output_map: The map optimized by this tool (it won't change if the
-     option --fix_map is used).
-    --num_opt_threads: How many threads to use in the optimization. The
-     default is 16.
-    --scicam_to_hazcam_timestamp_offset_override_value:
-     Override the value of scicam_to_hazcam_timestamp_offset from the
-     config file with this value.
-    --mesh: Refine the sci cam so that the sci cam texture agrees with
-     the nav cam texture when projected on this mesh.
-    --mesh_weight: A larger value will give more weight to the mesh
-     constraint. The mesh residuals printed at the end can be used to
-     examine the effect of this weight. Default: 25.0.
-    --mesh_robust_threshold: A larger value will try harder to
-     minimize large divergences from the mesh (but note that some of
-     those may be outliers). Default: 3.0.
-    --verbose: Print the residuals and save the images and match files.
-     Stereo Pipeline's viewer can be used for visualizing these.
+      registration.
+
+    --xyz_file (string, default = "")
+      The path to the xyz file used for sparse map registration.
+
+    --parameter_tolerance (double, default = 1e-12)
+      Stop when the optimization variables change by less than this.
+
+    --num_opt_threads (int32, default = 16)
+      How many threads to use in the optimization.
+
+    --num_match_threads (int32, default = 8)
+      How many threads to use in feature detection/matching.
+      A large number can use a lot of memory.
+
+    --verbose (bool, false unless specified)
+      Print the residuals and save the images and match files. Stereo
+      Pipeline's viewer can be used for visualizing these.
+
+### Using the refiner with a radtan model for nav_cam
+
+The camera refiner supports using a radtan distortion model for
+nav_cam, that is a model with radial and tangential distortion,
+just like for haz_cam and sci_cam, but the default nav_cam distortion
+model is fisheye. One can edit the robot config file and replace the
+fisheye model with a desired radial + tangential distortion model (4
+or 5 coefficients are needed) then run the refiner.
+
+Since it is not easy to find a good initial set of such coefficients,
+the refiner has the option of computing such a model which best fits
+the given fisheye model. For that, the refiner is started with the
+fisheye model, this model is used to set up the problem, including
+triangulating the 3D points after feature detection, then the fisheye
+model is replaced on-the-fly with desired 4 or 5 coefficients of the
+radtan model via the option --nav_cam_distortion_replacement, to which
+one can pass, for example, "0 0 0 0". These coefficients will then be
+optimized while keeping the rest of the variables fixed (nav cam focal
+length and optical center, intrinsics of other cameras, and all the
+extrinsics). The new best-fit distortion model will be written to disk
+at the end, replacing the fisheye model, and from then on the new
+model can be used for further calibration experiments just as with the
+fisheye model.
+
+It may however be needed to rerun the refiner one more time, this time
+with the new distortion model read from disk, and still keep all
+intrinsics and extrinsics (including the sparse map and depth to
+image) fixed, except for the nav cam distortion, to fully tighten it.
+
+Since it is expected that fitting such a model is harder at the
+periphery, where the distortion is stronger, the camera refiner has
+the option ``--nav_cam_num_exclude_boundary_pixels``, which can be used to
+restrict the nav cam view to a central region of given dimensions when
+such optimization takes place (whether the new model type is fit
+on the fly or read from disk when already determined). If a
+satisfactory solution is found and it is desired to later use the
+geometry mapper with such a model, note its option
+``--undistorted_crop_wins``, and one should keep in mind that the
+restricted region specified earlier may not exactly be the region to
+be used with the geometry mapper, since the former is specified in
+distorted pixels and this one in undistorted pixels.
+
+All this logic was tested and was shown to work in a satisfactory way,
+but no thorough attempt was made at validating that a radtan distortion
+model, while having more degrees of freedom, would out-perform the
+fisheye model. That is rather unlikely, since given sufficiently many
+images with good overlap, the effect of the peripheral region where
+the fisheye lens distortion may not perform perfectly may be small.

## Orthoprojecting images

@@ -1348,9 +1853,9 @@ A geometry mapper run directory has all the inputs this tool needs. It
can be run as follows:

    export ASTROBEE_SOURCE_PATH=$HOME/projects/astrobee/src
-   export ASTROBEE_BUILD_PATH=$HOME/projects/astrobee
+   export ASTROBEE_WS=$HOME/projects/astrobee
    export ISAAC_WS=$HOME/projects/isaac
-   source $ASTROBEE_BUILD_PATH/devel/setup.bash
+   source $ASTROBEE_WS/devel/setup.bash
    source $ISAAC_WS/devel/setup.bash
    export ASTROBEE_RESOURCE_DIR=$ASTROBEE_SOURCE_PATH/astrobee/resources
    export ASTROBEE_CONFIG_DIR=$ASTROBEE_SOURCE_PATH/astrobee/config
@@ -1361,6 +1866,12 @@ can be run as follows:
      --mesh geom_dir/simplified_mesh.ply \
      --image geom_dir/distorted_sci_cam/1616785318.1400001.jpg \
      --camera_to_world geom_dir/1616785318.1400001_sci_cam_to_world.txt \
-     --output_dir out
+     --num_exclude_boundary_pixels 0 \
+     --output_prefix out
+
+This will write ``out-1616785318.1400001.obj`` and its associated files.
+
+Ensure that the correct robot is specified in ``ASTROBEE_ROBOT``.

-This will write out/run.obj and its associated files.
+Alternatively, the images and cameras can be specified in lists, via
+``--image_list`` and ``--camera_list``.
diff --git a/dense_map/geometry_mapper/src/dense_map_ros_utils.cc b/dense_map/geometry_mapper/src/dense_map_ros_utils.cc
index 23e74a3e..5bdfca11 100644
--- a/dense_map/geometry_mapper/src/dense_map_ros_utils.cc
+++ b/dense_map/geometry_mapper/src/dense_map_ros_utils.cc
@@ -347,6 +347,13 @@ bool lookupCloud(double desired_time, std::vector const
  return false;
}

+// A wrapper around a function in pcl_ros/point_cloud.h to avoid
+// including that header all over the place as it creates an annoying
+// warning.
+void msgToPcl(sensor_msgs::PointCloud2::ConstPtr pc_msg, pcl::PointCloud& pc) { + pcl::fromROSMsg(*pc_msg, pc); +} + // Read the list of topics in a bag while avoiding repetitions void readTopicsInBag(std::string const& bag_file, std::vector& topics) { if (!boost::filesystem::exists(bag_file)) LOG(FATAL) << "Bag does not exist: " << bag_file; diff --git a/dense_map/geometry_mapper/src/dense_map_utils.cc b/dense_map/geometry_mapper/src/dense_map_utils.cc index 29ff99d4..043c9721 100644 --- a/dense_map/geometry_mapper/src/dense_map_utils.cc +++ b/dense_map/geometry_mapper/src/dense_map_utils.cc @@ -24,17 +24,6 @@ #include -// Get rid of warning beyond our control -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic push -#include -#include -#include -#include -#include -#pragma GCC diagnostic pop - #include #include @@ -44,7 +33,8 @@ namespace dense_map { // A function to split a string like 'optical_center focal_length' into // its two constituents. -void parse_intrinsics_to_float(std::string const& intrinsics_to_float, std::set& intrinsics_to_float_set) { +void parse_intrinsics_to_float(std::string const& intrinsics_to_float, + std::set& intrinsics_to_float_set) { intrinsics_to_float_set.clear(); std::string val; std::istringstream is(intrinsics_to_float); @@ -55,6 +45,39 @@ void parse_intrinsics_to_float(std::string const& intrinsics_to_float, std::set< } } +// A function to split a string like 'haz_cam sci_cam depth_to_image' into +// its two constituents and validate against the list of known cameras. +void parse_extrinsics_to_float(std::vector const& cam_names, + std::string const& ref_cam_name, + std::string const& depth_to_image_name, + std::string const& extrinsics_to_float, + std::set& extrinsics_to_float_set) { + extrinsics_to_float_set.clear(); + std::string val; + std::istringstream is(extrinsics_to_float); + while (is >> val) { + extrinsics_to_float_set.insert(val); + + // Sanity check + bool known_cam = false; + for (size_t it = 0; it < cam_names.size(); it++) { + if (val == cam_names[it]) { + known_cam = true; + break; + } + } + if (val == depth_to_image_name) known_cam = true; + + if (!known_cam) + LOG(FATAL) << "Unknown camera: " << val << "\n"; + } + + if (extrinsics_to_float_set.find(ref_cam_name) != extrinsics_to_float_set.end()) + LOG(FATAL) << "There exists no extrinsics transform from " << ref_cam_name << " to itself.\n"; + + return; +} + // Extract a rigid transform to an array of length NUM_RIGID_PARAMS void rigid_transform_to_array(Eigen::Affine3d const& aff, double* arr) { for (size_t it = 0; it < 3; it++) arr[it] = aff.translation()[it]; @@ -77,6 +100,36 @@ void array_to_rigid_transform(Eigen::Affine3d& aff, const double* arr) { aff = Eigen::Affine3d(Eigen::Translation3d(arr[0], arr[1], arr[2])) * Eigen::Affine3d(R); } +// Extract a affine transform to an array of length NUM_AFFINE_PARAMS +void affine_transform_to_array(Eigen::Affine3d const& aff, double* arr) { + Eigen::MatrixXd M = aff.matrix(); + int count = 0; + // The 4th row always has 0, 0, 0, 1 + for (int row = 0; row < 3; row++) { + for (int col = 0; col < 4; col++) { + arr[count] = M(row, col); + count++; + } + } +} + +// Convert an array of length NUM_AFFINE_PARAMS to a affine +// transform. Normalize the quaternion to make it into a rotation. 
+void array_to_affine_transform(Eigen::Affine3d& aff, const double* arr) { + Eigen::MatrixXd M = Eigen::Matrix::Identity(); + + int count = 0; + // The 4th row always has 0, 0, 0, 1 + for (int row = 0; row < 3; row++) { + for (int col = 0; col < 4; col++) { + M(row, col) = arr[count]; + count++; + } + } + + aff.matrix() = M; +} + // Read a 4x4 pose matrix of doubles from disk void readPoseMatrix(cv::Mat& pose, std::string const& filename) { pose = cv::Mat::zeros(4, 4, CV_64F); @@ -183,67 +236,83 @@ std::string matType(cv::Mat const& mat) { // Read the transform from depth to given camera void readCameraTransform(config_reader::ConfigReader& config, std::string const transform_str, - Eigen::MatrixXd& transform) { + Eigen::Affine3d& transform) { Eigen::Vector3d T; Eigen::Quaterniond R; if (!msg_conversions::config_read_transform(&config, transform_str.c_str(), &T, &R)) - LOG(FATAL) << "Unspecified transform: " << transform_str; + LOG(FATAL) << "Unspecified transform: " << transform_str << " for robot: " + << getenv("ASTROBEE_ROBOT") << "\n"; R.normalize(); - Eigen::Affine3d affine_trans = Eigen::Affine3d(Eigen::Translation3d(T.x(), T.y(), T.z())) * Eigen::Affine3d(R); - transform = affine_trans.matrix(); -} - -// Read a bunch of transforms from the robot calibration file -void readConfigFile(std::string const& navcam_to_hazcam_timestamp_offset_str, - std::string const& scicam_to_hazcam_timestamp_offset_str, - std::string const& hazcam_to_navcam_transform_str, - std::string const& scicam_to_hazcam_transform_str, std::string const& navcam_to_body_transform_str, - std::string const& hazcam_depth_to_image_transform_str, double& navcam_to_hazcam_timestamp_offset, - double& scicam_to_hazcam_timestamp_offset, Eigen::MatrixXd& hazcam_to_navcam_trans, - Eigen::MatrixXd& scicam_to_hazcam_trans, Eigen::MatrixXd& navcam_to_body_trans, - Eigen::Affine3d& hazcam_depth_to_image_transform, camera::CameraParameters& nav_cam_params, - camera::CameraParameters& haz_cam_params, camera::CameraParameters& sci_cam_params) { + transform = Eigen::Affine3d(Eigen::Translation3d(T.x(), T.y(), T.z())) * Eigen::Affine3d(R); +} + +// Read some transforms from the robot calibration file +void readConfigFile // NOLINT +(// Inputs // NOLINT + std::vector const& cam_names, // NOLINT + std::string const& nav_cam_to_body_trans_str, // NOLINT + std::string const& haz_cam_depth_to_image_trans_str, // NOLINT + // Outputs // NOLINT + std::vector & cam_params, // NOLINT + std::vector & nav_to_cam_trans, // NOLINT + std::vector & nav_to_cam_timestamp_offset, // NOLINT + Eigen::Affine3d & nav_cam_to_body_trans, // NOLINT + Eigen::Affine3d & haz_cam_depth_to_image_trans){ // NOLINT config_reader::ConfigReader config; config.AddFile("cameras.config"); config.AddFile("transforms.config"); if (!config.ReadFiles()) LOG(FATAL) << "Failed to read config files."; - readCameraTransform(config, hazcam_to_navcam_transform_str, hazcam_to_navcam_trans); - readCameraTransform(config, scicam_to_hazcam_transform_str, scicam_to_hazcam_trans); - readCameraTransform(config, navcam_to_body_transform_str, navcam_to_body_trans); - - if (!config.GetReal(navcam_to_hazcam_timestamp_offset_str.c_str(), &navcam_to_hazcam_timestamp_offset)) - LOG(FATAL) << "Could not read value of " << navcam_to_hazcam_timestamp_offset_str - << " for robot: " << getenv("ASTROBEE_ROBOT"); + cam_params.clear(); + nav_to_cam_trans.clear(); + nav_to_cam_timestamp_offset.clear(); + for (size_t it = 0; it < cam_names.size(); it++) { + camera::CameraParameters params = 
camera::CameraParameters(&config, cam_names[it].c_str()); + cam_params.push_back(params); + + std::string trans_str = "nav_cam_to_" + cam_names[it] + "_transform"; + if (cam_names[it] == "nav_cam") { + // transforms from nav cam to itself + nav_to_cam_trans.push_back(Eigen::Affine3d::Identity()); + nav_to_cam_timestamp_offset.push_back(0.0); + } else { + Eigen::Affine3d trans; + readCameraTransform(config, trans_str, trans); + nav_to_cam_trans.push_back(trans); + + std::string offset_str = "nav_cam_to_" + cam_names[it] + "_timestamp_offset"; + double offset; + if (!config.GetReal(offset_str.c_str(), &offset)) + LOG(FATAL) << "Could not read value of " << offset_str + << " for robot: " << getenv("ASTROBEE_ROBOT"); + nav_to_cam_timestamp_offset.push_back(offset); + } + } - if (!config.GetReal(scicam_to_hazcam_timestamp_offset_str.c_str(), &scicam_to_hazcam_timestamp_offset)) - LOG(FATAL) << "Could not read value of " << scicam_to_hazcam_timestamp_offset_str - << " for robot: " << getenv("ASTROBEE_ROBOT"); + // Read the remaining data + readCameraTransform(config, nav_cam_to_body_trans_str, nav_cam_to_body_trans); Eigen::MatrixXd M(4, 4); - config_reader::ConfigReader::Table mat(&config, hazcam_depth_to_image_transform_str.c_str()); + config_reader::ConfigReader::Table mat(&config, haz_cam_depth_to_image_trans_str.c_str()); int count = 0; for (int row = 0; row < M.rows(); row++) { for (int col = 0; col < M.cols(); col++) { count++; // note that the count stats from 1 if (!mat.GetReal(count, &M(row, col))) { - LOG(FATAL) << "Could not read value of " << hazcam_depth_to_image_transform_str + LOG(FATAL) << "Could not read value of " << haz_cam_depth_to_image_trans_str << " for robot: " << getenv("ASTROBEE_ROBOT"); } } } - hazcam_depth_to_image_transform.matrix() = M; - - nav_cam_params = camera::CameraParameters(&config, "nav_cam"); - haz_cam_params = camera::CameraParameters(&config, "haz_cam"); - sci_cam_params = camera::CameraParameters(&config, "sci_cam"); + haz_cam_depth_to_image_trans.matrix() = M; } // Given two poses aff0 and aff1, and 0 <= alpha <= 1, do linear interpolation. -Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, Eigen::Affine3d const& aff1) { +Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, + Eigen::Affine3d const& aff1) { Eigen::Quaternion rot0(aff0.linear()); Eigen::Quaternion rot1(aff1.linear()); @@ -261,7 +330,8 @@ Eigen::Affine3d linearInterp(double alpha, Eigen::Affine3d const& aff0, Eigen::A // Given a set of poses indexed by timestamp in an std::map, find the // interpolated pose at desired timestamp. This is efficient // only for very small maps. Else use the StampedPoseStorage class. -bool findInterpPose(double desired_time, std::map const& poses, Eigen::Affine3d& interp_pose) { +bool findInterpPose(double desired_time, std::map const& poses, + Eigen::Affine3d& interp_pose) { double left_time = std::numeric_limits::max(); double right_time = -left_time; for (auto it = poses.begin(); it != poses.end(); it++) { @@ -288,6 +358,19 @@ bool findInterpPose(double desired_time, std::map const return true; } +// Implement some heuristic to find the maximum rotation angle that can result +// from applying the given transform. It is assumed that the transform is not +// too different from the identity. 
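// A worked example (standalone sketch) of the heuristic described in
// the comment above and implemented in maxRotationAngle() below: for a
// 2-degree rotation about the z axis the result should come out close
// to 2, even if eulerAngles() returns the branch with angles near
// +/-pi.
#include <Eigen/Geometry>
#include <algorithm>
#include <cmath>
#include <iostream>

int main() {
  double rad = 2.0 * M_PI / 180.0;  // 2 degrees
  Eigen::Affine3d T(Eigen::AngleAxisd(rad, Eigen::Vector3d::UnitZ()));
  Eigen::Vector3d angles = T.linear().eulerAngles(0, 1, 2);
  // Fold angles close to +/-pi back toward zero, as in the code below
  for (int it = 0; it < 3; it++)
    angles[it] = std::min(std::abs(angles[it]), std::abs(M_PI - std::abs(angles[it])));
  std::cout << (180.0 / M_PI) * angles.norm() << "\n";  // about 2
  return 0;
}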
+double maxRotationAngle(Eigen::Affine3d const& T) { + Eigen::Vector3d angles = T.linear().eulerAngles(0, 1, 2); + + // Angles close to +/-pi can result even if the matrix is close to identity + for (size_t it = 0; it < 3; it++) + angles[it] = std::min(std::abs(angles[it]), std::abs(M_PI - std::abs(angles[it]))); + double angle_norm = (180.0 / M_PI) * angles.norm(); + return angle_norm; +} + void StampedPoseStorage::addPose(Eigen::Affine3d const& pose, double timestamp) { int bin_index = floor(timestamp); m_poses[bin_index][timestamp] = pose; @@ -436,13 +519,23 @@ double fileNameToTimestamp(std::string const& file_name) { // Create a directory unless it exists already void createDir(std::string const& dir) { - if (!boost::filesystem::create_directories(dir) && !boost::filesystem::exists(dir)) { - LOG(FATAL) << "Failed to create directory: " << dir; + if (!boost::filesystem::create_directories(dir) && !boost::filesystem::is_directory(dir)) { + LOG(FATAL) << "Failed to create directory: " << dir << "\n"; } } // Minor utilities for converting values to a string below +// Convert a string of values separated by spaces to a vector of doubles. +std::vector string_to_vector(std::string const& str) { + std::istringstream iss(str); + std::vector vals; + double val; + while (iss >> val) + vals.push_back(val); + return vals; +} + std::string number_to_string(double val) { std::ostringstream oss; oss.precision(8); @@ -509,6 +602,11 @@ int robust_find(std::string const& text, std::string const& val, int beg) { beg = text.find(val, beg); if (beg == std::string::npos) return beg; + // TODO(oalexan1): Must skip comments. From this position must + // read back towards the beginning of the current line and see if + // the text "--" is encountered, which would mean that this + // position is on a comment and hence the search must continue. + beg += val.size(); // advance // Look for spaces and the equal sign @@ -533,10 +631,49 @@ int robust_find(std::string const& text, std::string const& val, int beg) { return beg; } + +// A fragile function to determine if the value of the given parameter has a brace +bool param_val_has_braces(std::string const& param_name, std::string const& parent, + // The text to search + std::string const& text) { + int beg = 0; + + // First find the parent, if provided + if (parent != "") { + beg = robust_find(text, parent, beg); + if (beg == std::string::npos) LOG(FATAL) << "Could not find the field '" + << parent << " =' in the config file."; + } + + // Find the param after the parent + beg = robust_find(text, param_name, beg); + if (beg == std::string::npos) { + std::string msg; + if (parent != "") msg = " Tried to locate it after field '" + parent + "'."; + LOG(FATAL) << "Could not find the field '" << param_name << " =' in the config file." << msg; + } + + // Now we are positioned after the equal sign + bool has_brace = false; + while (beg < static_cast(text.size())) { + if (text[beg] == ' ') { + beg++; + continue; + } + + if (text[beg] == '{') has_brace = true; + + break; + } + + return has_brace; +} + // Replace a given parameter's value in the text. This is very fragile // code, particularly it does not understand that text starting with // "--" is a comment. 
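// A sketch of the comment check proposed in the TODO inside
// robust_find() above; this is an assumption about how it might be
// done, not code from the patch: scan back from a match position to
// the start of its line and test whether a Lua-style "--" comment
// marker precedes the match.
#include <string>

bool in_lua_comment(std::string const& text, size_t pos) {
  // Find the beginning of the line containing pos
  size_t line_start = text.rfind('\n', pos);
  line_start = (line_start == std::string::npos) ? 0 : line_start + 1;
  // The position is commented out if "--" occurs before it on its line
  size_t comment = text.find("--", line_start);
  return comment != std::string::npos && comment < pos;
}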
-void replace_param_val(std::string const& param_name, std::string const& param_val, std::string const& parent, +void replace_param_val(std::string const& param_name, std::string const& param_val, + std::string const& parent, std::string const& beg_brace, std::string const& end_brace, // The text to modify std::string& text) { @@ -545,7 +682,8 @@ void replace_param_val(std::string const& param_name, std::string const& param_v // First find the parent, if provided if (parent != "") { beg = robust_find(text, parent, beg); - if (beg == std::string::npos) LOG(FATAL) << "Could not find the field '" << parent << " =' in the config file."; + if (beg == std::string::npos) LOG(FATAL) << "Could not find the field '" + << parent << " =' in the config file."; } // Find the param after the parent @@ -563,7 +701,8 @@ void replace_param_val(std::string const& param_name, std::string const& param_v beg = text.find(beg_brace, beg); if (beg == std::string::npos) { - LOG(FATAL) << "Failed to replace value for " << parent << " " << param_name << " in the config file."; + LOG(FATAL) << "Failed to replace value for " << parent << " " << param_name + << " in the config file."; } end = beg + 1; @@ -580,28 +719,34 @@ void replace_param_val(std::string const& param_name, std::string const& param_v } } else { - // No braces, then just look for the next comma - end = text.find(",", beg); + // No braces, then just look for the next comma or newline + end = std::min(text.find(",", beg), text.find("\n", beg)); if (beg == std::string::npos) LOG(FATAL) << "Could not parse correctly " << param_name; - end--; // go before the comma + end--; // go before the found character } text = text.replace(beg, end - beg + 1, param_val); } -// Modify in-place the robot config file -void update_config_file(bool update_cam1, std::string const& cam1_name, - boost::shared_ptr cam1_params, bool update_cam2, - std::string const& cam2_name, boost::shared_ptr cam2_params, - bool update_depth_to_image_transform, Eigen::Affine3d const& depth_to_image_transform, - bool update_extrinsics, Eigen::Affine3d const& cam2_to_cam1_transform, - bool update_timestamp_offset, std::string const& cam1_to_cam2_timestamp_offset_str, - double cam1_to_cam2_timestamp_offset) { +// Save some transforms from the robot calibration file. This has some very fragile +// logic and cannot handle comments in the config file. 
+void updateConfigFile // NOLINT +(std::vector const& cam_names, // NOLINT + std::string const& haz_cam_depth_to_image_trans_str, // NOLINT + std::vector const& cam_params, // NOLINT + std::vector const& nav_to_cam_trans, // NOLINT + std::vector const& nav_to_cam_timestamp_offset, // NOLINT + Eigen::Affine3d const& haz_cam_depth_to_image_trans) { // NOLINT + if (cam_names.size() != cam_params.size() || + cam_names.size() != nav_to_cam_trans.size() || + cam_names.size() != nav_to_cam_timestamp_offset.size()) + LOG(FATAL) << "The number of various inputs to updateConfigFile() do not match."; + // Open the config file to modify char* config_dir = getenv("ASTROBEE_CONFIG_DIR"); - if (config_dir == NULL) LOG(FATAL) << "The environmental variable ASTROBEE_CONFIG_DIR was not set"; + if (config_dir == NULL) LOG(FATAL) << "The environmental variable ASTROBEE_CONFIG_DIR was not set."; char* robot = getenv("ASTROBEE_ROBOT"); - if (robot == NULL) LOG(FATAL) << "The environmental variable ASTROBEE_ROBOT was not set"; + if (robot == NULL) LOG(FATAL) << "The environmental variable ASTROBEE_ROBOT was not set."; std::string config_file = std::string(config_dir) + "/robots/" + robot + ".config"; std::ifstream ifs(config_file.c_str()); if (!ifs.is_open()) LOG(FATAL) << "Could not open file: " << config_file; @@ -612,108 +757,63 @@ void update_config_file(bool update_cam1, std::string const& cam1_name, while (std::getline(ifs, line)) text += line + "\n"; ifs.close(); - std::cout << "Updating " << config_file << std::endl; - - // Handle cam1 - if (update_cam1) { - std::string cam1_intrinsics = intrinsics_to_string(cam1_params->GetFocalVector(), cam1_params->GetOpticalOffset()); - replace_param_val("intrinsic_matrix", cam1_intrinsics, cam1_name, "{", "}", text); - Eigen::VectorXd cam1_distortion = cam1_params->GetDistortion(); - if (cam1_distortion.size() > 1) { - std::string distortion_str = vector_to_string(cam1_distortion); - replace_param_val("distortion_coeff", distortion_str, cam1_name, "{", "}", text); - } else if (cam1_distortion.size() == 1) { - std::string distortion_str = " " + number_to_string(cam1_distortion[0]); - replace_param_val("distortion_coeff", distortion_str, cam1_name, "", "", text); - } else { - LOG(FATAL) << "Camera " << cam1_name << " must have distortion."; - } - } + std::cout << "Updating: " << config_file << std::endl; - // Handle cam2 - if (update_cam2) { - std::string cam2_intrinsics = intrinsics_to_string(cam2_params->GetFocalVector(), cam2_params->GetOpticalOffset()); - replace_param_val("intrinsic_matrix", cam2_intrinsics, cam2_name, "{", "}", text); - Eigen::VectorXd cam2_distortion = cam2_params->GetDistortion(); - if (cam2_distortion.size() > 1) { - std::string distortion_str = vector_to_string(cam2_distortion); - replace_param_val("distortion_coeff", distortion_str, cam2_name, "{", "}", text); - } else if (cam2_distortion.size() == 1) { - std::string distortion_str = " " + number_to_string(cam2_distortion[0]); - replace_param_val("distortion_coeff", distortion_str, cam2_name, "", "", text); - } else { - LOG(FATAL) << "Camera " << cam2_name << " must have distortion."; - } - } - - std::string short_cam1 = ff_common::ReplaceInStr(cam1_name, "_", ""); - std::string short_cam2 = ff_common::ReplaceInStr(cam2_name, "_", ""); + for (size_t it = 0; it < cam_names.size(); it++) { + std::string const& cam_name = cam_names[it]; // alias + camera::CameraParameters const& params = cam_params[it]; // alias - if (update_depth_to_image_transform) { - Eigen::MatrixXd 
depth_to_image_transform_mat = depth_to_image_transform.matrix(); - std::string depth_to_image_transform_param = short_cam2 + "_depth_to_image_transform"; - std::string depth_to_image_transform_val = matrix_to_string(depth_to_image_transform_mat); - replace_param_val(depth_to_image_transform_param, depth_to_image_transform_val, "", "{", "}", text); - } + std::string intrinsics = intrinsics_to_string(params.GetFocalVector(), + params.GetOpticalOffset()); + replace_param_val("intrinsic_matrix", intrinsics, cam_name, "{", "}", text); + Eigen::VectorXd cam_distortion = params.GetDistortion(); - if (update_extrinsics) { - if (cam1_name == "nav_cam") { - std::string extrinsics_name = short_cam2 + "_to_" + short_cam1 + "_transform"; - std::string extrinsics_str = affine_to_string(cam2_to_cam1_transform); - replace_param_val(extrinsics_name, extrinsics_str, "", "(", ")", text); - } else if (cam1_name == "sci_cam") { - std::string extrinsics_name = short_cam1 + "_to_" + short_cam2 + "_transform"; - std::string extrinsics_str = affine_to_string(cam2_to_cam1_transform.inverse()); - replace_param_val(extrinsics_name, extrinsics_str, "", "(", ")", text); - } else { - LOG(FATAL) << "Unexpected value for cam1: " << cam1_name; - } - - if (update_timestamp_offset) - replace_param_val(cam1_to_cam2_timestamp_offset_str, " " + number_to_string(cam1_to_cam2_timestamp_offset), "", - "", "", text); - } + // This can switch the distortion from being a number to being a vector, + // and vice-versa, which makes the logic more complicated. + std::string distortion_str; + if (cam_distortion.size() > 1) + distortion_str = vector_to_string(cam_distortion); + else if (cam_distortion.size() == 1) + distortion_str = number_to_string(cam_distortion[0]); + else + LOG(FATAL) << "Camera " << cam_name << " must have distortion."; - // Write the updated values - std::ofstream ofs(config_file.c_str()); - ofs << text; - ofs.close(); -} + // Note that we look at whether there are braces around param val + // before replacement, rather than if the replacement param val + // has braces. 
+ if (param_val_has_braces("distortion_coeff", cam_name, text)) + replace_param_val("distortion_coeff", distortion_str, cam_name, "{", "}", text); + else + replace_param_val("distortion_coeff", " " + distortion_str, cam_name, "", "", text); -// Some small utilities for writing a file having poses for nav, sci, and haz cam, -// and also the depth timestamp corresponding to given haz intensity timestamp -void writeCameraPoses(std::string const& filename, std::map const& haz_depth_to_image_timestamps, - std::map > const& world_to_cam_poses) { - std::ofstream ofs(filename.c_str()); - ofs.precision(17); + // Next deal with extrinsics - for (auto it = world_to_cam_poses.begin(); it != world_to_cam_poses.end(); it++) { - std::string cam_name = it->first; - auto& cam_poses = it->second; + if (cam_names[it] == "nav_cam") continue; // this will have the trivial transforms - for (auto it2 = cam_poses.begin(); it2 != cam_poses.end(); it2++) { - double timestamp = it2->first; - Eigen::MatrixXd M = it2->second.matrix(); + std::string trans_name = "nav_cam_to_" + cam_names[it] + "_transform"; + std::string trans_val = affine_to_string(nav_to_cam_trans[it]); + replace_param_val(trans_name, trans_val, "", "(", ")", text); - ofs << cam_name << ' ' << timestamp << " "; - for (int row = 0; row < 4; row++) { - for (int col = 0; col < 4; col++) { - ofs << M(row, col) << " "; - } - } - ofs << "\n"; - } + std::string offset_str = "nav_cam_to_" + cam_names[it] + "_timestamp_offset"; + replace_param_val(offset_str, " " + number_to_string(nav_to_cam_timestamp_offset[it]), "", + "", "", text); } - for (auto it = haz_depth_to_image_timestamps.begin(); it != haz_depth_to_image_timestamps.end(); it++) { - ofs << "haz_depth_to_image " << it->first << ' ' << it->second << "\n"; - } + std::string depth_to_image_transform_val + = matrix_to_string(haz_cam_depth_to_image_trans.matrix()); + replace_param_val(haz_cam_depth_to_image_trans_str, depth_to_image_transform_val, + "", "{", "}", text); + // Write the updated values + std::ofstream ofs(config_file.c_str()); + ofs << text; ofs.close(); } -void readCameraPoses(std::string const& filename, std::map& haz_depth_to_image_timestamps, - std::map >& world_to_cam_poses) { +void readCameraPoses(std::string const& filename, + std::map& haz_depth_to_image_timestamps, + std::map >& + world_to_cam_poses) { haz_depth_to_image_timestamps.clear(); world_to_cam_poses.clear(); @@ -844,58 +944,6 @@ void pickTimestampsInBounds(std::vector const& timestamps, double left_b return; } -// Triangulate rays emanating from given undistorted and centered pixels -Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, - Eigen::Affine3d const& world_to_cam1, - Eigen::Affine3d const& world_to_cam2, - Eigen::Vector2d const& pix1, - Eigen::Vector2d const& pix2) { - Eigen::Matrix3d k1; - k1 << focal_length1, 0, 0, 0, focal_length1, 0, 0, 0, 1; - - Eigen::Matrix3d k2; - k2 << focal_length2, 0, 0, 0, focal_length2, 0, 0, 0, 1; - - openMVG::Mat34 cid_to_p1, cid_to_p2; - openMVG::P_From_KRt(k1, world_to_cam1.linear(), world_to_cam1.translation(), &cid_to_p1); - openMVG::P_From_KRt(k2, world_to_cam2.linear(), world_to_cam2.translation(), &cid_to_p2); - - openMVG::Triangulation tri; - tri.add(cid_to_p1, pix1); - tri.add(cid_to_p2, pix2); - - Eigen::Vector3d solution = tri.compute(); - return solution; -} - -// Triangulate n rays emanating from given undistorted and centered pixels -Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, - std::vector const& world_to_cam_vec, - 
std::vector const& pix_vec) { - if (focal_length_vec.size() != world_to_cam_vec.size() || - focal_length_vec.size() != pix_vec.size()) - LOG(FATAL) << "All inputs to Triangulate() must have the same size."; - - if (focal_length_vec.size() <= 1) - LOG(FATAL) << "At least two rays must be passed to Triangulate()."; - - openMVG::Triangulation tri; - - for (size_t it = 0; it < focal_length_vec.size(); it++) { - Eigen::Matrix3d k; - k << focal_length_vec[it], 0, 0, 0, focal_length_vec[it], 0, 0, 0, 1; - - openMVG::Mat34 cid_to_p; - openMVG::P_From_KRt(k, world_to_cam_vec[it].linear(), world_to_cam_vec[it].translation(), - &cid_to_p); - - tri.add(cid_to_p, pix_vec[it]); - } - - Eigen::Vector3d solution = tri.compute(); - return solution; -} - // A debug utility for saving a camera in a format ASP understands. // Need to expose the sci cam intrinsics. void save_tsai_camera(Eigen::MatrixXd const& desired_cam_to_world_trans, std::string const& output_dir, diff --git a/dense_map/geometry_mapper/src/interest_point.cc b/dense_map/geometry_mapper/src/interest_point.cc index 7cef6c1c..17ed579e 100644 --- a/dense_map/geometry_mapper/src/interest_point.cc +++ b/dense_map/geometry_mapper/src/interest_point.cc @@ -22,8 +22,20 @@ #include #include // from the isaac repo +#include // from the isaac repo #include // from the astrobee repo +// Get rid of warnings beyond our control +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#pragma GCC diagnostic ignored "-Wunused-function" +#pragma GCC diagnostic push +#include +#include +#include +#include +#include +#pragma GCC diagnostic pop + #include #include @@ -155,6 +167,131 @@ void matchFeatures(std::mutex* match_mutex, int left_image_index, int right_imag match_mutex->unlock(); } + +// Match features while assuming that the input cameras can be used to filter out +// outliers by reprojection error. 
+void matchFeaturesWithCams(std::mutex* match_mutex, int left_image_index, int right_image_index, + camera::CameraParameters const& left_params, + camera::CameraParameters const& right_params, + Eigen::Affine3d const& left_world_to_cam, + Eigen::Affine3d const& right_world_to_cam, + double reprojection_error, + cv::Mat const& left_descriptors, cv::Mat const& right_descriptors, + Eigen::Matrix2Xd const& left_keypoints, + Eigen::Matrix2Xd const& right_keypoints, + bool verbose, + // output + MATCH_PAIR* matches) { + // Match by using descriptors first + std::vector cv_matches; + interest_point::FindMatches(left_descriptors, right_descriptors, &cv_matches); + + // Do filtering + std::vector left_vec; + std::vector right_vec; + std::vector filtered_cv_matches; + for (size_t j = 0; j < cv_matches.size(); j++) { + int left_ip_index = cv_matches.at(j).queryIdx; + int right_ip_index = cv_matches.at(j).trainIdx; + + Eigen::Vector2d dist_left_ip(left_keypoints.col(left_ip_index)[0], + left_keypoints.col(left_ip_index)[1]); + + Eigen::Vector2d dist_right_ip(right_keypoints.col(right_ip_index)[0], + right_keypoints.col(right_ip_index)[1]); + + Eigen::Vector2d undist_left_ip; + Eigen::Vector2d undist_right_ip; + left_params.Convert + (dist_left_ip, &undist_left_ip); + right_params.Convert + (dist_right_ip, &undist_right_ip); + + Eigen::Vector3d X = + dense_map::TriangulatePair(left_params.GetFocalLength(), right_params.GetFocalLength(), + left_world_to_cam, right_world_to_cam, + undist_left_ip, undist_right_ip); + + // Project back into the cameras + Eigen::Vector3d left_cam_X = left_world_to_cam * X; + Eigen::Vector2d undist_left_pix + = left_params.GetFocalVector().cwiseProduct(left_cam_X.hnormalized()); + Eigen::Vector2d dist_left_pix; + left_params.Convert(undist_left_pix, + &dist_left_pix); + + Eigen::Vector3d right_cam_X = right_world_to_cam * X; + Eigen::Vector2d undist_right_pix + = right_params.GetFocalVector().cwiseProduct(right_cam_X.hnormalized()); + Eigen::Vector2d dist_right_pix; + right_params.Convert(undist_right_pix, + &dist_right_pix); + + // Filter out points whose reprojection error is too big + bool is_good = ((dist_left_ip - dist_left_pix).norm() <= reprojection_error && + (dist_right_ip - dist_right_pix).norm() <= reprojection_error); + + // If any values above are Inf or NaN, is_good will be false as well + if (!is_good) continue; + + // Get the keypoints from the good matches + left_vec.push_back(cv::Point2f(left_keypoints.col(left_ip_index)[0], + left_keypoints.col(left_ip_index)[1])); + right_vec.push_back(cv::Point2f(right_keypoints.col(right_ip_index)[0], + right_keypoints.col(right_ip_index)[1])); + + filtered_cv_matches.push_back(cv_matches[j]); + } + + if (left_vec.empty()) return; + + // Filter using geometry constraints + // These may need some tweaking but works reasonably well. 
+ double ransacReprojThreshold = 20.0; + cv::Mat inlier_mask; + int maxIters = 10000; + double confidence = 0.8; + + // affine2D works better than homography + // cv::Mat H = cv::findHomography(left_vec, right_vec, cv::RANSAC, + // ransacReprojThreshold, inlier_mask, maxIters, confidence); + cv::Mat H = cv::estimateAffine2D(left_vec, right_vec, inlier_mask, cv::RANSAC, + ransacReprojThreshold, maxIters, confidence); + + std::vector left_ip, right_ip; + for (size_t j = 0; j < filtered_cv_matches.size(); j++) { + int left_ip_index = filtered_cv_matches.at(j).queryIdx; + int right_ip_index = filtered_cv_matches.at(j).trainIdx; + + if (inlier_mask.at(j, 0) == 0) continue; + + cv::Mat left_desc = left_descriptors.row(left_ip_index); + cv::Mat right_desc = right_descriptors.row(right_ip_index); + + InterestPoint left; + left.setFromCvKeypoint(left_keypoints.col(left_ip_index), left_desc); + + InterestPoint right; + right.setFromCvKeypoint(right_keypoints.col(right_ip_index), right_desc); + + left_ip.push_back(left); + right_ip.push_back(right); + } + + // Update the shared variable using a lock + match_mutex->lock(); + + // Print the verbose message inside the lock, otherwise the text + // may get messed up. + if (verbose) + std::cout << "Number of matches for pair " + << left_image_index << ' ' << right_image_index << ": " + << left_ip.size() << std::endl; + + *matches = std::make_pair(left_ip, right_ip); + match_mutex->unlock(); +} + void writeIpRecord(std::ofstream& f, InterestPoint const& p) { f.write(reinterpret_cast(&(p.x)), sizeof(p.x)); f.write(reinterpret_cast(&(p.y)), sizeof(p.y)); @@ -212,4 +349,263 @@ void saveImagesAndMatches(std::string const& left_prefix, std::string const& rig writeMatchFile(match_file, match_pair.first, match_pair.second); } +// Triangulate rays emanating from given undistorted and centered pixels +Eigen::Vector3d TriangulatePair(double focal_length1, double focal_length2, + Eigen::Affine3d const& world_to_cam1, + Eigen::Affine3d const& world_to_cam2, + Eigen::Vector2d const& pix1, + Eigen::Vector2d const& pix2) { + Eigen::Matrix3d k1; + k1 << focal_length1, 0, 0, 0, focal_length1, 0, 0, 0, 1; + + Eigen::Matrix3d k2; + k2 << focal_length2, 0, 0, 0, focal_length2, 0, 0, 0, 1; + + openMVG::Mat34 cid_to_p1, cid_to_p2; + openMVG::P_From_KRt(k1, world_to_cam1.linear(), world_to_cam1.translation(), &cid_to_p1); + openMVG::P_From_KRt(k2, world_to_cam2.linear(), world_to_cam2.translation(), &cid_to_p2); + + openMVG::Triangulation tri; + tri.add(cid_to_p1, pix1); + tri.add(cid_to_p2, pix2); + + Eigen::Vector3d solution = tri.compute(); + return solution; +} + +// Triangulate n rays emanating from given undistorted and centered pixels +Eigen::Vector3d Triangulate(std::vector const& focal_length_vec, + std::vector const& world_to_cam_vec, + std::vector const& pix_vec) { + if (focal_length_vec.size() != world_to_cam_vec.size() || + focal_length_vec.size() != pix_vec.size()) + LOG(FATAL) << "All inputs to Triangulate() must have the same size."; + + if (focal_length_vec.size() <= 1) + LOG(FATAL) << "At least two rays must be passed to Triangulate()."; + + openMVG::Triangulation tri; + + for (size_t it = 0; it < focal_length_vec.size(); it++) { + Eigen::Matrix3d k; + k << focal_length_vec[it], 0, 0, 0, focal_length_vec[it], 0, 0, 0, 1; + + openMVG::Mat34 cid_to_p; + openMVG::P_From_KRt(k, world_to_cam_vec[it].linear(), world_to_cam_vec[it].translation(), + &cid_to_p); + + tri.add(cid_to_p, pix_vec[it]); + } + + Eigen::Vector3d solution = tri.compute(); + return solution; +} + 
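// A usage sketch for Triangulate() above, with hypothetical values,
// and assuming the element types stripped from the signatures during
// formatting are double, Eigen::Affine3d, and Eigen::Vector2d. Two
// cameras one meter apart along x, with identical orientation, observe
// a point two meters in front of the first one.
std::vector<double> focal_length_vec = {600.0, 600.0};
std::vector<Eigen::Affine3d> world_to_cam_vec = {
    Eigen::Affine3d::Identity(),                             // camera 1 at origin
    Eigen::Affine3d(Eigen::Translation3d(-1.0, 0.0, 0.0))};  // camera 2 at x = 1
std::vector<Eigen::Vector2d> pix_vec = {
    Eigen::Vector2d(0.0, 0.0),      // the point projects to the center of camera 1
    Eigen::Vector2d(-300.0, 0.0)};  // and to 600 * (-1 / 2) = -300 in camera 2
Eigen::Vector3d X = dense_map::Triangulate(focal_length_vec, world_to_cam_vec, pix_vec);
// X comes out near (0, 0, 2), the point consistent with both rays.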
+void detectMatchFeatures( // Inputs + std::vector const& cams, + std::vector const& cam_names, + std::vector const& cam_params, + std::vector const& world_to_cam, int num_overlaps, + int initial_max_reprojection_error, int num_match_threads, + bool verbose, + // Outputs + std::vector>>& keypoint_vec, + std::vector>& pid_to_cid_fid, + std::vector & image_files) { + // Wipe the outputs + keypoint_vec.clear(); + pid_to_cid_fid.clear(); + image_files.clear(); + + if (verbose) { + int count = 10000; + for (size_t it = 0; it < cams.size(); it++) { + std::ostringstream oss; + oss << count << "_" << cam_names[cams[it].camera_type] << ".jpg"; + std::string name = oss.str(); + std::cout << "Writing: " << name << std::endl; + cv::imwrite(name, cams[it].image); + count++; + image_files.push_back(name); + } + } + + // Detect features using multiple threads. Too many threads may result + // in high memory usage. + std::ostringstream oss; + oss << num_match_threads; + std::string num_threads = oss.str(); + google::SetCommandLineOption("num_threads", num_threads.c_str()); + if (!gflags::GetCommandLineOption("num_threads", &num_threads)) + LOG(FATAL) << "Failed to get the value of --num_threads in Astrobee software.\n"; + std::cout << "Using " << num_threads << " threads for feature detection/matching." << std::endl; + + std::cout << "Detecting features." << std::endl; + + std::vector cid_to_descriptor_map; + std::vector cid_to_keypoint_map; + cid_to_descriptor_map.resize(cams.size()); + cid_to_keypoint_map.resize(cams.size()); + { + // Make the thread pool go out of scope when not needed to not use up memory + ff_common::ThreadPool thread_pool; + for (size_t it = 0; it < cams.size(); it++) { + thread_pool.AddTask + (&dense_map::detectFeatures, // multi-thread // NOLINT + // dense_map::detectFeatures( // single-thread // NOLINT + cams[it].image, verbose, &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); + } + thread_pool.Join(); + } + + MATCH_MAP matches; + + std::vector > image_pairs; + for (size_t it1 = 0; it1 < cams.size(); it1++) { + for (size_t it2 = it1 + 1; it2 < std::min(cams.size(), it1 + num_overlaps + 1); it2++) { + image_pairs.push_back(std::make_pair(it1, it2)); + } + } + + { + std::cout << "Matching features." 
<< std::endl; + ff_common::ThreadPool thread_pool; + std::mutex match_mutex; + for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { + auto pair = image_pairs[pair_it]; + int left_image_it = pair.first, right_image_it = pair.second; + thread_pool.AddTask + (&dense_map::matchFeaturesWithCams, // multi-threaded // NOLINT + // dense_map::matchFeaturesWithCams( // single-threaded // NOLINT + &match_mutex, left_image_it, right_image_it, cam_params[cams[left_image_it].camera_type], + cam_params[cams[right_image_it].camera_type], world_to_cam[left_image_it], + world_to_cam[right_image_it], initial_max_reprojection_error, + cid_to_descriptor_map[left_image_it], cid_to_descriptor_map[right_image_it], + cid_to_keypoint_map[left_image_it], cid_to_keypoint_map[right_image_it], verbose, + &matches[pair]); + } + thread_pool.Join(); + } + cid_to_descriptor_map = std::vector(); // Wipe, takes memory + + // Give all interest points in a given image a unique id, and put + // them in a vector with the id corresponding to the interest point + std::vector, int>> keypoint_map(cams.size()); + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair const& index_pair = it->first; // alias + + int left_index = index_pair.first; + int right_index = index_pair.second; + + dense_map::MATCH_PAIR const& match_pair = it->second; // alias + std::vector const& left_ip_vec = match_pair.first; + std::vector const& right_ip_vec = match_pair.second; + for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { + auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); + auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); + // Initialize to zero for the moment + keypoint_map[left_index][dist_left_ip] = 0; + keypoint_map[right_index][dist_right_ip] = 0; + } + } + keypoint_vec.resize(cams.size()); + for (size_t cid = 0; cid < cams.size(); cid++) { + keypoint_vec[cid].resize(keypoint_map[cid].size()); + int fid = 0; + for (auto ip_it = keypoint_map[cid].begin(); ip_it != keypoint_map[cid].end(); + ip_it++) { + auto& dist_ip = ip_it->first; // alias + keypoint_map[cid][dist_ip] = fid; + keypoint_vec[cid][fid] = dist_ip; + fid++; + } + } + + // If feature A in image I matches feather B in image J, which + // matches feature C in image K, then (A, B, C) belong together in + // a track, and will have a single triangulated xyz. Build such a track. 
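// A toy illustration (sketch, hypothetical indices) of the track logic
// described in the comment above, using the same openMVG calls as the
// code that follows: feature 5 in image 0 matches feature 7 in image
// 1, which matches feature 2 in image 2, so all three are fused into a
// single track that will get one triangulated xyz.
openMVG::matching::PairWiseMatches toy_matches;
toy_matches[std::make_pair(0, 1)] = {openMVG::matching::IndMatch(5, 7)};
toy_matches[std::make_pair(1, 2)] = {openMVG::matching::IndMatch(7, 2)};
openMVG::tracks::TracksBuilder toy_builder;
toy_builder.Build(toy_matches);
toy_builder.Filter();
openMVG::tracks::STLMAPTracks toy_tracks;
toy_builder.ExportToSTL(toy_tracks);
// toy_tracks now holds one track: {0 -> 5, 1 -> 7, 2 -> 2}.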
+ + openMVG::matching::PairWiseMatches match_map; + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair const& index_pair = it->first; // alias + + int left_index = index_pair.first; + int right_index = index_pair.second; + + dense_map::MATCH_PAIR const& match_pair = it->second; // alias + std::vector const& left_ip_vec = match_pair.first; + std::vector const& right_ip_vec = match_pair.second; + + std::vector mvg_matches; + + for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { + auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); + auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); + + int left_id = keypoint_map[left_index][dist_left_ip]; + int right_id = keypoint_map[right_index][dist_right_ip]; + mvg_matches.push_back(openMVG::matching::IndMatch(left_id, right_id)); + } + match_map[index_pair] = mvg_matches; + } + + if (verbose) { + for (auto it = matches.begin(); it != matches.end(); it++) { + std::pair index_pair = it->first; + dense_map::MATCH_PAIR const& match_pair = it->second; + + int left_index = index_pair.first; + int right_index = index_pair.second; + + std::string left_image = image_files[left_index]; + std::string right_image = image_files[right_index]; + + std::string left_stem = boost::filesystem::path(left_image).stem().string(); + std::string right_stem = boost::filesystem::path(right_image).stem().string(); + + std::string match_file = left_stem + "__" + right_stem + ".match"; + + std::cout << "Writing: " << left_image << ' ' << right_image << ' ' + << match_file << std::endl; + dense_map::writeMatchFile(match_file, match_pair.first, match_pair.second); + } + } + + // De-allocate data not needed anymore and take up a lot of RAM + matches.clear(); matches = MATCH_MAP(); + keypoint_map.clear(); keypoint_map.shrink_to_fit(); + cid_to_keypoint_map.clear(); cid_to_keypoint_map.shrink_to_fit(); + + { + // Build tracks + // De-allocate these as soon as not needed to save memory + openMVG::tracks::TracksBuilder trackBuilder; + trackBuilder.Build(match_map); // Build: Efficient fusion of correspondences + trackBuilder.Filter(); // Filter: Remove tracks that have conflict + // trackBuilder.ExportToStream(std::cout); + // Export tracks as a map (each entry is a sequence of imageId and featureIndex): + // {TrackIndex => {(imageIndex, featureIndex), ... ,(imageIndex, featureIndex)} + openMVG::tracks::STLMAPTracks map_tracks; + trackBuilder.ExportToSTL(map_tracks); + match_map = openMVG::matching::PairWiseMatches(); // wipe this, no longer needed + trackBuilder = openMVG::tracks::TracksBuilder(); // wipe it + + if (map_tracks.empty()) + LOG(FATAL) << "No tracks left after filtering. 
Perhaps images are too dissimilar?\n"; + + // Populate the filtered tracks + size_t num_elems = map_tracks.size(); + pid_to_cid_fid.resize(num_elems); + size_t curr_id = 0; + for (auto itr = map_tracks.begin(); itr != map_tracks.end(); itr++) { + for (auto itr2 = (itr->second).begin(); itr2 != (itr->second).end(); itr2++) { + pid_to_cid_fid[curr_id][itr2->first] = itr2->second; + } + curr_id++; + } + } + + return; +} + } // end namespace dense_map diff --git a/dense_map/geometry_mapper/src/texture_processing.cc b/dense_map/geometry_mapper/src/texture_processing.cc index 1dfebc23..8a7bb1bb 100644 --- a/dense_map/geometry_mapper/src/texture_processing.cc +++ b/dense_map/geometry_mapper/src/texture_processing.cc @@ -55,35 +55,46 @@ #include #include +// TODO(oalexan1): Consider applying TILE_PADDING not at the atlas +// forming stage, when the patch is padded, but earlier, right when +// the patch is created from a mesh triangle. That will ensure that +// while the same overall buffer is used, the padding region will have +// actual image samples, which may improve the rendering in meshlab +// when one zooms out, where now for some reason the triangle edges +// show up. Can test the output with the test_texture_gen tool. + namespace dense_map { -const int NUM_CHANNELS = 4; // BRGA -const int TILE_PADDING = 4; +// Use int64_t values as much as possible when it comes to dealing with +// pixel indices, as for large images stored as a single array, an int +// (which is the same as int32) index was shown to overflow. +const int64_t NUM_CHANNELS = 4; // BGRA +const int64_t TILE_PADDING = 4; -IsaacTextureAtlas::IsaacTextureAtlas(unsigned int width, unsigned int height) - : width(width), height(height), determined_height(0), padding(TILE_PADDING), finalized(false) { +IsaacTextureAtlas::IsaacTextureAtlas(int64_t width, int64_t height) + : width(width), height(height), determined_height(0), finalized(false) { bin = RectangularBin::create(width, height); - image = mve::ByteImage::create(width, height, NUM_CHANNELS); + + // Do not create the image buffer yet; we don't need it until after its precise + // size is determined. } -/** - * Copies the src image into the dest image at the given position, - * optionally adding a border. - * @warning asserts that the given src image fits into the given dest image. - */ -void copy_into(mve::ByteImage::ConstPtr src, int x, int y, mve::ByteImage::Ptr dest, int border = 0) { - assert(x >= 0 && x + src->width() + 2 * border <= dest->width()); - assert(y >= 0 && y + src->height() + 2 * border <= dest->height()); +// Copies the src image into the dest image at the given position, +// optionally adding a border. +// It asserts that the given src image fits into the given dest image. 
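// A usage sketch for copy_into() below (values hypothetical): paste an
// 8x8 BGRA patch into a 1024x1024 atlas at position (100, 200) with a
// 4-pixel border. The patch then occupies the 16x16 rectangle starting
// at (100, 200), with the actual samples starting at (104, 204).
mve::ByteImage::Ptr atlas = mve::ByteImage::create(1024, 1024, 4);
mve::ByteImage::Ptr patch = mve::ByteImage::create(8, 8, 4);
copy_into(patch, 100, 200, atlas, 4);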
+void copy_into(mve::ByteImage::ConstPtr src, int64_t x, int64_t y, mve::ByteImage::Ptr dest, int64_t border = 0) { + assert(x >= 0L && x + src->width() + 2 * border <= dest->width()); + assert(y >= 0L && y + src->height() + 2 * border <= dest->height()); assert(src->channels() == dest->channels()); - for (int i = 0; i < src->width() + 2 * border; ++i) { - for (int j = 0; j < src->height() + 2 * border; j++) { - int sx = i - border; - int sy = j - border; + for (int64_t i = 0; i < src->width() + 2 * border; i++) { + for (int64_t j = 0; j < src->height() + 2 * border; j++) { + int64_t sx = i - border; + int64_t sy = j - border; if (sx < 0 || sx >= src->width() || sy < 0 || sy >= src->height()) continue; - for (int c = 0; c < src->channels(); ++c) { + for (int64_t c = 0; c < src->channels(); ++c) { dest->at(x + i, y + j, c) = src->at(sx, sy, c); } } @@ -94,37 +105,41 @@ typedef std::vector> PixelVector; typedef std::set> PixelSet; bool IsaacTextureAtlas::insert(IsaacTexturePatch::ConstPtr texture_patch) { - if (finalized) throw util::Exception("No insertion possible, IsaacTextureAtlas already finalized"); + if (finalized) + throw util::Exception("No insertion possible, IsaacTextureAtlas already finalized"); assert(bin != NULL); - int const width = texture_patch->get_width() + 2 * padding; - int const height = texture_patch->get_height() + 2 * padding; - Rect rect(0, 0, width, height); + int64_t width = texture_patch->get_width() + 2 * TILE_PADDING; + int64_t height = texture_patch->get_height() + 2 * TILE_PADDING; + Rect rect = Rect(0, 0, width, height); if (!bin->insert(&rect)) return false; // Keep track how many rows of the allocated texture we actually use - determined_height = std::max((unsigned int)(rect.max_y), determined_height); + determined_height = std::max(rect.max_y, determined_height); // Note how we don't bother to do any copying, as at this stage we // only care for the structure - // copy_into(patch_image, rect.min_x, rect.min_y, image, padding); + // copy_into(patch_image, rect.min_x, rect.min_y, image, TILE_PADDING); - IsaacTexturePatch::Faces const& patch_faces = texture_patch->get_faces(); - IsaacTexturePatch::Texcoords const& patch_texcoords = texture_patch->get_texcoords(); + IsaacTexturePatch::Faces const& patch_faces = texture_patch->get_faces(); // alias + IsaacTexturePatch::Texcoords const& patch_texcoords = texture_patch->get_texcoords(); // alias - /* Calculate the offset of the texture patches' relative texture coordinates */ - math::Vec2f offset = math::Vec2f(rect.min_x + padding, rect.min_y + padding); + // Calculate where the patch will go after insertion + Eigen::Vector2d offset = Eigen::Vector2d(rect.min_x + TILE_PADDING, rect.min_y + TILE_PADDING); faces.insert(faces.end(), patch_faces.begin(), patch_faces.end()); - /* Calculate the final textcoords of the faces. */ - for (std::size_t i = 0; i < patch_faces.size(); ++i) { - for (int j = 0; j < 3; ++j) { - math::Vec2f rel_texcoord(patch_texcoords[i * 3 + j]); - math::Vec2f texcoord = rel_texcoord + offset; + // Insert the texcoords in the right place + for (std::size_t i = 0; i < patch_faces.size(); i++) { + for (int64_t j = 0; j < 3; j++) { + Eigen::Vector2d rel_texcoord(patch_texcoords[i * 3 + j]); + + Eigen::Vector2d texcoord = rel_texcoord + offset; + + // Delay this normalization until we know the final width and + // height of the atlas. 
- // Delay this until later // texcoord[0] = texcoord[0] / this->width; // texcoord[1] = texcoord[1] / this->height; @@ -135,12 +150,12 @@ bool IsaacTextureAtlas::insert(IsaacTexturePatch::ConstPtr texture_patch) { } struct VectorCompare { - bool operator()(math::Vec2f const& lhs, math::Vec2f const& rhs) const { + bool operator()(Eigen::Vector2d const& lhs, Eigen::Vector2d const& rhs) const { return lhs[0] < rhs[0] || (lhs[0] == rhs[0] && lhs[1] < rhs[1]); } }; -typedef std::map TexcoordMap; +typedef std::map TexcoordMap; void IsaacTextureAtlas::merge_texcoords() { Texcoords tmp; @@ -148,7 +163,7 @@ void IsaacTextureAtlas::merge_texcoords() { // Do not remove duplicates, as that messes up the book-keeping TexcoordMap texcoord_map; - for (math::Vec2f const& texcoord : tmp) { + for (Eigen::Vector2d const& texcoord : tmp) { TexcoordMap::iterator iter = texcoord_map.find(texcoord); // if (iter == texcoord_map.end()) { std::size_t texcoord_id = this->texcoords.size(); @@ -161,16 +176,18 @@ void IsaacTextureAtlas::merge_texcoords() { } } -// Set the final height and scale the texcoords by removing -// redundant space +// Set the final height by removing unused space. Allocate the image buffer. void IsaacTextureAtlas::resize_atlas() { - // Round up a little to make it a multiple of 2 * padding. Not strictly necessary + // Round up a little to make it a multiple of 2 * TILE_PADDING. Not strictly necessary // but looks nicer that way. - this->determined_height = (2 * padding) * (ceil(this->determined_height / static_cast(2 * padding))); - this->determined_height = std::min(this->height, this->determined_height); + int64_t factor = 2 * TILE_PADDING; + this->determined_height + = factor * ceil(this->determined_height / static_cast(factor)); + this->determined_height = std::min(this->height, this->determined_height); this->height = this->determined_height; - this->image->resize(this->width, this->height, image->channels()); + + this->image = mve::ByteImage::create(this->width, this->height, NUM_CHANNELS); } // Scale the texcoords once the final atlas dimensions are known @@ -190,19 +207,20 @@ void IsaacTextureAtlas::finalize() { this->finalized = true; } -void isaac_build_model(mve::TriangleMesh::ConstPtr mesh, std::vector const& texture_atlases, - ObjModel* obj_model) { +void isaac_build_model(mve::TriangleMesh::ConstPtr mesh, + std::vector const& texture_atlases, + IsaacObjModel* obj_model) { mve::TriangleMesh::VertexList const& mesh_vertices = mesh->get_vertices(); mve::TriangleMesh::NormalList const& mesh_normals = mesh->get_vertex_normals(); mve::TriangleMesh::FaceList const& mesh_faces = mesh->get_faces(); - ObjModel::Vertices& vertices = obj_model->get_vertices(); + IsaacObjModel::Vertices& vertices = obj_model->get_vertices(); vertices.insert(vertices.begin(), mesh_vertices.begin(), mesh_vertices.end()); - ObjModel::Normals& normals = obj_model->get_normals(); + IsaacObjModel::Normals& normals = obj_model->get_normals(); normals.insert(normals.begin(), mesh_normals.begin(), mesh_normals.end()); - ObjModel::TexCoords& texcoords = obj_model->get_texcoords(); + IsaacObjModel::TexCoords& texcoords = obj_model->get_texcoords(); - ObjModel::Groups& groups = obj_model->get_groups(); + IsaacObjModel::Groups& groups = obj_model->get_groups(); MaterialLib& material_lib = obj_model->get_material_lib(); for (IsaacTextureAtlas::Ptr texture_atlas : texture_atlases) { @@ -212,8 +230,8 @@ void isaac_build_model(mve::TriangleMesh::ConstPtr mesh, std::vectorget_image(); material_lib.push_back(material); - 
groups.push_back(ObjModel::Group()); - ObjModel::Group& group = groups.back(); + groups.push_back(IsaacObjModel::Group()); + IsaacObjModel::Group& group = groups.back(); group.material_name = material.name; IsaacTextureAtlas::Faces const& atlas_faces = texture_atlas->get_faces(); @@ -224,7 +242,7 @@ void isaac_build_model(mve::TriangleMesh::ConstPtr mesh, std::vector& mesh_info, std::shared_ptr& graph, + std::shared_ptr& mesh_info, + std::shared_ptr& graph, std::shared_ptr& bvh_tree) { - std::cout << "Load and prepare mesh from " << mesh_file << std::endl; + std::cout << "Loading mesh: " << mesh_file << std::endl; mesh = mve::geom::load_ply_mesh(mesh_file); mesh_info = std::shared_ptr(new mve::MeshInfo(mesh)); tex::prepare_mesh(mesh_info.get(), mesh); - std::cout << "Building adjacency graph: " << std::endl; std::size_t const num_faces = mesh->get_faces().size() / 3; if (num_faces > std::numeric_limits::max()) throw std::runtime_error("Exeeded maximal number of faces"); @@ -266,7 +284,7 @@ void loadMeshBuildTree(std::string const& mesh_file, mve::TriangleMesh::Ptr& mes util::WallTimer timer; std::vector const& faces = mesh->get_faces(); std::vector const& vertices = mesh->get_vertices(); - std::cout << "Building BVH tree from " << faces.size() / 3 << " faces.\n"; + std::cout << "Number of faces: " << faces.size() / 3 << " faces.\n"; bvh_tree = std::shared_ptr(new BVHTree(faces, vertices)); std::cout << "Building the tree took: " << timer.get_elapsed() / 1000.0 << " seconds\n"; } @@ -283,29 +301,46 @@ math::Vec3f eigen_to_vec3f(Eigen::Vector3d const& V) { return v; } -void calculate_texture_size(double height_factor, std::list const& texture_patches, - int& texture_width, int& texture_height) { - int64_t total_area = 0; - int max_width = 0; - int max_height = 0; - int padding = TILE_PADDING; - +void calculate_texture_size(double height_factor, + std::list const& texture_patches, + int64_t& texture_width, int64_t& texture_height) { + int64_t total_area = 0; // this can be huge and may overflow with 32 bit int + int64_t max_patch_width = 0; + int64_t max_patch_height = 0; for (IsaacTexturePatch::ConstPtr texture_patch : texture_patches) { - int width = texture_patch->get_width() + 2 * padding; - int height = texture_patch->get_height() + 2 * padding; + int64_t width = texture_patch->get_width() + 2 * TILE_PADDING; + int64_t height = texture_patch->get_height() + 2 * TILE_PADDING; - max_width = std::max(max_width, width); - max_height = std::max(max_height, height); + max_patch_width = std::max(max_patch_width, width); + max_patch_height = std::max(max_patch_height, height); int64_t area = width * height; total_area += area; } - texture_width = std::max(4 * 1024, max_width); + // Estimate rough dimensions of the atlas. This can't be precise, + // since those little tiles may not fit with no gaps between + // them. The height will be first over-estimated later, and then + // re-adjusted in due time. + + // It is good to aim for roughly square dimensions, as otherwise one + // dimension may be too big, which may result in loss of precision + // in texcoords as those get scaled by image dimensions. 
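// A worked instance (hypothetical numbers) of the sizing logic below:
// one million padded patches of roughly 20 x 20 pixels give
// total_area = 4e8, so the first guess is a square atlas of
// ceil(sqrt(4e8)) = 20000 by ceil(4e8 / 20000) = 20000 pixels, which
// here is already a multiple of 2 * TILE_PADDING = 8; height_factor
// then inflates the height, to be trimmed once all patches are placed.
int64_t total_area = 400000000LL;
int64_t texture_width = ceil(sqrt(static_cast<double>(total_area)));             // 20000
int64_t texture_height = ceil(static_cast<double>(total_area) / texture_width);  // 20000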
+ texture_width = ceil(sqrt(static_cast(total_area))); texture_height = ceil(static_cast(total_area / texture_width)); - texture_height = std::max(texture_height, max_height); - // This is a heuristic but it works well-enough the first time + // Adjust to ensure even the biggest patch can fit + texture_width = std::max(texture_width, max_patch_width); + texture_height = std::max(texture_height, max_patch_height); + + // Round up a little to make the dims a multiple of 2 * + // TILE_PADDING. Not strictly necessary but looks nicer that way. + int64_t factor = 2 * TILE_PADDING; + texture_width = factor * ceil(texture_width / static_cast(factor)); + texture_height = factor * ceil(texture_height / static_cast(factor)); + + // Give the height a margin to overestimate it. It will be adjusted after we finish + // putting all patches in the atlas. texture_height *= height_factor; } @@ -316,6 +351,7 @@ bool texture_patch_compare(IsaacTexturePatch::ConstPtr first, IsaacTexturePatch: // If there is more than one texture atlas merge them, as it is // difficult to later manage multiple texture images. // TODO(oalexan1): This does not work, despite trying to debug it a lot. +// TODO(oalexan1): Maybe it is because texcoord_ids are not copied! void merge_texture_atlases(std::vector* texture_atlases) { if (texture_atlases->size() <= 1) return; @@ -329,7 +365,7 @@ void merge_texture_atlases(std::vector* texture_atlases) } // Find the final width and height - int final_height = 0, final_width = prev_atlases[0]->get_width(); + int64_t final_height = 0, final_width = prev_atlases[0]->get_width(); for (size_t atlas_it = 0; atlas_it < prev_atlases.size(); atlas_it++) { final_height += prev_atlases[atlas_it]->get_height(); } @@ -338,12 +374,12 @@ void merge_texture_atlases(std::vector* texture_atlases) // but no texcoords exist yet texture_atlases->push_back(IsaacTextureAtlas::create(final_width, final_height)); - // Copy the image data - int image_start = 0; + // Copy the image data. Need to use 'int64_t' values as an int may overflow. 
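// The overflow the comment above guards against, in numbers: a single
// 25000 x 25000 BGRA atlas indexed as one flat array needs indices up
// to 25000 * 25000 * 4 = 2,500,000,000, which exceeds the largest
// 32-bit int, 2,147,483,647, so a plain int index wraps around while
// int64_t has ample room.
int64_t flat_len = 25000LL * 25000LL * 4LL;  // 2500000000 > INT32_MAX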
+ int64_t image_start = 0; mve::ByteImage::Ptr& dest_image = (*texture_atlases)[0]->get_image(); for (size_t atlas_it = 0; atlas_it < prev_atlases.size(); atlas_it++) { mve::ByteImage::Ptr& prev_image = prev_atlases[atlas_it]->get_image(); - int prev_image_len = prev_image->width() * prev_image->height() * prev_image->channels(); + int64_t prev_image_len = prev_image->width() * prev_image->height() * prev_image->channels(); std::memcpy(dest_image->begin() + image_start, prev_image->begin(), prev_image_len); // prepare for the next iteration @@ -351,18 +387,18 @@ void merge_texture_atlases(std::vector* texture_atlases) } // Copy the texcoords - int texcoord_start = 0; + int64_t texcoord_start = 0; IsaacTextureAtlas::Texcoords& dest_coords = (*texture_atlases)[0]->get_texcoords(); dest_coords.clear(); for (size_t atlas_it = 0; atlas_it < prev_atlases.size(); atlas_it++) { IsaacTextureAtlas::Texcoords& prev_coords = prev_atlases[atlas_it]->get_texcoords(); - math::Vec2f offset = math::Vec2f(0, texcoord_start); + Eigen::Vector2d offset = Eigen::Vector2d(0, texcoord_start); IsaacTextureAtlas::Faces& prev_faces = prev_atlases[atlas_it]->get_faces(); for (size_t face_it = 0; face_it < prev_faces.size(); face_it++) { - for (int j = 0; j < 3; ++j) { - math::Vec2f rel_texcoord(prev_coords[face_it * 3 + j]); - math::Vec2f texcoord = rel_texcoord + offset; + for (int64_t j = 0; j < 3; j++) { + Eigen::Vector2d rel_texcoord(prev_coords[face_it * 3 + j]); + Eigen::Vector2d texcoord = rel_texcoord + offset; dest_coords.push_back(texcoord); } } @@ -384,36 +420,39 @@ void merge_texture_atlases(std::vector* texture_atlases) prev_atlases.clear(); // wipe this } -void generate_texture_atlases(double height_factor, std::vector& texture_patches, - std::map& face_positions, std::vector* texture_atlases, +void generate_texture_atlases(double height_factor, + std::vector& texture_patches, + std::map& face_positions, + std::vector* texture_atlases, std::vector>& atlas_sizes) { texture_atlases->clear(); atlas_sizes.clear(); // Make a copy of the pointers to texture patches that we will - // modify Use a list from which it is easier to erase things. + // modify. Use a list from which it is easier to erase things. std::list local_texture_patches; - for (size_t it = 0; it < texture_patches.size(); it++) local_texture_patches.push_back(texture_patches[it]); + for (size_t it = 0; it < texture_patches.size(); it++) + local_texture_patches.push_back(texture_patches[it]); /* Improve the bin-packing algorithm efficiency by sorting texture patches * in descending order of size. 
*/ local_texture_patches.sort(texture_patch_compare); // Find the face positions after sorting - int count = 0; + int64_t count = 0; for (auto it = local_texture_patches.begin(); it != local_texture_patches.end(); it++) { IsaacTexturePatch::ConstPtr ptr = *it; IsaacTexturePatch const* P = ptr.get(); - int label = P->get_label(); + int64_t label = P->get_label(); face_positions[label] = count; count++; } - std::size_t const total_num_patches = local_texture_patches.size(); - std::size_t remaining_patches = local_texture_patches.size(); + int64_t num_patches = local_texture_patches.size(); + count = 0; while (!local_texture_patches.empty()) { - int texture_width, texture_height; + int64_t texture_width = 0, texture_height = 0; calculate_texture_size(height_factor, local_texture_patches, texture_width, texture_height); texture_atlases->push_back(IsaacTextureAtlas::create(texture_width, texture_height)); @@ -422,16 +461,15 @@ void generate_texture_atlases(double height_factor, std::vector::iterator it = local_texture_patches.begin(); for (; it != local_texture_patches.end();) { - std::size_t done_patches = total_num_patches - remaining_patches; - int precent = static_cast(done_patches) / total_num_patches * 100.0f; - if (total_num_patches > 100 && done_patches % (total_num_patches / 100) == 0) { - } - if (texture_atlas->insert(*it)) { it = local_texture_patches.erase(it); - remaining_patches -= 1; + count++; + + if (count % 5000 == 0 || count == num_patches) { + std::cout << "Adding patches: " << count << "/" << num_patches << std::endl; + } } else { - ++it; + it++; } } @@ -446,20 +484,20 @@ void generate_texture_atlases(double height_factor, std::vectormerge_texcoords(); (*texture_atlases)[atlas_it]->scale_texcoords(); - atlas_sizes.push_back( - std::make_pair((*texture_atlases)[atlas_it]->get_width(), (*texture_atlases)[atlas_it]->get_height())); + atlas_sizes.push_back(std::make_pair((*texture_atlases)[atlas_it]->get_width(), + (*texture_atlases)[atlas_it]->get_height())); } return; } -void isaac_save_model(ObjModel* obj_model, std::string const& prefix) { - int OBJ_INDEX_OFFSET = 1; +void isaac_save_model(IsaacObjModel* obj_model, std::string const& prefix) { + int64_t OBJ_INDEX_OFFSET = 1; - ObjModel::Vertices const& vertices = obj_model->get_vertices(); - ObjModel::Normals const& normals = obj_model->get_normals(); - ObjModel::TexCoords const& texcoords = obj_model->get_texcoords(); - ObjModel::Groups const& groups = obj_model->get_groups(); + IsaacObjModel::Vertices const& vertices = obj_model->get_vertices(); + IsaacObjModel::Normals const& normals = obj_model->get_normals(); + IsaacObjModel::TexCoords const& texcoords = obj_model->get_texcoords(); + IsaacObjModel::Groups const& groups = obj_model->get_groups(); MaterialLib const& material_lib = obj_model->get_material_lib(); material_lib.save_to_files(prefix); @@ -468,31 +506,31 @@ void isaac_save_model(ObjModel* obj_model, std::string const& prefix) { std::ofstream out((prefix + ".obj").c_str()); if (!out.good()) throw util::FileException(prefix + ".obj", std::strerror(errno)); - out << "mtllib " << name << ".mtl" << '\n'; + out << "mtllib " << name << ".mtl" << "\n"; - out << std::fixed << std::setprecision(8); - for (std::size_t i = 0; i < vertices.size(); ++i) { - out << "v " << vertices[i][0] << " " << vertices[i][1] << " " << vertices[i][2] << '\n'; - } + out << std::setprecision(16); + for (std::size_t i = 0; i < vertices.size(); i++) + out << "v " << vertices[i][0] << " " << vertices[i][1] << " " << vertices[i][2] << "\n"; - 
for (std::size_t i = 0; i < texcoords.size(); ++i) { - out << "vt " << texcoords[i][0] << " " << 1.0f - texcoords[i][1] << '\n'; - } + // Here use 1.0 rather than 1.0f to not lose precision + for (std::size_t i = 0; i < texcoords.size(); i++) + out << "vt " << texcoords[i][0] << " " << 1.0 - texcoords[i][1] << "\n"; - for (std::size_t i = 0; i < normals.size(); ++i) { - out << "vn " << normals[i][0] << " " << normals[i][1] << " " << normals[i][2] << '\n'; - } + for (std::size_t i = 0; i < normals.size(); i++) + out << "vn " << normals[i][0] << " " << normals[i][1] << " " << normals[i][2] << "\n"; - for (std::size_t i = 0; i < groups.size(); ++i) { - out << "usemtl " << groups[i].material_name << '\n'; - for (std::size_t j = 0; j < groups[i].faces.size(); ++j) { - ObjModel::Face const& face = groups[i].faces[j]; + for (std::size_t i = 0; i < groups.size(); i++) { + out << "usemtl " << groups[i].material_name << "\n"; + for (std::size_t j = 0; j < groups[i].faces.size(); j++) { + IsaacObjModel::Face const& face = groups[i].faces[j]; out << "f"; for (std::size_t k = 0; k < 3; ++k) { - out << " " << face.vertex_ids[k] + OBJ_INDEX_OFFSET << "/" << face.texcoord_ids[k] + OBJ_INDEX_OFFSET << "/" - << face.normal_ids[k] + OBJ_INDEX_OFFSET; + out << " " + << face.vertex_ids[k] + OBJ_INDEX_OFFSET << "/" // NOLINT + << face.texcoord_ids[k] + OBJ_INDEX_OFFSET << "/" // NOLINT + << face.normal_ids[k] + OBJ_INDEX_OFFSET; // NOLINT } - out << '\n'; + out << "\n"; } } out.close(); @@ -572,12 +610,13 @@ struct Edge { // Move an edge this far along the edge normal Edge bias_edge(Edge const& e, double bias) { pointPair n = e.get_normal(); - return Edge(e.x0 + bias * n.first, e.y0 + bias * n.second, e.x1 + bias * n.first, e.y1 + bias * n.second); + return Edge(e.x0 + bias * n.first, e.y0 + bias * n.second, + e.x1 + bias * n.first, e.y1 + bias * n.second); } bool edge_intersection(Edge A, Edge B, pointPair& out) { - return edge_intersection(std::make_pair(A.x0, A.y0), std::make_pair(A.x1, A.y1), std::make_pair(B.x0, B.y0), - std::make_pair(B.x1, B.y1), out); + return edge_intersection(std::make_pair(A.x0, A.y0), std::make_pair(A.x1, A.y1), + std::make_pair(B.x0, B.y0), std::make_pair(B.x1, B.y1), out); } // A triangle with three edges @@ -632,72 +671,84 @@ Triangle bias_triangle(Triangle const& tri, double bias) { ans2 = edge_intersection(biased_edges[1], biased_edges[2], E2); // Apply the bias only for non-degenerate triangles - if (ans0 && ans1 && ans2) return Triangle(E0.first, E0.second, E1.first, E1.second, E2.first, E2.second); + if (ans0 && ans1 && ans2) + return Triangle(E0.first, E0.second, E1.first, E1.second, E2.first, E2.second); return tri; } -// Given a mesh and a given pixel size in meters (say 0.001), -// overlay a grid of that pixel size on top of each triangle and -// form a textured 3D model with a pixel at each node. -// Store the transforms that allows one at any time to -// project an image onto that triangle and copy the image -// interpolated at the grid points into the texture. -// This way the size of the texture image is fixed -// for the given texture mesh and different images result -// in different pixels in this texture. 
-
-void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_threads,
-               // outputs
-               std::vector<FaceInfo>& face_projection_info, std::vector<IsaacTextureAtlas::Ptr>& texture_atlases,
-               tex::Model& model) {
-  omp_set_dynamic(0);                // Explicitly disable dynamic teams
-  omp_set_num_threads(num_threads);  // Use this many threads for all
-                                     // consecutive parallel regions
+// Given a mesh and a given pixel size in meters (say 0.001), overlay
+// a grid of that pixel size on top of each triangle and form a
+// textured 3D model with a pixel at each grid point. Store the
+// transforms that allow one at any time to project an image onto
+// that triangle and copy the image interpolated at the grid points
+// into the texture buffer. This way the size of the texture buffer
+// depends only on the mesh geometry and not on the number, orientation,
+// or resolution of the images projected on the mesh.

-  std::cout << "Forming the textured model" << std::endl;
+void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int64_t num_threads,
+               // outputs
+               std::vector<FaceInfo>& face_projection_info,
+               std::vector<IsaacTextureAtlas::Ptr>& texture_atlases,
+               IsaacObjModel& model) {
+  // Explicitly disable dynamic determination of number of threads
+  omp_set_dynamic(0);
+  // Use this many threads for all consecutive parallel regions
+  omp_set_num_threads(num_threads);
+
+  std::cout << "Forming the textured model, please wait ..." << std::endl;

   util::WallTimer timer;

   std::vector<math::Vec3f> const& vertices = mesh->get_vertices();
   std::vector<math::Vec3f> const& mesh_normals = mesh->get_vertex_normals();
-  if (vertices.size() != mesh_normals.size()) LOG(FATAL) << "A mesh must have as many vertices as vertex normals.";
+  if (vertices.size() != mesh_normals.size())
+    LOG(FATAL) << "A mesh must have as many vertices as vertex normals.";

   std::vector<unsigned int> const& faces = mesh->get_faces();
   std::vector<math::Vec3f> const& face_normals = mesh->get_face_normals();

-  int num_faces = faces.size() / 3;
+  // Initialize face_projection_info. Likely the constructor will be
+  // called either way but it is safer this way.
+  int64_t num_faces = faces.size() / 3;
   face_projection_info.resize(num_faces);
+  for (int64_t it = 0; it < num_faces; it++) face_projection_info[it] = FaceInfo();

-  // Store here texture coords before they get packed in the model
-  ObjModel::TexCoords input_texcoords(3 * num_faces);
+  // Store here texture coords before they get packed in the model.
+  // Ensure that this is initialized.
+  std::vector<Eigen::Vector2d> input_texcoords(3 * num_faces);
+  for (size_t it = 0; it < input_texcoords.size(); it++)
+    input_texcoords[it] = Eigen::Vector2d(0.0, 0.0);

   std::vector<IsaacTexturePatch::ConstPtr> texture_patches(num_faces);

+  double total_area = 0.0;
 #pragma omp parallel for
-  for (int face_id = 0; face_id < num_faces; face_id++) {
+  for (int64_t face_id = 0; face_id < num_faces; face_id++) {
     math::Vec3f const& v1 = vertices[faces[3 * face_id + 0]];
     math::Vec3f const& v2 = vertices[faces[3 * face_id + 1]];
     math::Vec3f const& v3 = vertices[faces[3 * face_id + 2]];
     // math::Vec3f const & face_normal = face_normals[face_id];

-    // A mesh triangle is visible if rays from its vertices do not
-    // intersect the mesh somewhere else before hitting the camera,
-    // and hit the camera inside the image bounds.
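+    // Each face is processed independently in this parallel loop. The one
+    // piece of shared state written to is total_area; strictly that
+    // accumulation needs a reduction clause, e.g.
+    //   #pragma omp parallel for reduction(+: total_area)
+    // or an atomic update, to be free of a data race among the threads.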
+ // The mesh triangle math::Vec3f const* samples[] = {&v1, &v2, &v3}; std::vector patch_faces(1); patch_faces[0] = face_id; + std::vector face_texcoords(3); // Convert the triangle corners to Eigen so that we have double precision std::vector V(3); for (size_t v_it = 0; v_it < 3; v_it++) V[v_it] = vec3f_to_eigen(*samples[v_it]); + // The area of the given face + double area = 0.5 * ((V[1] - V[0]).cross(V[2] - V[0])).norm(); + total_area += area; + // Compute the double precision normal, as we will do a lot of // careful math. It agrees with the pre-existing float normal. Eigen::Vector3d N = (V[1] - V[0]).cross(V[2] - V[0]); if (N != Eigen::Vector3d(0, 0, 0)) N.normalize(); - // Eigen::Vector3d N0 = vec3f_to_eigen(face_normal); // pre-existing - // normal + // Eigen::Vector3d N0 = vec3f_to_eigen(face_normal); // pre-existing normal double a, e; dense_map::normalToAzimuthAndElevation(N, a, e); @@ -713,8 +764,8 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre YZPlaneToTriangleFace.translation() = Eigen::Vector3d(0, 0, 0); Eigen::Affine3d inv_trans = YZPlaneToTriangleFace.inverse(); - // Transform the vertices, they will end up in the same plane - // with x constant. + // Transform the vertices, they will end up in the same plane with + // x constant. This will make it easy to sample the face. std::vector TransformedVertices; TransformedVertices.resize(3); double x = 0, min_y = 0, max_y = 0, min_z = 0, max_z = 0; @@ -734,27 +785,21 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre } } - // Put a little padding just in case - // TODO(oalexan1): Put back a padding of 2 maybe! - int padding = 1; - min_y -= padding * pixel_size; - max_y += padding * pixel_size; - min_z -= padding * pixel_size; - max_z += padding * pixel_size; - - int width = ceil((max_y - min_y) / pixel_size) + 1; - int height = ceil((max_z - min_z) / pixel_size) + 1; + FaceInfo& F = face_projection_info[face_id]; // alias - // std::cout << "width and height are " << width << ' ' << height << - // std::endl; + // Put a little padding just in case (its value is 1 in the constructor) + min_y -= F.face_info_padding * pixel_size; + max_y += F.face_info_padding * pixel_size; + min_z -= F.face_info_padding * pixel_size; + max_z += F.face_info_padding * pixel_size; - std::vector face_texcoords(3); + int64_t width = ceil((max_y - min_y) / pixel_size) + 1; + int64_t height = ceil((max_z - min_z) / pixel_size) + 1; // Record where the triangle vertices will be in the sampled bounding box of the face - for (size_t v_it = 0; v_it < 3; v_it++) { - face_texcoords[v_it] = math::Vec2f((TransformedVertices[v_it][1] - min_y) / pixel_size, + for (size_t v_it = 0; v_it < 3; v_it++) + face_texcoords[v_it] = Eigen::Vector2d((TransformedVertices[v_it][1] - min_y) / pixel_size, (TransformedVertices[v_it][2] - min_z) / pixel_size); - } // Append these to the bigger list for (size_t v_it = 0; v_it < face_texcoords.size(); v_it++) { @@ -763,21 +808,21 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre // Store the info that we will need later to find where a little tile // should go. 
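+    // For later reference: FaceInfo holds, per face, the constant x of the
+    // plane the face was mapped to, the min_y/min_z corner and the
+    // width/height of its sampled bounding box, the plane-to-face
+    // transform, the padding, and, once the atlas is packed, the
+    // shift_u/shift_v placement of the face's patch in the texture.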
-    FaceInfo& F = face_projection_info[face_id];  // alias
     F.x = x;
     F.min_y = min_y;
     F.min_z = min_z;
     F.width = width;
     F.height = height;
-    F.TransformedVertices = TransformedVertices;
+    F.TransformedVertices = TransformedVertices;
     F.YZPlaneToTriangleFace = YZPlaneToTriangleFace;
-    F.padding = padding;

-    // We won't really use this image, but need its dimensions
-    texture_patches[face_id] = IsaacTexturePatch::create(face_id, patch_faces, face_texcoords, width, height);
+    // Form a little rectangle, which later will be inserted in the right place
+    // in the texture atlas
+    texture_patches[face_id] = IsaacTexturePatch::create(face_id, patch_faces, face_texcoords,
+                                                         width, height);
   }  // End loop over mesh faces

-  // for face i, 3 * face_position[i] will be where it is starts being stored
+  // For face i, 3 * face_position[i] will be where it starts being stored
   // in the uv array.
   std::map<int64_t, int64_t> face_positions;
   std::vector<std::pair<int64_t, int64_t>> atlas_sizes;
@@ -788,7 +833,8 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre
   // allocated for the atlas. Normally we will get it right from the first
   // try though.
   for (int attempt = 0; attempt < 10; attempt++) {
-    generate_texture_atlases(height_factor, texture_patches, face_positions, &texture_atlases, atlas_sizes);
+    generate_texture_atlases(height_factor, texture_patches, face_positions,
+                             &texture_atlases, atlas_sizes);

     if (texture_atlases.size() <= 1) break;
@@ -798,38 +844,37 @@ void formModel(mve::TriangleMesh::ConstPtr mesh, double pixel_size, int num_thre
   }

   // If still failed, give up
-  if (texture_atlases.size() > 1) LOG(FATAL) << "More than one texture atlas was generated, which is not supported.";
+  if (texture_atlases.size() > 1)
+    LOG(FATAL) << "More than one texture atlas was generated, which is not supported.";

   if (texture_atlases.empty()) LOG(FATAL) << "No texture atlas was created.";

-  int texture_width = atlas_sizes[0].first;
-  int texture_height = atlas_sizes[0].second;
+  int64_t texture_width = atlas_sizes[0].first;
+  int64_t texture_height = atlas_sizes[0].second;

   // Build the model from atlases
   isaac_build_model(mesh, texture_atlases, &model);

   // These texcoords and the ones above are related by a scale transform + translation,
   // a transform that can change depending on which atlas we are on.
-  ObjModel::TexCoords const& model_texcoords = model.get_texcoords();
+  IsaacObjModel::TexCoords const& model_texcoords = model.get_texcoords();

   // The two vectors below would be equal if we process the mesh fully,
   // and not so if we stop early.
-  if (model_texcoords.size() != input_texcoords.size()) LOG(FATAL) << "Book-keeping failure regarding texcoords.";
+  if (model_texcoords.size() != input_texcoords.size())
+    LOG(FATAL) << "Book-keeping failure regarding texcoords.";

   for (std::size_t i = 0; i < model_texcoords.size() / 3; i++) {
     auto face_pos = face_positions.find(i);
-    if (face_pos == face_positions.end()) {
-      LOG(FATAL) << "Cannot find position for index " << i;
-    }
-
-    int mapped_i = face_pos->second;
+    if (face_pos == face_positions.end()) LOG(FATAL) << "Cannot find position for index " << i;
+    int64_t mapped_i = face_pos->second;

     // By comparing where the texcoords were before being put in the
     // textured mesh file and where it ends up in that file, we can
     // determine the shift that was used to place a texture patch in
     // the texture.
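+    // A small worked example of the logic below: if this face's first
+    // texcoord was (2.0, 3.0) inside its own patch, and after packing the
+    // normalized texcoord times the texture size comes out to (102.0, 53.0),
+    // then the patch was placed with shift_u = 100 and shift_v = 50.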
- int shift_u = round(model_texcoords[3 * mapped_i][0] * texture_width - input_texcoords[3 * i][0]); - int shift_v = round(model_texcoords[3 * mapped_i][1] * texture_height - input_texcoords[3 * i][1]); + int64_t shift_u = round(model_texcoords[3 * mapped_i][0] * texture_width - input_texcoords[3 * i][0]); + int64_t shift_v = round(model_texcoords[3 * mapped_i][1] * texture_height - input_texcoords[3 * i][1]); face_projection_info[i].shift_u = shift_u; face_projection_info[i].shift_v = shift_v; @@ -849,11 +894,6 @@ void formMtl(std::string const& out_prefix, std::string& mtl_str) { ofs << "Ns 0.000000\n"; ofs << "map_Kd " << out_prefix << ".png\n"; mtl_str = ofs.str(); - - std::string mtl_file = out_prefix + ".mtl"; - std::ofstream ofs2(mtl_file.c_str()); - ofs2 << mtl_str; - ofs2.close(); } void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vector const& face_vec, @@ -862,13 +902,14 @@ void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vector const& vertices = mesh->get_vertices(); std::vector const& mesh_normals = mesh->get_vertex_normals(); - if (vertices.size() != mesh_normals.size()) LOG(FATAL) << "A mesh must have as many vertices as vertex normals."; + if (vertices.size() != mesh_normals.size()) + LOG(FATAL) << "A mesh must have as many vertices as vertex normals."; // The uv_map assigns to each of the mesh vertices that is visible // in the texture its (u, v) pair. Find the map from each vertex // index to the index in the list of (u, v) pairs. std::map vertex_to_uv; - int count = 0; + int64_t count = 0; for (auto it = uv_map.begin(); it != uv_map.end(); it++) { vertex_to_uv[it->first] = count; count++; @@ -878,77 +919,118 @@ void formObjCustomUV(mve::TriangleMesh::ConstPtr mesh, std::vectorsecond)[0] << " " << (it->second)[1] << '\n'; - } + for (auto it = uv_map.begin(); it != uv_map.end(); it++) + out << "vt " << (it->second)[0] << " " << (it->second)[1] << "\n"; - for (std::size_t i = 0; i < mesh_normals.size(); i++) { - out << "vn " << mesh_normals[i][0] << " " << mesh_normals[i][1] << " " << mesh_normals[i][2] << '\n'; - } + for (std::size_t i = 0; i < mesh_normals.size(); i++) + out << "vn " << mesh_normals[i][0] << " " << mesh_normals[i][1] << " " + << mesh_normals[i][2] << "\n"; - int OBJ_INDEX_OFFSET = 1; // have indices start from 1 + int64_t OBJ_INDEX_OFFSET = 1; // have indices start from 1 for (std::size_t j = 0; j < face_vec.size(); j++) { out << "f"; for (std::size_t k = 0; k < 3; ++k) { - out << " " << face_vec[j][k] + OBJ_INDEX_OFFSET << "/" << vertex_to_uv[face_vec[j][k]] + OBJ_INDEX_OFFSET << "/" + out << " " << face_vec[j][k] + OBJ_INDEX_OFFSET << "/" + << vertex_to_uv[face_vec[j][k]] + OBJ_INDEX_OFFSET << "/" << face_vec[j][k] + OBJ_INDEX_OFFSET; } - out << '\n'; + out << "\n"; } obj_str = out.str(); } -void formObj(tex::Model& texture_model, std::string const& out_prefix, std::string& obj_str) { +void formObj(IsaacObjModel& texture_model, std::string const& out_prefix, std::string& obj_str) { std::vector const& vertices = texture_model.get_vertices(); - std::vector const& texcoords = texture_model.get_texcoords(); + std::vector const& texcoords = texture_model.get_texcoords(); std::vector const& normals = texture_model.get_normals(); - std::vector const& groups = texture_model.get_groups(); + std::vector const& groups = texture_model.get_groups(); std::ostringstream out; out << "mtllib " << out_prefix << ".mtl\n"; - out << std::fixed << std::setprecision(8); - for (std::size_t i = 0; i < vertices.size(); i++) { - out << "v " << vertices[i][0] 
<< " " << vertices[i][1] << " " << vertices[i][2] << '\n'; - } + // Must have high precision, as otherwise for large .obj files a loss of precision + // will happen when the normalized texcoords are not saved with enough digits + // and then on loading are multiplied back by the large texture dimensions. + out << std::setprecision(16); - for (std::size_t i = 0; i < texcoords.size(); ++i) { - out << "vt " << texcoords[i][0] << " " << 1.0f - texcoords[i][1] << '\n'; - } + for (std::size_t i = 0; i < vertices.size(); i++) + out << "v " << vertices[i][0] << " " << vertices[i][1] << " " << vertices[i][2] << "\n"; - for (std::size_t i = 0; i < normals.size(); i++) { - out << "vn " << normals[i][0] << " " << normals[i][1] << " " << normals[i][2] << '\n'; - } + // Here use 1.0 rather than 1.0f to not lose precision + for (std::size_t i = 0; i < texcoords.size(); i++) + out << "vt " << texcoords[i][0] << " " << 1.0 - texcoords[i][1] << "\n"; + + for (std::size_t i = 0; i < normals.size(); i++) + out << "vn " << normals[i][0] << " " << normals[i][1] << " " << normals[i][2] << "\n"; - int OBJ_INDEX_OFFSET = 1; // have indices start from 1 + int64_t OBJ_INDEX_OFFSET = 1; // have indices start from 1 - for (std::size_t i = 0; i < groups.size(); ++i) { - out << "usemtl " << groups[i].material_name << '\n'; - for (std::size_t j = 0; j < groups[i].faces.size(); ++j) { - ObjModel::Face const& face = groups[i].faces[j]; + for (std::size_t i = 0; i < groups.size(); i++) { + out << "usemtl " << groups[i].material_name << "\n"; + for (std::size_t j = 0; j < groups[i].faces.size(); j++) { + IsaacObjModel::Face const& face = groups[i].faces[j]; out << "f"; for (std::size_t k = 0; k < 3; ++k) { - out << " " << face.vertex_ids[k] + OBJ_INDEX_OFFSET << "/" << face.texcoord_ids[k] + OBJ_INDEX_OFFSET << "/" - << face.normal_ids[k] + OBJ_INDEX_OFFSET; + out << " " // NOLINT + << face.vertex_ids[k] + OBJ_INDEX_OFFSET << "/" // NOLINT + << face.texcoord_ids[k] + OBJ_INDEX_OFFSET << "/" // NOLINT + << face.normal_ids[k] + OBJ_INDEX_OFFSET; // NOLINT } - out << '\n'; + out << "\n"; } } obj_str = out.str(); } +// The images from the bag may need to be resized to be the same +// size as in the calibration file. Sometimes the full-res images +// can be so blurry that interest point matching fails, hence the +// resizing. +// Similar logic to deal with differences between image size and calibrated size +// is used further down this code. +void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) { + int64_t raw_image_cols = image.cols; + int64_t raw_image_rows = image.rows; + int64_t calib_image_cols = cam_params.GetDistortedSize()[0]; + int64_t calib_image_rows = cam_params.GetDistortedSize()[1]; + + int64_t factor = raw_image_cols / calib_image_cols; + + if ((raw_image_cols != calib_image_cols * factor) || + (raw_image_rows != calib_image_rows * factor)) { + LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows + << "\n" + << "Calibrated image width and height are: " + << calib_image_cols << ' ' << calib_image_rows << "\n" + << "These must be equal up to an integer factor.\n"; + } + + if (factor != 1) { + // TODO(oalexan1): This kind of resizing may be creating aliased images. 
+    cv::Mat local_image;
+    cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA);
+    local_image.copyTo(image);
+  }
+
+  // Check
+  if (image.cols != calib_image_cols || image.rows != calib_image_rows)
+    LOG(FATAL) << "Sci cam images have the wrong size.";
+}
+
 // Project texture and find the UV coordinates
-void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> bvh_tree, cv::Mat const& image,
+void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> bvh_tree,
+                    cv::Mat const& image,
                     camera::CameraModel const& cam, double num_exclude_boundary_pixels,
                     // outputs
-                    std::vector<double>& smallest_cost_per_face, std::vector<Eigen::Vector3i>& face_vec,
+                    std::vector<double>& smallest_cost_per_face,
+                    std::vector<Eigen::Vector3i>& face_vec,
                     std::map<int, Eigen::Vector2d>& uv_map) {
   // Wipe the outputs
   face_vec.clear();
@@ -959,16 +1041,19 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> b
   // image resolution. We will use the calibrated dimensions when
   // normalizing u and v, because when projecting 3D points in the
   // camera we will use the calibrated camera model.
-  int raw_image_cols = image.cols;
-  int raw_image_rows = image.rows;
-  int calib_image_cols = cam.GetParameters().GetDistortedSize()[0];
-  int calib_image_rows = cam.GetParameters().GetDistortedSize()[1];
-
-  int factor = raw_image_cols / calib_image_cols;
-
-  if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) {
-    LOG(FATAL) << "Published image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n"
-               << "Calibrated image width and height are: " << calib_image_cols << ' ' << calib_image_rows << "\n"
+  int64_t raw_image_cols = image.cols;
+  int64_t raw_image_rows = image.rows;
+  int64_t calib_image_cols = cam.GetParameters().GetDistortedSize()[0];
+  int64_t calib_image_rows = cam.GetParameters().GetDistortedSize()[1];
+
+  int64_t factor = raw_image_cols / calib_image_cols;
+
+  if ((raw_image_cols != calib_image_cols * factor) ||
+      (raw_image_rows != calib_image_rows * factor)) {
+    LOG(FATAL) << "Published image width and height are: "
+               << raw_image_cols << ' ' << raw_image_rows << "\n"
+               << "Calibrated image width and height are: "
+               << calib_image_cols << ' ' << calib_image_rows << "\n"
               << "These must be equal up to an integer factor.\n";
   }

@@ -979,12 +1064,14 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> b
   std::vector<math::Vec3f> const& vertices = mesh->get_vertices();
   std::vector<math::Vec3f> const& mesh_normals = mesh->get_vertex_normals();
-  if (vertices.size() != mesh_normals.size()) LOG(FATAL) << "A mesh must have as many vertices as vertex normals.";
+  if (vertices.size() != mesh_normals.size())
+    LOG(FATAL) << "A mesh must have as many vertices as vertex normals.";

   std::vector<unsigned int> const& faces = mesh->get_faces();
   std::vector<math::Vec3f> const& face_normals = mesh->get_face_normals();

-  if (smallest_cost_per_face.size() != faces.size()) LOG(FATAL) << "There must be one cost value per face.";
+  if (smallest_cost_per_face.size() != faces.size())
+    LOG(FATAL) << "There must be one cost value per face.";

 #pragma omp parallel for
   for (std::size_t face_id = 0; face_id < faces.size() / 3; face_id++) {
@@ -1053,7 +1140,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> b
       }

       // Get the undistorted pixel
-      Eigen::Vector2d undist_centered_pix = cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized());
+      Eigen::Vector2d undist_centered_pix =
+        cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized());
      if
(std::abs(undist_centered_pix[0]) > cam.GetParameters().GetUndistortedHalfSize()[0] ||
          std::abs(undist_centered_pix[1]) > cam.GetParameters().GetUndistortedHalfSize()[1]) {
        // If we are out of acceptable undistorted region, there's some uncertainty whether
@@ -1064,7 +1152,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> b
      // Get the distorted pixel value
      Eigen::Vector2d dist_pix;
-      cam.GetParameters().Convert<camera::UNDISTORTED_C, camera::DISTORTED>(undist_centered_pix, &dist_pix);
+      cam.GetParameters().Convert<camera::UNDISTORTED_C, camera::DISTORTED>
+        (undist_centered_pix, &dist_pix);

      // Skip pixels that don't project in the image
      if (dist_pix.x() < num_exclude_boundary_pixels ||
@@ -1107,13 +1196,16 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> b

 // Project texture using a texture model that was already pre-filled, so
 // just update pixel values
-void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> bvh_tree, cv::Mat const& image,
-                    camera::CameraModel const& cam, std::vector<double>& smallest_cost_per_face, double pixel_size,
-                    int num_threads, std::vector<FaceInfo> const& face_projection_info,
-                    std::vector<IsaacTextureAtlas::Ptr>& texture_atlases, tex::Model& model, cv::Mat& out_texture) {
-  omp_set_dynamic(0);                // Explicitly disable dynamic teams
-  omp_set_num_threads(num_threads);  // Use this many threads for all
-                                     // consecutive parallel regions
+void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> bvh_tree,
+                    cv::Mat const& image, camera::CameraModel const& cam,
+                    std::vector<double>& smallest_cost_per_face, double pixel_size,
+                    int64_t num_threads, std::vector<FaceInfo> const& face_projection_info,
+                    std::vector<IsaacTextureAtlas::Ptr>& texture_atlases,
+                    IsaacObjModel& model, cv::Mat& out_texture) {
+  // Explicitly disable dynamic determination of number of threads
+  omp_set_dynamic(0);
+  // Use this many threads for all consecutive parallel regions
+  omp_set_num_threads(num_threads);

   util::WallTimer timer;

@@ -1122,15 +1214,18 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> b
   // image resolution. We will use the calibrated dimensions when
   // normalizing u and v, because when projecting 3D points in the
   // camera we will use the calibrated camera model.
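+  // For example, a 4000 x 3000 image published with a calibrated size of
+  // 1000 x 750 gives factor = 4 and passes the check below, while a
+  // calibrated size of 1000 x 800 fails it, since 3000 != 800 * 4.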
- int raw_image_cols = image.cols; - int raw_image_rows = image.rows; - int calib_image_cols = cam.GetParameters().GetDistortedSize()[0]; - int calib_image_rows = cam.GetParameters().GetDistortedSize()[1]; - - int factor = raw_image_cols / calib_image_cols; - if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { - LOG(FATAL) << "Published image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n" - << "Calibrated image width and height are: " << calib_image_cols << ' ' << calib_image_rows << "\n" + int64_t raw_image_cols = image.cols; + int64_t raw_image_rows = image.rows; + int64_t calib_image_cols = cam.GetParameters().GetDistortedSize()[0]; + int64_t calib_image_rows = cam.GetParameters().GetDistortedSize()[1]; + + int64_t factor = raw_image_cols / calib_image_cols; + if ((raw_image_cols != calib_image_cols * factor) || + (raw_image_rows != calib_image_rows * factor)) { + LOG(FATAL) << "Published image width and height are: " + << raw_image_cols << ' ' << raw_image_rows << "\n" + << "Calibrated image width and height are: " + << calib_image_cols << ' ' << calib_image_rows << "\n" << "These must be equal up to an integer factor.\n"; } @@ -1143,18 +1238,21 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Blank the image texture->fill(0); - if (texture->channels() != NUM_CHANNELS) throw util::Exception("Wrong number of channels in the texture image."); + if (texture->channels() != NUM_CHANNELS) + throw util::Exception("Wrong number of channels in the texture image."); Eigen::Vector3d cam_ctr = cam.GetPosition(); std::vector const& vertices = mesh->get_vertices(); std::vector const& mesh_normals = mesh->get_vertex_normals(); - if (vertices.size() != mesh_normals.size()) LOG(FATAL) << "A mesh must have as many vertices as vertex normals."; + if (vertices.size() != mesh_normals.size()) + LOG(FATAL) << "A mesh must have as many vertices as vertex normals."; std::vector const& faces = mesh->get_faces(); std::vector const& face_normals = mesh->get_face_normals(); - if (smallest_cost_per_face.size() != faces.size()) LOG(FATAL) << "There must be one cost value per face."; + if (smallest_cost_per_face.size() != faces.size()) + LOG(FATAL) << "There must be one cost value per face."; #pragma omp parallel for for (std::size_t face_id = 0; face_id < faces.size() / 3; face_id++) { @@ -1222,7 +1320,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b } // Get the undistorted pixel - Eigen::Vector2d undist_centered_pix = cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); + Eigen::Vector2d undist_centered_pix + = cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); if (std::abs(undist_centered_pix[0]) > cam.GetParameters().GetUndistortedHalfSize()[0] || std::abs(undist_centered_pix[1]) > cam.GetParameters().GetUndistortedHalfSize()[1]) { // If we are out of acceptable undistorted region, there's some uncertainty whether @@ -1233,7 +1332,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Get the distorted pixel value Eigen::Vector2d dist_pix; - cam.GetParameters().Convert(undist_centered_pix, &dist_pix); + cam.GetParameters().Convert(undist_centered_pix, &dist_pix); // Skip pixels that don't project in the image if (dist_pix.x() < 0 || dist_pix.x() > calib_image_cols - 1 || dist_pix.y() < 0 || @@ -1270,21 +1370,23 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // The vertices of the face 
transformed to a y-z plane (ignore the x)
      Triangle tri(TV[0][1], TV[0][2], TV[1][1], TV[1][2], TV[2][1], TV[2][2]);

-      // Apply a 1.5 * pixel_size bias * padding to ensure all the pixels around
+      // Apply a bias of 1.5 * pixel_size * F.face_info_padding to ensure all the pixels around
      // the triangle are sampled well.
-      tri = bias_triangle(tri, 1.5 * pixel_size * F.padding);
+      tri = bias_triangle(tri, 1.5 * pixel_size * F.face_info_padding);

-      for (int iz = 0; iz < F.height; iz++) {
-        double out_y0, out_y1;
+      for (int64_t iz = 0; iz < F.height; iz++) {
+        double out_y0 = -1.0, out_y1 = -1.0;

        double z = F.min_z + iz * pixel_size;
        bool success = tri.horizontal_line_intersect(z, out_y0, out_y1);
        if (!success) continue;

-        int min_iy = std::max(static_cast<int>(floor((out_y0 - F.min_y) / pixel_size)), 0);
-        int max_iy = std::min(static_cast<int>(ceil((out_y1 - F.min_y) / pixel_size)), F.width - 1);
+        int64_t min_iy = std::max(static_cast<int64_t>(floor((out_y0 - F.min_y) / pixel_size)), 0L);
+        int64_t max_iy =
+          std::min(static_cast<int64_t>(ceil((out_y1 - F.min_y) / pixel_size)),
+                   static_cast<int64_t>(F.width) - 1L);

-        for (int iy = min_iy; iy <= max_iy; iy++) {
+        for (int64_t iy = min_iy; iy <= max_iy; iy++) {
          // The point in the plane in which we do the book-keeping
          // Eigen::Vector3d P(F.x, F.min_y + iy * pixel_size, F.min_z + iz *
          // pixel_size);
@@ -1300,7 +1402,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> b
          if (cam_pt.z() <= 0) continue;

          // Get the undistorted pixel
-          Eigen::Vector2d undist_centered_pix = cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized());
+          Eigen::Vector2d undist_centered_pix
+            = cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized());
          if (std::abs(undist_centered_pix[0]) > cam.GetParameters().GetUndistortedHalfSize()[0] ||
              std::abs(undist_centered_pix[1]) > cam.GetParameters().GetUndistortedHalfSize()[1]) {
            // If we are out of acceptable undistorted region, there's some uncertainty whether
@@ -1311,11 +1414,12 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> b
          // Get the distorted pixel value
          Eigen::Vector2d dist_pix;
-          cam.GetParameters().Convert<camera::UNDISTORTED_C, camera::DISTORTED>(undist_centered_pix, &dist_pix);
+          cam.GetParameters().Convert<camera::UNDISTORTED_C, camera::DISTORTED>
+            (undist_centered_pix, &dist_pix);

          // Skip pixels that don't project in the image, and potentially nan pixels.
-          bool is_good = (dist_pix.x() >= 0 && dist_pix.x() < calib_image_cols - 1 && dist_pix.y() >= 0 &&
-                          dist_pix.y() < calib_image_rows - 1);
+          bool is_good = (dist_pix.x() >= 0 && dist_pix.x() < calib_image_cols - 1 &&
+                          dist_pix.y() >= 0 && dist_pix.y() < calib_image_rows - 1);
          if (!is_good) continue;

          // Find the pixel value using bilinear interpolation. Note
@@ -1330,11 +1434,11 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr<BVHTree> b
          cv::getRectSubPix(image, s, pix, interp_pix_val);
          cv::Vec3b color = interp_pix_val.at<cv::Vec3b>(0, 0);

-          // Find the location where to put the pixel
-          int offset = texture->channels() * ((F.shift_u + iy) + (F.shift_v + iz) * texture->width());
+          // Find the location where to put the pixel. Use an int64_t, as an int may overflow.
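+          // For example, a 32768 x 32768 RGBA texture holds
+          // 32768 * 32768 * 4 = 4294967296 channel values, which already
+          // exceeds the 2147483647 maximum of a 32-bit int.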
+ int64_t offset = texture->channels() * ((F.shift_u + iy) + (F.shift_v + iz) * texture->width()); // Copy the color - for (int channel = 0; channel < NUM_CHANNELS - 1; channel++) texture_ptr[offset + channel] = color[channel]; + for (int64_t channel = 0; channel < NUM_CHANNELS - 1; channel++) texture_ptr[offset + channel] = color[channel]; // Make it non-transparent texture_ptr[offset + NUM_CHANNELS - 1] = 255; @@ -1348,7 +1452,8 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // Sanity check, to ensure nothing got mixed up with the multiple // threads - if (cost_val >= smallest_cost_per_face[face_id]) LOG(FATAL) << "Book-keeping error in estimating cost per face."; + if (cost_val >= smallest_cost_per_face[face_id]) + LOG(FATAL) << "Book-keeping error in estimating cost per face."; smallest_cost_per_face[face_id] = cost_val; } @@ -1360,4 +1465,100 @@ void projectTexture(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr b // std::cout << "Projecting took: " << timer.get_elapsed()/1000.0 << " // seconds\n"; } + +// Intersect ray with a mesh. Return true on success. +bool ray_mesh_intersect(Eigen::Vector2d const& dist_pix, + camera::CameraParameters const& cam_params, + Eigen::Affine3d const& world_to_cam, + mve::TriangleMesh::Ptr const& mesh, + std::shared_ptr const& bvh_tree, + double min_ray_dist, double max_ray_dist, + // Output + Eigen::Vector3d& intersection) { + // Initialize the output + intersection = Eigen::Vector3d(0.0, 0.0, 0.0); + + // Undistort the pixel + Eigen::Vector2d undist_centered_pix; + cam_params.Convert + (dist_pix, &undist_centered_pix); + + // Ray from camera going through the undistorted and centered pixel + Eigen::Vector3d cam_ray(undist_centered_pix.x() / cam_params.GetFocalVector()[0], + undist_centered_pix.y() / cam_params.GetFocalVector()[1], 1.0); + cam_ray.normalize(); + + Eigen::Affine3d cam_to_world = world_to_cam.inverse(); + Eigen::Vector3d world_ray = cam_to_world.linear() * cam_ray; + Eigen::Vector3d cam_ctr = cam_to_world.translation(); + + // Set up the ray structure for the mesh + BVHTree::Ray bvh_ray; + bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); + bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); + bvh_ray.dir.normalize(); + + bvh_ray.tmin = min_ray_dist; + bvh_ray.tmax = max_ray_dist; + + // Intersect the ray with the mesh + BVHTree::Hit hit; + if (bvh_tree->intersect(bvh_ray, &hit)) { + double cam_to_mesh_dist = hit.t; + intersection = cam_ctr + cam_to_mesh_dist * world_ray; + return true; + } + + return false; +} + +// Project and save a mesh as an obj file to out_prefix.obj, +// out_prefix.mtl, out_prefix.png. 
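+// The three outputs reference each other: the .obj names the .mtl file via
+// "mtllib", and the .mtl names the .png file via "map_Kd", with relative
+// paths, so the three files must be kept in the same directory.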
+void meshProject(mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, + cv::Mat const& image, Eigen::Affine3d const& world_to_cam, + camera::CameraParameters const& cam_params, + int64_t num_exclude_boundary_pixels, std::string const& out_prefix) { + // Create the output directory, if needed + std::string out_dir = boost::filesystem::path(out_prefix).parent_path().string(); + if (out_dir != "") dense_map::createDir(out_dir); + + std::vector face_vec; + std::map uv_map; + + std::vector const& faces = mesh->get_faces(); + int64_t num_faces = faces.size(); + std::vector smallest_cost_per_face(num_faces, 1.0e+100); + + camera::CameraModel cam(world_to_cam, cam_params); + + // Find the UV coordinates and the faces having them + dense_map::projectTexture(mesh, bvh_tree, image, cam, num_exclude_boundary_pixels, + smallest_cost_per_face, face_vec, uv_map); + + // Strip the directory name, according to .obj file conventions. + std::string suffix = boost::filesystem::path(out_prefix).filename().string(); + + std::string obj_str; + dense_map::formObjCustomUV(mesh, face_vec, uv_map, suffix, obj_str); + + std::string mtl_str; + dense_map::formMtl(suffix, mtl_str); + + std::string obj_file = out_prefix + ".obj"; + std::cout << "Writing: " << obj_file << std::endl; + std::ofstream obj_handle(obj_file); + obj_handle << obj_str; + obj_handle.close(); + + std::string mtl_file = out_prefix + ".mtl"; + std::cout << "Writing: " << mtl_file << std::endl; + std::ofstream mtl_handle(mtl_file); + mtl_handle << mtl_str; + mtl_handle.close(); + + std::string texture_file = out_prefix + ".png"; + std::cout << "Writing: " << texture_file << std::endl; + cv::imwrite(texture_file, image); +} + } // namespace dense_map diff --git a/dense_map/geometry_mapper/tools/add_sci_cam_to_bag.cc b/dense_map/geometry_mapper/tools/add_sci_cam_to_bag.cc index 7b15efbc..5eb69ca8 100644 --- a/dense_map/geometry_mapper/tools/add_sci_cam_to_bag.cc +++ b/dense_map/geometry_mapper/tools/add_sci_cam_to_bag.cc @@ -117,9 +117,32 @@ int main(int argc, char** argv) { std::string topic = m.getTopic(); if (topic == FLAGS_sci_cam_topic) continue; - double timestamp = m.getTime().toSec(); - if (beg_time < 0) beg_time = timestamp; - end_time = timestamp; + // If the current message is an image or a point cloud, get its + // header timestamp. That is a more reliable time than + // m.getTime(), which is the time the message got recorded, not + // when it got made. 
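+      // Try each message type in turn. instantiate() returns a null pointer
+      // when the message is not of the requested type, so this chain falls
+      // through from Image to CompressedImage to PointCloud2, and the
+      // timestamp stays negative for any other message type.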
+ double timestamp = -1.0; + sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); + if (image_msg) { + timestamp = image_msg->header.stamp.toSec(); + } else { + sensor_msgs::CompressedImage::ConstPtr comp_image_msg = + m.instantiate(); + if (comp_image_msg) { + timestamp = comp_image_msg->header.stamp.toSec(); + } else { + sensor_msgs::PointCloud2::ConstPtr pc_msg = m.instantiate(); + if (pc_msg) { + timestamp = pc_msg->header.stamp.toSec(); + } + } + } + + if (timestamp > 0.0) { + // Record beg and end time from the headers + if (beg_time < 0) beg_time = timestamp; + end_time = timestamp; + } output_bag.write(m.getTopic(), m.getTime(), m); } diff --git a/dense_map/geometry_mapper/tools/camera_calibrator.cc b/dense_map/geometry_mapper/tools/camera_calibrator.cc index aa171bdc..7182ec14 100644 --- a/dense_map/geometry_mapper/tools/camera_calibrator.cc +++ b/dense_map/geometry_mapper/tools/camera_calibrator.cc @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -115,11 +114,11 @@ DEFINE_int32(num_cam1_focal_lengths, 1, DEFINE_int32(num_cam2_focal_lengths, 1, "If set to 2, use separate focal lengths along image rows and columns for cam2. Else use the same one."); -DEFINE_int32(num_cam1_iterations, 10000, "How many solver iterations to perform to solve for cam1 intrinsics."); +DEFINE_int32(num_cam1_iterations, 1000, "How many solver iterations to perform to solve for cam1 intrinsics."); -DEFINE_int32(num_cam2_iterations, 10000, "How many solver iterations to perform to solve for cam2 intrinsics."); +DEFINE_int32(num_cam2_iterations, 1000, "How many solver iterations to perform to solve for cam2 intrinsics."); -DEFINE_int32(num_extrinsics_iterations, 10000, "How many solver iterations to perform to solve for extrinsics."); +DEFINE_int32(num_extrinsics_iterations, 1000, "How many solver iterations to perform to solve for extrinsics."); DEFINE_double(robust_threshold, 4.0, "Pixel errors much larger than this will be exponentially attenuated " @@ -233,7 +232,7 @@ struct IntrinsicsError { new IntrinsicsError(pix, xyz, block_sizes, orig_cam_params)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add. for (size_t i = 0; i < block_sizes.size(); i++) { @@ -319,7 +318,7 @@ struct IntrinsicsDepthError { new IntrinsicsDepthError(pix, xyz, block_sizes, orig_cam_params)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add. for (size_t i = 0; i < block_sizes.size(); i++) { @@ -396,7 +395,7 @@ struct ExtrinsicsError { new ExtrinsicsError(target_corner_pixel, target_corner_meas, interp_world_to_cam1, block_sizes, cam2_params)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_PIX_PARAMS); // The camera wrapper knows all of the block sizes to add. for (size_t i = 0; i < block_sizes.size(); i++) { @@ -487,6 +486,11 @@ void read_depth_cam_meas( // Inputs cam2_timestamps.push_back(it->first); } + if (cam2_timestamps.empty()) { + std::cout << "Could not read any target corners." 
<< std::endl; + return; + } + // Iterate over the depth cam clouds rosbag::Bag bag; bag.open(bag_file, rosbag::bagmode::Read); @@ -551,13 +555,15 @@ void read_depth_cam_meas( // Inputs } pcl::PointCloud pc; - pcl::fromROSMsg(*pc_msg, pc); - if (static_cast(pc.points.size()) != static_cast(pc_msg->width * pc_msg->height)) + dense_map::msgToPcl(pc_msg, pc); + + if (static_cast(pc.points.size()) != pc_msg->width * pc_msg->height) LOG(FATAL) << "Extracted point cloud size does not agree with original size."; double best_image_timestamp = cam2_timestamps[image_cid]; auto it = cam2_target_corners.find(best_image_timestamp); - if (it == cam2_target_corners.end()) LOG(FATAL) << "Cannot locate target corners at desired timestamp."; + if (it == cam2_target_corners.end()) + LOG(FATAL) << "Cannot locate target corners at desired timestamp."; // Will push here one depth measurement for each target corners. // Invalid ones will be (0, 0, 0). @@ -601,7 +607,8 @@ void read_depth_cam_meas( // Inputs // Create the vector timestamp offset samples. If none are specified // on the command line, use only the one specified in the config file. -void parse_timestamp_offset_sampling(double cam1_to_cam2_timestamp_offset, std::string const& timestamp_offset_sampling, +void parse_timestamp_offset_sampling(double cam1_to_cam2_timestamp_offset, + std::string const& timestamp_offset_sampling, std::vector& samples) { std::istringstream is(timestamp_offset_sampling); @@ -683,7 +690,10 @@ double calc_and_print_residual_stats(std::string const& prefix, std::string cons std::vector err_norm; size_t len = residuals.size() / 2; - if (len == 0) LOG(FATAL) << "No residuals exist."; + if (len == 0) { + std::cout << "No residuals exist. This is likely an error.\n"; + return 0; + } double res = 0.0; for (size_t it = 0; it < len; it++) { @@ -694,15 +704,18 @@ double calc_and_print_residual_stats(std::string const& prefix, std::string cons res /= len; std::sort(err_norm.begin(), err_norm.end()); - std::cout << prefix << " min, mean, median and max reprojection error for " << cam_name << ": " << err_norm[0] << ' ' + std::cout << prefix << " min, mean, median and max reprojection error for " + << cam_name << ": " << err_norm[0] << ' ' << res << ' ' << err_norm[err_norm.size() / 2] << ' ' << err_norm.back() << std::endl; // Return the median error return err_norm[err_norm.size() / 2]; } -void save_residuals(std::string const& prefix, std::string const& cam_name, std::string const& output_dir, - std::vector const& pixels, std::vector const& residuals) { +void save_residuals(std::string const& prefix, std::string const& cam_name, + std::string const& output_dir, + std::vector const& pixels, + std::vector const& residuals) { size_t len = residuals.size() / 2; if (len != pixels.size()) { LOG(FATAL) << "Must have one residual norm per pixel."; @@ -715,8 +728,8 @@ void save_residuals(std::string const& prefix, std::string const& cam_name, std: ofs << "# pixel_x, pixel_y, residual_x, residual_y, residual_norm\n"; for (size_t it = 0; it < len; it++) { double norm = Eigen::Vector2d(residuals[2 * it + 0], residuals[2 * it + 1]).norm(); - ofs << pixels[it][0] << ", " << pixels[it][1] << ", " << residuals[2 * it + 0] << ", " << residuals[2 * it + 1] - << ", " << norm << std::endl; + ofs << pixels[it][0] << ", " << pixels[it][1] << ", " << residuals[2 * it + 0] + << ", " << residuals[2 * it + 1] << ", " << norm << std::endl; } ofs.close(); } @@ -769,37 +782,42 @@ void refine_intrinsics( // Inputs ceres::LossFunction* loss_function = 
GetLossFunction("cauchy", FLAGS_robust_threshold); - problem.AddResidualBlock(cost_function, loss_function, &focal_vector[0], &optical_center[0], &distortion[0], + problem.AddResidualBlock(cost_function, loss_function, &focal_vector[0], + &optical_center[0], &distortion[0], &cameras[NUM_RIGID_PARAMS * cam_it]); // In case it is ever desired to experiment with fixing the cameras // problem.SetParameterBlockConstant(&cameras[NUM_RIGID_PARAMS * cam_it]); - } - } - // See which values to float or keep fixed - if (cam_intrinsics_to_float.find("focal_length") == cam_intrinsics_to_float.end()) { - // std::cout << "For " << cam_name << " not floating focal_length." << - // std::endl; - problem.SetParameterBlockConstant(&focal_vector[0]); - } else { - // std::cout << "For " << cam_name << " floating focal_length." << - // std::endl; - } - if (cam_intrinsics_to_float.find("optical_center") == cam_intrinsics_to_float.end()) { - // std::cout << "For " << cam_name << " not floating optical_center." << - // std::endl; - problem.SetParameterBlockConstant(&optical_center[0]); - } else { - // std::cout << "For " << cam_name << " floating optical_center." << - // std::endl; - } - if (cam_intrinsics_to_float.find("distortion") == cam_intrinsics_to_float.end()) { - // std::cout << "For " << cam_name << " not floating distortion." << - // std::endl; - problem.SetParameterBlockConstant(&distortion[0]); - } else { - // std::cout << "For " << cam_name << " floating distortion." << std::endl; + // This logic must be in the loop, right after the residual got + // added, otherwise one risks fixing parameters which were not + // created yet. + + // See which values to float or keep fixed + if (cam_intrinsics_to_float.find("focal_length") == cam_intrinsics_to_float.end()) { + // std::cout << "For " << cam_name << " not floating focal_length." << + // std::endl; + problem.SetParameterBlockConstant(&focal_vector[0]); + } else { + // std::cout << "For " << cam_name << " floating focal_length." << + // std::endl; + } + if (cam_intrinsics_to_float.find("optical_center") == cam_intrinsics_to_float.end()) { + // std::cout << "For " << cam_name << " not floating optical_center." << + // std::endl; + problem.SetParameterBlockConstant(&optical_center[0]); + } else { + // std::cout << "For " << cam_name << " floating optical_center." << + // std::endl; + } + if (cam_intrinsics_to_float.find("distortion") == cam_intrinsics_to_float.end()) { + // std::cout << "For " << cam_name << " not floating distortion." << + // std::endl; + problem.SetParameterBlockConstant(&distortion[0]); + } else { + // std::cout << "For " << cam_name << " floating distortion." 
<< std::endl; + } + } } // Evaluate the residual before optimization @@ -1170,8 +1188,10 @@ void find_initial_extrinsics(std::vector const& cam1_timestamps, std::ve // Refine cam2_to_cam1_transform void refine_extrinsics(int num_iterations, std::vector const& cam1_timestamps, std::vector const& cam2_timestamps, - std::vector const& world_to_cam1_vec, double cam1_to_cam2_timestamp_offset, - double max_interp_dist, boost::shared_ptr const& cam2_params, + std::vector const& world_to_cam1_vec, + double cam1_to_cam2_timestamp_offset, + double max_interp_dist, + boost::shared_ptr const& cam2_params, std::map>> const& cam2_target_corners, Eigen::Affine3d& cam2_to_cam1_transform, double& median_error) { // Convert the initial transform to vector format, to pass to the solver @@ -1224,7 +1244,8 @@ void refine_extrinsics(int num_iterations, std::vector const& cam1_times double alpha = (cam2_time - left_cam1_time) / (right_cam1_time - left_cam1_time); - Eigen::Affine3d interp_world_to_cam1 = dense_map::linearInterp(alpha, left_world_to_cam1, right_world_to_cam1); + Eigen::Affine3d interp_world_to_cam1 + = dense_map::linearInterp(alpha, left_world_to_cam1, right_world_to_cam1); auto it = cam2_target_corners.find(cam2_time); if (it == cam2_target_corners.end()) continue; @@ -1238,8 +1259,9 @@ void refine_extrinsics(int num_iterations, std::vector const& cam1_times target_corner_pixel[0] = target_corners[pt_iter][3]; // projection into camera x target_corner_pixel[1] = target_corners[pt_iter][4]; // projection into camera y - ceres::CostFunction* cost_function = ExtrinsicsError::Create(target_corner_pixel, target_corner_meas, - interp_world_to_cam1, block_sizes, cam2_params); + ceres::CostFunction* cost_function + = ExtrinsicsError::Create(target_corner_pixel, target_corner_meas, + interp_world_to_cam1, block_sizes, cam2_params); problem.AddResidualBlock(cost_function, loss_function, &cam2_to_cam1_transform_vec[0]); } } @@ -1279,9 +1301,11 @@ void refine_extrinsics(int num_iterations, std::vector const& cam1_times int main(int argc, char** argv) { ff_common::InitFreeFlyerApplication(&argc, &argv); GOOGLE_PROTOBUF_VERIFY_VERSION; - if (FLAGS_cam1_dir.empty() || FLAGS_cam2_dir.empty()) LOG(FATAL) << "Not all inputs were specified."; + if (FLAGS_cam1_dir.empty() || FLAGS_cam2_dir.empty()) + LOG(FATAL) << "Not all inputs were specified."; - if ((FLAGS_cam1_name != "nav_cam" && FLAGS_cam1_name != "sci_cam") || FLAGS_cam2_name != "haz_cam") + if ((FLAGS_cam1_name != "nav_cam" && FLAGS_cam1_name != "sci_cam") || + FLAGS_cam2_name != "haz_cam") LOG(FATAL) << "Must specify -cam1_name as one of nav_cam or sci_cam," << " and -cam2_name as haz_cam."; @@ -1309,30 +1333,52 @@ int main(int argc, char** argv) { dense_map::parse_intrinsics_to_float(FLAGS_cam1_intrinsics_to_float, cam1_intrinsics_to_float); dense_map::parse_intrinsics_to_float(FLAGS_cam2_intrinsics_to_float, cam2_intrinsics_to_float); - config_reader::ConfigReader config; - config.AddFile("cameras.config"); - config.AddFile("transforms.config"); - if (!config.ReadFiles()) LOG(FATAL) << "Failed to read config files."; - // read cam1 params - boost::shared_ptr cam1_params = - boost::shared_ptr(new camera::CameraParameters(&config, FLAGS_cam1_name.c_str())); - // read cam2 params - boost::shared_ptr cam2_params = - boost::shared_ptr(new camera::CameraParameters(&config, FLAGS_cam2_name.c_str())); - - std::string cam1_to_cam2_timestamp_offset_str; + // Read the bot config file + std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector 
cam_params; + std::vector ref_to_cam_trans; + std::vector ref_to_cam_timestamp_offsets; + Eigen::Affine3d navcam_to_body_trans; + Eigen::Affine3d hazcam_depth_to_image_transform; + dense_map::readConfigFile( // Inputs + cam_names, "nav_cam_transform", "haz_cam_depth_to_image_transform", + // Outputs + cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, navcam_to_body_trans, + hazcam_depth_to_image_transform); + + Eigen::Affine3d hazcam_to_navcam_aff_trans = ref_to_cam_trans[1].inverse(); + Eigen::Affine3d scicam_to_hazcam_aff_trans = + ref_to_cam_trans[1] * ref_to_cam_trans[2].inverse(); + double navcam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1]; + double scicam_to_hazcam_timestamp_offset = + ref_to_cam_timestamp_offsets[1] - ref_to_cam_timestamp_offsets[2]; + + // Set cam1 and cam2 params + boost::shared_ptr cam1_params; + boost::shared_ptr cam2_params; double cam1_to_cam2_timestamp_offset = 0.0; + std::string cam1_to_cam2_timestamp_offset_str; + Eigen::Affine3d cam2_to_cam1_transform; if (FLAGS_cam1_name == "nav_cam" && FLAGS_cam2_name == "haz_cam") { + cam1_params = boost::shared_ptr + (new camera::CameraParameters(cam_params[0])); + cam2_params = boost::shared_ptr + (new camera::CameraParameters(cam_params[1])); + cam1_to_cam2_timestamp_offset = navcam_to_hazcam_timestamp_offset; cam1_to_cam2_timestamp_offset_str = "navcam_to_hazcam_timestamp_offset"; + cam2_to_cam1_transform = hazcam_to_navcam_aff_trans; } else if (FLAGS_cam1_name == "sci_cam" && FLAGS_cam2_name == "haz_cam") { + cam1_params = boost::shared_ptr + (new camera::CameraParameters(cam_params[2])); + cam2_params = boost::shared_ptr + (new camera::CameraParameters(cam_params[1])); + cam1_to_cam2_timestamp_offset = scicam_to_hazcam_timestamp_offset; cam1_to_cam2_timestamp_offset_str = "scicam_to_hazcam_timestamp_offset"; + cam2_to_cam1_transform = scicam_to_hazcam_aff_trans.inverse(); } else { LOG(FATAL) << "Must specify -cam1_name as nav_cam or sci_cam, " << "and -cam2_name as haz_cam."; } - if (!config.GetReal(cam1_to_cam2_timestamp_offset_str.c_str(), &cam1_to_cam2_timestamp_offset)) - LOG(FATAL) << "Could not read value of " << cam1_to_cam2_timestamp_offset - << " for robot: " << getenv("ASTROBEE_ROBOT"); if (FLAGS_num_cam1_focal_lengths != 1 && FLAGS_num_cam1_focal_lengths != 2) LOG(FATAL) << "Can use 1 or 2 focal lengths."; @@ -1350,7 +1396,8 @@ int main(int argc, char** argv) { } std::vector timestamp_offset_samples; - dense_map::parse_timestamp_offset_sampling(cam1_to_cam2_timestamp_offset, FLAGS_timestamp_offset_sampling, + dense_map::parse_timestamp_offset_sampling(cam1_to_cam2_timestamp_offset, + FLAGS_timestamp_offset_sampling, timestamp_offset_samples); // Find cam1 poses and timestamps @@ -1366,12 +1413,10 @@ int main(int argc, char** argv) { // Refine cam1_params and world_to_cam1_vec if (FLAGS_update_cam1) dense_map::refine_intrinsics( // Inputs - FLAGS_cam1_name, FLAGS_num_cam1_focal_lengths, - FLAGS_num_cam1_iterations, cam1_intrinsics_to_float, - FLAGS_output_dir, - // Outputs - cam1_params, cam1_observations, cam1_landmarks, world_to_cam1_vec); - + FLAGS_cam1_name, FLAGS_num_cam1_focal_lengths, FLAGS_num_cam1_iterations, + cam1_intrinsics_to_float, FLAGS_output_dir, + // Outputs + cam1_params, cam1_observations, cam1_landmarks, world_to_cam1_vec); // Refine cam2_params. 
It is very important to note that here we // want the 3D points as measured by the depth camera to project // accurately to the target corner pixels, unlike for the intrinsics @@ -1380,40 +1425,38 @@ int main(int argc, char** argv) { // depth camera while cam1 is not. if (FLAGS_update_cam2) dense_map::refine_depth_cam_intrinsics( // Inputs - FLAGS_cam2_name, FLAGS_num_cam2_focal_lengths, - FLAGS_num_cam2_iterations, cam2_intrinsics_to_float, - cam2_target_corners, target_corners_depth, - // Outputs - cam2_params); + FLAGS_cam2_name, FLAGS_num_cam2_focal_lengths, FLAGS_num_cam2_iterations, + cam2_intrinsics_to_float, cam2_target_corners, target_corners_depth, + // Outputs + cam2_params); // Find cam2 poses and timestamps std::vector cam2_timestamps; std::vector> cam2_observations; std::vector> cam2_landmarks; std::vector world_to_cam2_vec; - dense_map::find_world_to_cam_trans(FLAGS_cam2_name, cam2_params, cam2_target_corners, cam2_timestamps, - cam2_observations, cam2_landmarks, world_to_cam2_vec); + dense_map::find_world_to_cam_trans(FLAGS_cam2_name, cam2_params, cam2_target_corners, + cam2_timestamps, cam2_observations, cam2_landmarks, + world_to_cam2_vec); // Find the transform to correct the scale of the haz cam depth clouds - Eigen::Affine3d depth_to_image_transform; if (FLAGS_update_depth_to_image_transform) dense_map::find_depth_to_image_transform( // Inputs - cam2_timestamps, world_to_cam2_vec, cam2_target_corners, - target_corners_depth, FLAGS_cam2_name, cam2_params, + cam2_timestamps, world_to_cam2_vec, cam2_target_corners, target_corners_depth, + FLAGS_cam2_name, cam2_params, // Outputs - depth_to_image_transform); + hazcam_depth_to_image_transform); // Find the transform from cam2 to cam1. - Eigen::Affine3d cam2_to_cam1_transform; if (FLAGS_update_extrinsics) { // If provided, try several values // for cam1_to_cam2_timestamp_offset and pick the one with the smallest // median error. if (FLAGS_timestamp_offset_sampling != "") { std::cout << std::endl; - std::cout << "Sampling the timestamp offset from " << timestamp_offset_samples.front() << " to " - << timestamp_offset_samples.back() << " with " << timestamp_offset_samples.size() << " sample(s)." - << std::endl; + std::cout << "Sampling the timestamp offset from " << timestamp_offset_samples.front() + << " to " << timestamp_offset_samples.back() << " with " + << timestamp_offset_samples.size() << " sample(s)." 
<< std::endl; } double median_extrinsics_err = std::numeric_limits::max(); @@ -1426,12 +1469,17 @@ int main(int argc, char** argv) { std::cout << "Finding extrinsics with " << cam1_to_cam2_timestamp_offset_str << " " << curr_cam1_to_cam2_timestamp_offset << std::endl; - dense_map::find_initial_extrinsics(cam1_timestamps, cam2_timestamps, world_to_cam1_vec, world_to_cam2_vec, - curr_cam1_to_cam2_timestamp_offset, FLAGS_max_interp_dist, + dense_map::find_initial_extrinsics(cam1_timestamps, cam2_timestamps, + world_to_cam1_vec, world_to_cam2_vec, + curr_cam1_to_cam2_timestamp_offset, + FLAGS_max_interp_dist, curr_cam2_to_cam1_transform); - dense_map::refine_extrinsics(FLAGS_num_extrinsics_iterations, cam1_timestamps, cam2_timestamps, world_to_cam1_vec, - curr_cam1_to_cam2_timestamp_offset, FLAGS_max_interp_dist, cam2_params, - cam2_target_corners, curr_cam2_to_cam1_transform, curr_median_extrinsics_err); + dense_map::refine_extrinsics(FLAGS_num_extrinsics_iterations, cam1_timestamps, + cam2_timestamps, world_to_cam1_vec, + curr_cam1_to_cam2_timestamp_offset, FLAGS_max_interp_dist, + cam2_params, + cam2_target_corners, curr_cam2_to_cam1_transform, + curr_median_extrinsics_err); if (curr_median_extrinsics_err < median_extrinsics_err) { cam1_to_cam2_timestamp_offset = curr_cam1_to_cam2_timestamp_offset; cam2_to_cam1_transform = curr_cam2_to_cam1_transform; @@ -1440,21 +1488,46 @@ int main(int argc, char** argv) { } std::cout << std::endl; - std::cout << "Final value of " << cam1_to_cam2_timestamp_offset_str << " is " << cam1_to_cam2_timestamp_offset - << " with smallest median error of " << median_extrinsics_err << " pixels" << std::endl; + std::cout << "Final value of " << cam1_to_cam2_timestamp_offset_str << " is " + << cam1_to_cam2_timestamp_offset + << " with smallest median error of " << median_extrinsics_err + << " pixels" << std::endl; std::cout << std::endl; - std::cout << "Extrinsics transform from " << FLAGS_cam2_name << " to " << FLAGS_cam1_name << ":\n" + std::cout << "Extrinsics transform from " << FLAGS_cam2_name << " to " + << FLAGS_cam1_name << ":\n" << cam2_to_cam1_transform.matrix() << std::endl; } - bool update_timestamp_offset = (FLAGS_timestamp_offset_sampling != ""); - dense_map::update_config_file(FLAGS_update_cam1, FLAGS_cam1_name, cam1_params, FLAGS_update_cam2, FLAGS_cam2_name, - cam2_params, FLAGS_update_depth_to_image_transform, depth_to_image_transform, - FLAGS_update_extrinsics, cam2_to_cam1_transform, update_timestamp_offset, - cam1_to_cam2_timestamp_offset_str, cam1_to_cam2_timestamp_offset); + // Save back the results + if (FLAGS_cam1_name == "nav_cam" && FLAGS_cam2_name == "haz_cam") { + cam_params[0] = *cam1_params; + cam_params[1] = *cam2_params; + navcam_to_hazcam_timestamp_offset = cam1_to_cam2_timestamp_offset; + hazcam_to_navcam_aff_trans = cam2_to_cam1_transform; + } else if (FLAGS_cam1_name == "sci_cam" && FLAGS_cam2_name == "haz_cam") { + cam_params[2] = *cam1_params; + cam_params[1] = *cam2_params; + scicam_to_hazcam_timestamp_offset = cam1_to_cam2_timestamp_offset; + scicam_to_hazcam_aff_trans = cam2_to_cam1_transform.inverse(); + } else { + LOG(FATAL) << "Must specify -cam1_name as nav_cam or sci_cam, " + << "and -cam2_name as haz_cam."; + } - if (FLAGS_output_dir == "") std::cout << "No output directory provided, hence no residuals got saved." 
-                                          << std::endl;

+  // Recall that cam_names = {"nav_cam", "haz_cam", "sci_cam"};
+  // nav_to_haz
+  ref_to_cam_trans[1] = hazcam_to_navcam_aff_trans.inverse();
+  // nav_to_sci
+  ref_to_cam_trans[2] = scicam_to_hazcam_aff_trans.inverse() *
+    hazcam_to_navcam_aff_trans.inverse();
+  ref_to_cam_timestamp_offsets[1] = navcam_to_hazcam_timestamp_offset;
+  ref_to_cam_timestamp_offsets[2] =
+    navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset;
+  dense_map::updateConfigFile(cam_names, "haz_cam_depth_to_image_transform",
+                              cam_params, ref_to_cam_trans,
+                              ref_to_cam_timestamp_offsets,
+                              hazcam_depth_to_image_transform);
   return 0;
 }
diff --git a/dense_map/geometry_mapper/tools/camera_refiner.cc b/dense_map/geometry_mapper/tools/camera_refiner.cc
index 56dc9d9e..84fb1855 100644
--- a/dense_map/geometry_mapper/tools/camera_refiner.cc
+++ b/dense_map/geometry_mapper/tools/camera_refiner.cc
@@ -17,9 +17,73 @@
  * under the License.
  */

-// TODO(oalexan1): Consider adding a haz cam to haz cam
-// reprojection error in the camera refiner. There will be more
-// haz to haz matches than haz to nav or haz to sci.
+// The algorithm:
+
+// We assume our camera rig has n camera types. Each can be image or
+// depth + image. Just one camera must be the reference camera. In
+// this code that will be nav_cam.
+
+// We assume we know the precise time every camera image is acquired.
+// Every non-ref camera will be bracketed by two ref cameras very
+// close in time. Hence, given the two bracketing ref camera poses,
+// the ref cam pose will be interpolated at the time a non-ref camera
+// is measured. This allows one to model the transform between
+// the ref camera and every other camera on the rig.
+
+// The variables to be optimized will be the pose of each ref camera,
+// and the transforms from the ref camera to every other camera type
+// (the extrinsics), with these transforms independent of time as the
+// rig is rigid. Also optimized are the intrinsics of each camera, and
+// the transform from each depth camera's cloud coordinates to its
+// image coordinates (it is a transform very close to the identity but
+// not quite, and a scale factor may be present).
+
+// One component of the cost function to minimize measures the
+// reprojection error in each camera, from each triangulated point in
+// world coordinates. A second one measures the error between a
+// triangulated point and the corresponding depth measurement at that
+// pixel, when applicable, with appropriate transforms applied to
+// bring the depth measurement to world coordinates. This second
+// error's strength is controlled by depth_tri_weight.
+
+// Optionally, one can constrain the triangulated points to
+// intersect close to a preexisting mesh, representing the surface
+// being scanned with the rig given a previous estimation of all
+// the camera poses. That mesh is computed using the geometry mapper.
+// One also can control how close the depth camera clouds are to this
+// mesh. The flags for this are mesh_tri_weight and depth_mesh_weight,
+// and they can be set to 0 if these constraints are not desired.
+
+// These mesh constraints bring in additional information,
+// particularly for the cameras lacking depth, and help recover the
+// focal lengths correctly.
+
+// If different camera sensors are on different CPUs, and a time
+// offset exists among their clocks, this program can model that,
+// and also float those offsets, if desired.
+
+// The initial ref camera poses are computed using SfM, with the
+// Astrobee build_map tool.
The obtained "sparse map" of poses must +// be registered to world coordinates, to get the world scale correctly. +// The sparse map can be fixed or further refined in this tool. + +// The initial extrinsics are assumed known, and are refined by this +// tool. Likely SfM can be used to get an initial value of the +// extrinsics, but for that may need to use the Theia tool which can +// do SfM with cameras acquired with different sensors. + +// Every camera object (struct cameraImage) can look up its type, +// timestamp, timestamps and indices of bracketing cameras, image topic, +// depth topic (if present), ref_to_cam_timestamp_offset, and +// ref_to_cam_transform (extrinsics). A camera object also stores its +// image and depth cloud. + +// For every instance of a reference camera its +// ref_to_cam_timestamp_offset is 0 and kept fixed, +// ref_to_cam_transform (extrinsics) is the identity and kept fixed, +// and the indices pointing to the left and right ref bracketing +// cameras are identical. + #include #include #include @@ -64,6 +128,7 @@ #include #include #include +#include #include #include @@ -71,95 +136,171 @@ #include #include -DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and full-resolution sci_cam data."); +DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam intensity, " + "full-resolution sci_cam, and haz_cam depth clouds."); DEFINE_string(sparse_map, "", "A registered SURF sparse map made with some of the ROS bag data, " - "and including nav cam images closely bracketing the sci cam images."); + "including nav cam images closely bracketing the sci cam images."); DEFINE_string(output_map, "", "Output file containing the updated map."); -DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); +DEFINE_string(nav_cam_topic, "/mgt/img_sampler/nav_cam/image_record", + "The nav cam topic in the bag file."); -DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); +DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", + "The depth point cloud topic in the bag file."); DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", "The depth camera intensity topic in the bag file."); DEFINE_string(sci_cam_topic, "/hw/cam_sci/compressed", "The sci cam topic in the bag file."); -DEFINE_double(start, 0.0, "How many seconds into the bag to start processing the data."); - -DEFINE_double(duration, -1.0, "For how many seconds to do the processing."); +DEFINE_int32(num_overlaps, 10, "How many images (of all camera types) close and forward in " + "time to match to given image."); DEFINE_double(max_haz_cam_image_to_depth_timestamp_diff, 0.2, - "Use depth haz cam clouds that are within this distance in " - "time from the nearest haz cam intensity image."); + "Use a haz cam depth cloud only if it is within this distance in time " + "from the nearest haz cam intensity image."); DEFINE_double(robust_threshold, 3.0, - "Pixel errors much larger than this will be exponentially attenuated " - "to affect less the cost function."); + "Residual pixel errors and 3D point residuals (the latter multiplied " + "by corresponding weight) much larger than this will be " + "exponentially attenuated to affect less the cost function.\n"); DEFINE_int32(num_iterations, 20, "How many solver iterations to perform in calibration."); -DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables change by less than this."); - -DEFINE_double(bracket_len, 2.0, 
+DEFINE_double(bracket_len, 0.6,
              "Lookup sci and haz cam images only between consecutive nav cam images "
              "whose distance in time is no more than this (in seconds), after adjusting "
              "for the timestamp offset between these cameras. It is assumed the robot "
-             "moves slowly and uniformly during this time.");
+             "moves slowly and uniformly during this time. A large value here will "
+             "make the refiner compute a poor solution but a small value will prevent "
+             "enough sci_cam images from being bracketed.");

-DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization.");
+DEFINE_string(nav_cam_intrinsics_to_float, "",
+              "Refine given nav_cam intrinsics. Specify as a quoted list. "
+              "For example: 'focal_length optical_center distortion'.");

-DEFINE_string(hugin_file, "", "The path to the hugin .pto file used for sparse map registration.");
-
-DEFINE_string(xyz_file, "", "The path to the xyz file used for sparse map registration.");
+DEFINE_string(haz_cam_intrinsics_to_float, "",
+              "Refine given haz_cam intrinsics. Specify as a quoted list. "
+              "For example: 'focal_length optical_center distortion'.");

-DEFINE_bool(timestamp_interpolation, false,
-            "If true, interpolate between "
-            "timestamps. May give better results if the robot is known to move "
-            "uniformly, and perhaps less so for stop-and-go robot motion.");
-
-DEFINE_string(sci_cam_timestamps, "",
-              "Use only these sci cam timestamps. Must be "
-              "a file with one timestamp per line.");
-
-DEFINE_bool(skip_registration, false,
-            "If true, do not re-register the optimized map. "
-            "Then the hugin and xyz file options need not be provided. "
-            "This may result in the scale not being correct if the sparse map is not fixed.");
-
-DEFINE_bool(opt_map_only, false, "If to optimize only the map and not the camera params.");
+DEFINE_string(sci_cam_intrinsics_to_float, "",
+              "Refine given sci_cam intrinsics. Specify as a quoted list. "
+              "For example: 'focal_length optical_center distortion'.");

-DEFINE_bool(fix_map, false, "Do not optimize the sparse map, hence only the camera params.");
+DEFINE_string(extrinsics_to_float, "haz_cam sci_cam depth_to_image",
+              "Specify the cameras whose extrinsics, relative to nav_cam, to optimize. Also "
+              "consider whether to float the haz_cam depth_to_image transform.");

 DEFINE_bool(float_scale, false,
-            "If to optimize the scale of the clouds (use it if the "
-            "sparse map is kept fixed).");
-
-DEFINE_string(sci_cam_intrinsics_to_float, "",
-              "Refine 0 or more of the following intrinsics for sci_cam: focal_length, "
-              "optical_center, distortion. Specify as a quoted list. "
-              "For example: 'focal_length optical_center'.");
-
-DEFINE_double(scicam_to_hazcam_timestamp_offset_override_value,
+            "Whether to optimize the scale of the clouds, part of haz_cam_depth_to_image_transform "
+            "(use it if the sparse map is kept fixed, or else rescaling and registration "
+            "of the map and extrinsics is needed). This parameter should not be used with "
+            "--affine_depth_to_image when the transform is affine, rather than rigid and a scale. "
+            "See also --extrinsics_to_float.");
+
+DEFINE_bool(float_sparse_map, false,
+            "Optimize the sparse map. This should be avoided as it can invalidate the scales "
+            "of the extrinsics and the registration. It should at least be used with big mesh "
+            "weights to attenuate those effects. See also: --registration.");
+DEFINE_bool(float_timestamp_offsets, false,
+            "Whether to optimize the timestamp offsets among the cameras.");

+DEFINE_int32(nav_cam_num_exclude_boundary_pixels, 0,
+             "Flag as outliers nav cam pixels closer than this to the boundary, and ignore "
+             "that boundary region when texturing with the --out_texture_dir option. "
+             "This may improve the calibration accuracy, especially if switching from fisheye "
+             "to radtan distortion for nav_cam. See also the geometry_mapper "
+             "--undistorted_crop_wins option.");

+DEFINE_double(timestamp_offsets_max_change, 1.0,
+              "If floating the timestamp offsets, do not let them change by more than this "
+              "(measured in seconds). Existing image bracketing acts as an additional constraint.");

+DEFINE_double(nav_cam_to_sci_cam_offset_override_value,
              std::numeric_limits<double>::quiet_NaN(),
-             "Override the value of scicam_to_hazcam_timestamp_offset from the robot config "
+             "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config "
              "file with this value.");

+DEFINE_double(depth_tri_weight, 1000.0,
+              "The weight to give to the constraint that depth measurements agree with "
+              "triangulated points. Use a bigger number as depth errors are usually on the "
+              "order of 0.01 meters while reprojection errors are on the order of 1 pixel.");

 DEFINE_string(mesh, "",
-              "Refine the sci cam so that the sci cam texture agrees with the nav cam "
-              "texture when projected on this mesh.");
+              "Use this mesh from a previous geometry mapper run to help constrain "
+              "the calibration (e.g., use fused_mesh.ply).");

+DEFINE_double(mesh_tri_weight, 0.0,
+              "A larger value will give more weight to the constraint that triangulated points "
+              "stay close to a preexisting mesh. Not suggested by default.");

+DEFINE_double(depth_mesh_weight, 0.0,
+              "A larger value will give more weight to the constraint that the depth clouds "
+              "stay close to the mesh. Not suggested by default.");

+DEFINE_bool(affine_depth_to_image, false, "Assume that the depth_to_image_transform "
+            "for each depth + image camera is an arbitrary affine transform rather than a "
+            "rotation times a scale.");

+DEFINE_int32(refiner_num_passes, 2, "How many passes of optimization to do. Outliers will be "
+             "removed after every pass. Each pass will start with the previously optimized "
+             "solution as an initial guess. Mesh intersections (if applicable) and ray "
+             "triangulation will be recomputed before each pass.");

+DEFINE_double(initial_max_reprojection_error, 300.0, "If filtering outliers, remove interest "
+              "points for which the reprojection error, in pixels, is larger than this. This "
+              "filtering happens when matches are created, before cameras are optimized, and "
+              "a big value should be used if the initial cameras are not trusted.");

+DEFINE_double(max_reprojection_error, 25.0, "If filtering outliers, remove interest points for "
+              "which the reprojection error, in pixels, is larger than this. This filtering "
+              "happens after each optimization pass finishes, unless disabled. It is better "
+              "not to filter too aggressively unless confident of the solution.");

+DEFINE_double(refiner_min_angle, 0.5, "If filtering outliers, remove triangulated points "
+              "for which all rays converging to them make an angle (in degrees) less than this. "
+ "Note that some cameras in the rig may be very close to each other relative to " + "the triangulated points, so care is needed here."); + +DEFINE_string(out_texture_dir, "", "If non-empty and if an input mesh was provided, " + "project the camera images using the optimized poses onto the mesh " + "and write the obtained .obj files in the given directory."); + +DEFINE_double(min_ray_dist, 0.0, "The minimum search distance from a starting point along a ray " + "when intersecting the ray with a mesh, in meters (if applicable)."); + +DEFINE_double(max_ray_dist, 100.0, "The maximum search distance from a starting point along a ray " + "when intersecting the ray with a mesh, in meters (if applicable)."); + +DEFINE_string(nav_cam_distortion_replacement, "", + "Replace nav_cam's distortion coefficients with this list after the initial " + "determination of triangulated points, and then continue with distortion " + "optimization. A quoted list of four or five values expected, separated by " + "spaces, as the replacement distortion model will have radial and tangential " + "coefficients. Set a positive --nav_cam_num_exclude_boundary_pixels."); + +DEFINE_bool(registration, false, + "If true, and registration control points for the sparse map exist and are specified " + "by --hugin_file and --xyz_file, re-register the sparse map at the end. All the " + "extrinsics, including depth_to_image_transform, will be scaled accordingly." + "This is not needed if the nav_cam intrinsics and the sparse map do not change."); -DEFINE_double(mesh_weight, 25.0, - "A larger value will give more weight to the mesh constraint. " - "The mesh residuals printed at the end can be used to examine " - "the effect of this weight."); +DEFINE_string(hugin_file, "", "The path to the hugin .pto file used for sparse map registration."); -DEFINE_double(mesh_robust_threshold, 3.0, - "A larger value will try harder to minimize large divergences from " - "the mesh (but note that some of those may be outliers)."); +DEFINE_string(xyz_file, "", "The path to the xyz file used for sparse map registration."); + +DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables change by " + "less than this."); + +DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization."); +DEFINE_int32(num_match_threads, 8, "How many threads to use in feature detection/matching. " + "A large number can use a lot of memory."); +DEFINE_string(sci_cam_timestamps, "", + "Use only these sci cam timestamps. Must be a file with one timestamp per line."); DEFINE_bool(verbose, false, "Print the residuals and save the images and match files." @@ -167,8 +308,53 @@ DEFINE_bool(verbose, false, namespace dense_map { -// TODO(oalexan1): Store separately matches which end up being -// squashed in a pid_cid_to_fid. +// Calculate interpolated world to camera transform. Use the +// convention that if beg_ref_stamp == end_ref_stamp, then this is the +// reference camera, and then only beg_world_to_ref_t is used, while +// end_world_to_ref_t is undefined. For the reference camera it is +// also expected that ref_to_cam_aff is the identity. This saves some +// code duplication later as the ref cam need not be treated +// separately. 
+Eigen::Affine3d calc_world_to_cam_trans(const double* beg_world_to_ref_t,
+                                        const double* end_world_to_ref_t,
+                                        const double* ref_to_cam_trans,
+                                        double beg_ref_stamp,
+                                        double end_ref_stamp,
+                                        double ref_to_cam_offset,
+                                        double cam_stamp) {
+  Eigen::Affine3d beg_world_to_ref_aff;
+  array_to_rigid_transform(beg_world_to_ref_aff, beg_world_to_ref_t);
+
+  Eigen::Affine3d interp_world_to_cam_aff;
+  if (beg_ref_stamp == end_ref_stamp) {
+    interp_world_to_cam_aff = beg_world_to_ref_aff;
+  } else {
+    Eigen::Affine3d end_world_to_ref_aff;
+    array_to_rigid_transform(end_world_to_ref_aff, end_world_to_ref_t);
+
+    Eigen::Affine3d ref_to_cam_aff;
+    array_to_rigid_transform(ref_to_cam_aff, ref_to_cam_trans);
+
+    // Convert from cam time to ref time and normalize. It is very
+    // important that below we subtract the big numbers from each
+    // other first, which are the timestamps, then subtract whatever
+    // else is necessary. Otherwise we get problems with numerical
+    // precision with Ceres.
+    // Note how the denominator never becomes 0.
+    double alpha = ((cam_stamp - beg_ref_stamp) - ref_to_cam_offset)
+      / (end_ref_stamp - beg_ref_stamp);
+
+    if (alpha < 0.0 || alpha > 1.0) LOG(FATAL) << "Out of bounds in interpolation.\n";
+
+    // Interpolate at the desired time
+    Eigen::Affine3d interp_world_to_ref_aff = dense_map::linearInterp(alpha, beg_world_to_ref_aff,
+                                                                      end_world_to_ref_aff);
+
+    interp_world_to_cam_aff = ref_to_cam_aff * interp_world_to_ref_aff;
+  }
+
+  return interp_world_to_cam_aff;
+}

 ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) {
   // Convert to lower-case
@@ -189,462 +375,295 @@ ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) {
   return loss_function;
 }

-// An error function minimizing the error of projection of haz cam
-// depth points to the haz cam image.
-struct DepthToHazError {
+// An error function minimizing the error of projecting
+// an xyz point into a camera that is bracketed by
+// two reference cameras. The precise timestamp offset
+// between them is also floated.
+struct BracketedCamError {
   EIGEN_MAKE_ALIGNED_OPERATOR_NEW
-  DepthToHazError(Eigen::Vector2d const& haz_pix, Eigen::Vector3d const& depth_xyz,
-                  std::vector<int> const& block_sizes,
-                  camera::CameraParameters const& haz_cam_params)
-    : m_haz_pix(haz_pix), m_depth_xyz(depth_xyz), m_block_sizes(block_sizes),
-      m_haz_cam_params(haz_cam_params) {
-    // Sanity check.
- if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_SCALAR_PARAMS) { - LOG(FATAL) << "DepthToHazError: The block sizes were not set up properly.\n"; + BracketedCamError(Eigen::Vector2d const& meas_dist_pix, + double left_ref_stamp, double right_ref_stamp, double cam_stamp, + std::vector const& block_sizes, + camera::CameraParameters const& cam_params): + m_meas_dist_pix(meas_dist_pix), + m_left_ref_stamp(left_ref_stamp), + m_right_ref_stamp(right_ref_stamp), + m_cam_stamp(cam_stamp), + m_block_sizes(block_sizes), + m_cam_params(cam_params), + m_num_focal_lengths(1) { + // Sanity check + if (m_block_sizes.size() != 8 || m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_RIGID_PARAMS || + m_block_sizes[3] != NUM_XYZ_PARAMS || m_block_sizes[4] != NUM_SCALAR_PARAMS || + m_block_sizes[5] != m_num_focal_lengths || m_block_sizes[6] != NUM_OPT_CTR_PARAMS || + m_block_sizes[7] != 1 // This will be overwritten shortly + ) { + LOG(FATAL) << "BracketedCamError: The block sizes were not set up properly.\n"; } + + // Set the correct distortion size. This cannot be done in the interface for now. + m_block_sizes[7] = m_cam_params.GetDistortion().size(); } // Call to work with ceres::DynamicNumericDiffCostFunction. - // Takes array of arrays as parameters. bool operator()(double const* const* parameters, double* residuals) const { - // Populate the intrinsics + Eigen::Affine3d world_to_cam_trans = + calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t + parameters[1], // end_world_to_ref_t + parameters[2], // ref_to_cam_trans + m_left_ref_stamp, m_right_ref_stamp, + parameters[4][0], // ref_to_cam_offset + m_cam_stamp); - // The current transform from the depth point cloud to the image - Eigen::Affine3d depth_to_image; - array_to_rigid_transform(depth_to_image, parameters[0]); + // World point + Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); - // Apply the scale - double depth_to_image_scale = parameters[1][0]; - depth_to_image.linear() *= depth_to_image_scale; + // Make a deep copy which we will modify + camera::CameraParameters cam_params = m_cam_params; + Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[5][0], parameters[5][0]); + Eigen::Vector2d optical_center(parameters[6][0], parameters[6][1]); + Eigen::VectorXd distortion(m_block_sizes[7]); + for (int i = 0; i < m_block_sizes[7]; i++) distortion[i] = parameters[7][i]; + cam_params.SetFocalLength(focal_vector); + cam_params.SetOpticalOffset(optical_center); + cam_params.SetDistortion(distortion); - // Convert from depth cloud coordinates to haz cam coordinates - Eigen::Vector3d X = depth_to_image * m_depth_xyz; + // Convert world point to given cam coordinates + X = world_to_cam_trans * X; - // Project into the camera - Eigen::Vector2d undist_pix = m_haz_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - // Eigen::Vector2d dist_pix; - // m_haz_cam_params.Convert(undist_pix, &dist_pix); + // Project into the image + Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + Eigen::Vector2d curr_dist_pix; + cam_params.Convert(undist_pix, &curr_dist_pix); // Compute the residuals - residuals[0] = undist_pix[0] - m_haz_pix[0]; - residuals[1] = undist_pix[1] - m_haz_pix[1]; + residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; + residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; return true; } // Factory to hide the construction of the CostFunction object from the 
client code. - static ceres::CostFunction* Create(Eigen::Vector2d const& nav_pix, Eigen::Vector3d const& depth_xyz, - std::vector const& block_sizes, - camera::CameraParameters const& haz_cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction( - new DepthToHazError(nav_pix, depth_xyz, block_sizes, haz_cam_params)); + static ceres::CostFunction* + Create(Eigen::Vector2d const& meas_dist_pix, double left_ref_stamp, double right_ref_stamp, + double cam_stamp, std::vector const& block_sizes, + camera::CameraParameters const& cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new BracketedCamError(meas_dist_pix, left_ref_stamp, right_ref_stamp, + cam_stamp, block_sizes, cam_params)); + + cost_function->SetNumResiduals(NUM_PIX_PARAMS); + + // The camera wrapper knows all of the block sizes to add, except + // for distortion, which is last + for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 + cost_function->AddParameterBlock(block_sizes[i]); - // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + // The distortion block size is added separately as it is variable + cost_function->AddParameterBlock(cam_params.GetDistortion().size()); - // The camera wrapper knows all of the block sizes to add. - for (size_t i = 0; i < block_sizes.size(); i++) { - cost_function->AddParameterBlock(block_sizes[i]); - } return cost_function; } private: - Eigen::Vector2d m_haz_pix; // The pixel observation - Eigen::Vector3d m_depth_xyz; // The measured position + Eigen::Vector2d m_meas_dist_pix; // Measured distorted current camera pixel + double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps + double m_cam_stamp; // Current cam timestamp std::vector m_block_sizes; - camera::CameraParameters m_haz_cam_params; -}; // End class DepthToHazError + camera::CameraParameters m_cam_params; + int m_num_focal_lengths; +}; // End class BracketedCamError -// An error function minimizing the error of projection of a point in the nav cam -// image. Both the point and the camera pose are variables of optimization. -struct NavError { +// An error function minimizing the product of a given weight and the +// error between a triangulated point and a measured depth point. The +// depth point needs to be transformed to world coordinates first. For +// that one has to do pose interpolation. +struct BracketedDepthError { EIGEN_MAKE_ALIGNED_OPERATOR_NEW - NavError(Eigen::Vector2d const& nav_pix, std::vector const& block_sizes, - camera::CameraParameters const& nav_cam_params) - : m_nav_pix(nav_pix), m_block_sizes(block_sizes), m_nav_cam_params(nav_cam_params) { + BracketedDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, + double left_ref_stamp, double right_ref_stamp, double cam_stamp, + std::vector const& block_sizes): + m_weight(weight), + m_meas_depth_xyz(meas_depth_xyz), + m_left_ref_stamp(left_ref_stamp), + m_right_ref_stamp(right_ref_stamp), + m_cam_stamp(cam_stamp), + m_block_sizes(block_sizes) { // Sanity check - if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_XYZ_PARAMS) { - LOG(FATAL) << "NavError: The block sizes were not set up properly.\n"; - } - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. - // Takes array of arrays as parameters. 
- bool operator()(double const* const* parameters, double* residuals) const { - // Populate the intrinsics - - Eigen::Affine3d world_to_nav_cam; - array_to_rigid_transform(world_to_nav_cam, parameters[0]); - - Eigen::Vector3d xyz; - for (int it = 0; it < NUM_XYZ_PARAMS; it++) xyz[it] = parameters[1][it]; - - // Convert to camera coordinates - Eigen::Vector3d X = world_to_nav_cam * xyz; - - // Project into the camera - Eigen::Vector2d undist_pix = m_nav_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - // Eigen::Vector2d dist_pix; - // m_nav_cam_params.Convert(undist_pix, &dist_pix); - - // Compute the residuals - residuals[0] = undist_pix[0] - m_nav_pix[0]; - residuals[1] = undist_pix[1] - m_nav_pix[1]; - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* Create(Eigen::Vector2d const& nav_pix, std::vector const& block_sizes, - camera::CameraParameters const& nav_cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction(new NavError(nav_pix, block_sizes, nav_cam_params)); - - // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); - - // The camera wrapper knows all of the block sizes to add. - for (size_t i = 0; i < block_sizes.size(); i++) { - cost_function->AddParameterBlock(block_sizes[i]); - } - return cost_function; - } - - private: - Eigen::Vector2d m_nav_pix; // The pixel observation - std::vector m_block_sizes; - camera::CameraParameters m_nav_cam_params; -}; // End class NavError - -// An error function minimizing the error of transforming depth points -// first to haz cam image coordinates, then to nav cam coordinates -// (via time interpolation) then to world coordinates, then by -// projecting into the nav cam image having a match point with the -// original haz cam image. This is a little tricky to follow, because -// each haz cam image is bound in time by two nav cam images. -struct DepthToNavError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - DepthToNavError(Eigen::Vector2d const& nav_pix, Eigen::Vector3d const& depth_xyz, - double alpha, // used for interpolation - bool match_left, std::vector const& block_sizes, camera::CameraParameters const& nav_cam_params) - : m_nav_pix(nav_pix), - m_depth_xyz(depth_xyz), - m_alpha(alpha), - m_match_left(match_left), - m_block_sizes(block_sizes), - m_nav_cam_params(nav_cam_params) { - // Sanity check. - if (m_block_sizes.size() != 5 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || - m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || - m_block_sizes[4] != NUM_SCALAR_PARAMS) { - LOG(FATAL) << "DepthToNavError: The block sizes were not set up properly.\n"; + if (m_block_sizes.size() != 7 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || + (m_block_sizes[3] != NUM_RIGID_PARAMS && m_block_sizes[3] != NUM_AFFINE_PARAMS) || + m_block_sizes[4] != NUM_SCALAR_PARAMS || + m_block_sizes[5] != NUM_XYZ_PARAMS || + m_block_sizes[6] != NUM_SCALAR_PARAMS) { + LOG(FATAL) << "BracketedDepthError: The block sizes were not set up properly.\n"; } } // Call to work with ceres::DynamicNumericDiffCostFunction. 
bool operator()(double const* const* parameters, double* residuals) const { - // As mentioned before, we have to deal with four transforms - - Eigen::Affine3d left_nav_trans; - array_to_rigid_transform(left_nav_trans, parameters[0]); - - Eigen::Affine3d right_nav_trans; - array_to_rigid_transform(right_nav_trans, parameters[1]); - - Eigen::Affine3d hazcam_to_navcam_trans; - array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]); + // Current world to camera transform + Eigen::Affine3d world_to_cam_trans = + calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t + parameters[1], // end_world_to_ref_t + parameters[2], // ref_to_cam_trans + m_left_ref_stamp, m_right_ref_stamp, + parameters[6][0], // ref_to_cam_offset + m_cam_stamp); + + // The current transform from the depth point cloud to the camera image + Eigen::Affine3d depth_to_image; + if (m_block_sizes[3] == NUM_AFFINE_PARAMS) + array_to_affine_transform(depth_to_image, parameters[3]); + else + array_to_rigid_transform(depth_to_image, parameters[3]); - // The haz cam depth to image transform, which has a fixed scale - Eigen::Affine3d hazcam_depth_to_image_trans; - array_to_rigid_transform(hazcam_depth_to_image_trans, parameters[3]); + // Apply the scale double depth_to_image_scale = parameters[4][0]; - hazcam_depth_to_image_trans.linear() *= depth_to_image_scale; - - // Convert from depth cloud coordinates to haz cam coordinates - Eigen::Vector3d X = hazcam_depth_to_image_trans * m_depth_xyz; - - // Convert to nav cam coordinates - X = hazcam_to_navcam_trans * X; + depth_to_image.linear() *= depth_to_image_scale; - // The haz cam to nav cam transform at the haz cam time is obtained - // by interpolation in time - Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(m_alpha, left_nav_trans, right_nav_trans); + // Convert from depth cloud coordinates to cam coordinates + Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; // Convert to world coordinates - X = interp_world_to_nav_trans.inverse() * X; - - // Transform to either the left or right nav camera coordinates, - // depending on for which one we managed to find a match with he - // haz cam image - if (m_match_left) { - X = left_nav_trans * X; - } else { - X = right_nav_trans * X; - } + M = world_to_cam_trans.inverse() * M; - // Project into the image - Eigen::Vector2d undist_pix = m_nav_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - // Eigen::Vector2d dist_pix; - // m_nav_cam_params.Convert(undist_pix, &dist_pix); + // Triangulated world point + Eigen::Vector3d X(parameters[5][0], parameters[5][1], parameters[5][2]); // Compute the residuals - residuals[0] = undist_pix[0] - m_nav_pix[0]; - residuals[1] = undist_pix[1] - m_nav_pix[1]; + for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { + residuals[it] = m_weight * (X[it] - M[it]); + } return true; } // Factory to hide the construction of the CostFunction object from the client code. 
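// [Editor's note, not part of the patch] For context, a sketch of how such a
// dynamic cost function is wired into a ceres::Problem; the parameter-block
// names below (beg_ref, end_ref, ref_to_cam, depth_to_image, tri_point) are
// hypothetical stand-ins for the arrays the refiner owns:
//
//   ceres::CostFunction* cf =
//     dense_map::BracketedDepthError::Create(FLAGS_depth_tri_weight, depth_xyz,
//                                            left_stamp, right_stamp, cam_stamp,
//                                            depth_block_sizes);
//   std::vector<double*> blocks = {beg_ref, end_ref, ref_to_cam, depth_to_image,
//                                  &depth_to_image_scale, tri_point, &timestamp_offset};
//   problem.AddResidualBlock(cf, new ceres::CauchyLoss(FLAGS_robust_threshold), blocks);
//
// The 3D residual is pre-multiplied by depth_tri_weight so that meter-scale
// depth errors are commensurate with pixel-scale reprojection errors.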
- static ceres::CostFunction* Create(Eigen::Vector2d const& nav_pix, Eigen::Vector3d const& depth_xyz, double alpha, - bool match_left, std::vector const& block_sizes, - camera::CameraParameters const& nav_cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction( - new DepthToNavError(nav_pix, depth_xyz, alpha, match_left, block_sizes, nav_cam_params)); + static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz, + double left_ref_stamp, double right_ref_stamp, + double cam_stamp, std::vector const& block_sizes) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new BracketedDepthError(weight, meas_depth_xyz, left_ref_stamp, right_ref_stamp, + cam_stamp, block_sizes)); // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); - // The camera wrapper knows all of the block sizes to add. - for (size_t i = 0; i < block_sizes.size(); i++) { + for (size_t i = 0; i < block_sizes.size(); i++) cost_function->AddParameterBlock(block_sizes[i]); - } + return cost_function; } private: - Eigen::Vector2d m_nav_pix; // The nav cam pixel observation - Eigen::Vector3d m_depth_xyz; // The measured position in the depth camera - double m_alpha; - bool m_match_left; + double m_weight; // How much weight to give to this constraint + Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement + double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps + double m_cam_stamp; // Current cam timestamp std::vector m_block_sizes; - camera::CameraParameters m_nav_cam_params; -}; // End class DepthToNavError +}; // End class BracketedDepthError -// An error function minimizing the error of transforming depth points at haz cam -// time through enough coordinate systems until it can project -// into the sci cam at the sci cam time -struct DepthToSciError { +// An error function minimizing the product of a given weight and the +// error between a mesh point and a transformed measured depth point. The +// depth point needs to be transformed to world coordinates first. For +// that one has to do pose interpolation. +struct BracketedDepthMeshError { EIGEN_MAKE_ALIGNED_OPERATOR_NEW - DepthToSciError(Eigen::Vector2d const& dist_sci_pix, Eigen::Vector3d const& depth_xyz, double alpha_haz, - double alpha_sci, // used for interpolation - std::vector const& block_sizes, camera::CameraParameters const& sci_cam_params) - : m_dist_sci_pix(dist_sci_pix), - m_depth_xyz(depth_xyz), - m_alpha_haz(alpha_haz), - m_alpha_sci(alpha_sci), - m_block_sizes(block_sizes), - m_sci_cam_params(sci_cam_params), - m_num_focal_lengths(1) { - // Sanity check. 
- if (m_block_sizes.size() != 9 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || - m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || - m_block_sizes[4] != NUM_RIGID_PARAMS || m_block_sizes[5] != NUM_SCALAR_PARAMS || - m_block_sizes[6] != m_num_focal_lengths || m_block_sizes[7] != NUM_OPT_CTR_PARAMS || - m_block_sizes[8] != m_sci_cam_params.GetDistortion().size()) { - LOG(FATAL) << "DepthToSciError: The block sizes were not set up properly.\n"; + BracketedDepthMeshError(double weight, + Eigen::Vector3d const& meas_depth_xyz, + Eigen::Vector3d const& mesh_xyz, + double left_ref_stamp, double right_ref_stamp, double cam_stamp, + std::vector const& block_sizes): + m_weight(weight), + m_meas_depth_xyz(meas_depth_xyz), + m_mesh_xyz(mesh_xyz), + m_left_ref_stamp(left_ref_stamp), + m_right_ref_stamp(right_ref_stamp), + m_cam_stamp(cam_stamp), + m_block_sizes(block_sizes) { + // Sanity check + if (m_block_sizes.size() != 6 || + m_block_sizes[0] != NUM_RIGID_PARAMS || + m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || + (m_block_sizes[3] != NUM_RIGID_PARAMS && m_block_sizes[3] != NUM_AFFINE_PARAMS) || + m_block_sizes[4] != NUM_SCALAR_PARAMS || + m_block_sizes[5] != NUM_SCALAR_PARAMS) { + LOG(FATAL) << "BracketedDepthMeshError: The block sizes were not set up properly.\n"; } } // Call to work with ceres::DynamicNumericDiffCostFunction. bool operator()(double const* const* parameters, double* residuals) const { - // Make a deep copy which we will modify - camera::CameraParameters sci_cam_params = m_sci_cam_params; - - // We have to deal with four transforms - Eigen::Affine3d left_nav_trans; - array_to_rigid_transform(left_nav_trans, parameters[0]); - - Eigen::Affine3d right_nav_trans; - array_to_rigid_transform(right_nav_trans, parameters[1]); - - Eigen::Affine3d hazcam_to_navcam_trans; - array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]); - - Eigen::Affine3d scicam_to_hazcam_trans; - array_to_rigid_transform(scicam_to_hazcam_trans, parameters[3]); - - // The haz cam depth to image transform, which has a fixed scale - Eigen::Affine3d hazcam_depth_to_image_trans; - array_to_rigid_transform(hazcam_depth_to_image_trans, parameters[4]); - double depth_to_image_scale = parameters[5][0]; - hazcam_depth_to_image_trans.linear() *= depth_to_image_scale; - - // Intrinsics, including a single focal length - Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[6][0], parameters[6][0]); - Eigen::Vector2d optical_center(parameters[7][0], parameters[7][1]); - Eigen::VectorXd distortion(m_block_sizes[8]); - for (int i = 0; i < m_block_sizes[8]; i++) distortion[i] = parameters[8][i]; - sci_cam_params.SetFocalLength(focal_vector); - sci_cam_params.SetOpticalOffset(optical_center); - sci_cam_params.SetDistortion(distortion); - - // Convert from depth cloud coordinates to haz cam coordinates - Eigen::Vector3d X = hazcam_depth_to_image_trans * m_depth_xyz; - - // Convert to nav cam coordinates at haz cam time - X = hazcam_to_navcam_trans * X; + // Current world to camera transform + Eigen::Affine3d world_to_cam_trans = + calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t + parameters[1], // end_world_to_ref_t + parameters[2], // ref_to_cam_trans + m_left_ref_stamp, m_right_ref_stamp, + parameters[5][0], // ref_to_cam_offset + m_cam_stamp); + + // The current transform from the depth point cloud to the camera image + Eigen::Affine3d depth_to_image; + if (m_block_sizes[3] == NUM_AFFINE_PARAMS) + 
array_to_affine_transform(depth_to_image, parameters[3]); + else + array_to_rigid_transform(depth_to_image, parameters[3]); - // World to navcam at haz time - Eigen::Affine3d interp_world_to_nav_trans_haz_time = - dense_map::linearInterp(m_alpha_haz, left_nav_trans, right_nav_trans); + // Apply the scale + double depth_to_image_scale = parameters[4][0]; + depth_to_image.linear() *= depth_to_image_scale; - // World to nav time at sci time - Eigen::Affine3d interp_world_to_nav_trans_sci_time = - dense_map::linearInterp(m_alpha_sci, left_nav_trans, right_nav_trans); + // Convert from depth cloud coordinates to cam coordinates + Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; // Convert to world coordinates - X = interp_world_to_nav_trans_haz_time.inverse() * X; - - // Convert to nav coordinates at sci cam time - X = interp_world_to_nav_trans_sci_time * X; - - // Convert to sci cam coordinates - X = scicam_to_hazcam_trans.inverse() * hazcam_to_navcam_trans.inverse() * X; - - // Convert to sci cam pix - Eigen::Vector2d undist_pix = sci_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - - // Apply distortion - Eigen::Vector2d comp_dist_sci_pix; - sci_cam_params.Convert(undist_pix, &comp_dist_sci_pix); + M = world_to_cam_trans.inverse() * M; // Compute the residuals - residuals[0] = comp_dist_sci_pix[0] - m_dist_sci_pix[0]; - residuals[1] = comp_dist_sci_pix[1] - m_dist_sci_pix[1]; - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* Create(Eigen::Vector2d const& dist_sci_pix, Eigen::Vector3d const& depth_xyz, - double alpha_haz, double alpha_sci, std::vector const& block_sizes, - camera::CameraParameters const& sci_cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction( - new DepthToSciError(dist_sci_pix, depth_xyz, alpha_haz, alpha_sci, block_sizes, sci_cam_params)); - - // The residual size is always the same. - cost_function->SetNumResiduals(NUM_RESIDUALS); - - // The camera wrapper knows all of the block sizes to add. - for (size_t i = 0; i < block_sizes.size(); i++) { - cost_function->AddParameterBlock(block_sizes[i]); + for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { + residuals[it] = m_weight * (m_mesh_xyz[it] - M[it]); } - return cost_function; - } - - private: - Eigen::Vector2d m_dist_sci_pix; // The sci cam pixel observation - Eigen::Vector3d m_depth_xyz; // The measured position in the depth camera - double m_alpha_haz; - double m_alpha_sci; - std::vector m_block_sizes; - camera::CameraParameters m_sci_cam_params; - int m_num_focal_lengths; -}; // End class DepthToSciError - -// An error function projecting an xyz point in the sci cam -// bounded by two nav cams. -struct SciError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - SciError(Eigen::Vector2d const& dist_sci_pix, - double alpha, // used for interpolation - std::vector const& block_sizes, camera::CameraParameters const& sci_cam_params) - : m_dist_sci_pix(dist_sci_pix), - m_alpha(alpha), - m_block_sizes(block_sizes), - m_sci_cam_params(sci_cam_params), - m_num_focal_lengths(1) { - // Sanity check. 
- if (m_block_sizes.size() != 8 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || - m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || - m_block_sizes[4] != NUM_XYZ_PARAMS || m_block_sizes[5] != m_num_focal_lengths || - m_block_sizes[6] != NUM_OPT_CTR_PARAMS || m_block_sizes[7] != m_sci_cam_params.GetDistortion().size()) { - LOG(FATAL) << "SciError: The block sizes were not set up properly.\n"; - } - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. - bool operator()(double const* const* parameters, double* residuals) const { - // Make a deep copy which we will modify - camera::CameraParameters sci_cam_params = m_sci_cam_params; - - // We have to deal with four transforms - - Eigen::Affine3d left_nav_trans; - array_to_rigid_transform(left_nav_trans, parameters[0]); - - Eigen::Affine3d right_nav_trans; - array_to_rigid_transform(right_nav_trans, parameters[1]); - - Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(m_alpha, left_nav_trans, right_nav_trans); - - Eigen::Affine3d hazcam_to_navcam_trans; - array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]); - - Eigen::Affine3d scicam_to_hazcam_trans; - array_to_rigid_transform(scicam_to_hazcam_trans, parameters[3]); - - Eigen::Vector3d X; - for (int it = 0; it < NUM_XYZ_PARAMS; it++) X[it] = parameters[4][it]; - - // Intrinsics, including a single focal length - Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[5][0], parameters[5][0]); - Eigen::Vector2d optical_center(parameters[6][0], parameters[6][1]); - Eigen::VectorXd distortion(m_block_sizes[7]); - for (int i = 0; i < m_block_sizes[7]; i++) distortion[i] = parameters[7][i]; - sci_cam_params.SetFocalLength(focal_vector); - sci_cam_params.SetOpticalOffset(optical_center); - sci_cam_params.SetDistortion(distortion); - - // Find the sci cam to world transform - Eigen::Affine3d interp_world_to_sci_trans = - scicam_to_hazcam_trans.inverse() * hazcam_to_navcam_trans.inverse() * interp_world_to_nav_trans; - - // Project into the sci cam - Eigen::Vector3d sciX = interp_world_to_sci_trans * X; - Eigen::Vector2d undist_sci_pix = sci_cam_params.GetFocalVector().cwiseProduct(sciX.hnormalized()); - - // Convert to distorted pixel - Eigen::Vector2d comp_dist_sci_pix; - sci_cam_params.Convert(undist_sci_pix, &comp_dist_sci_pix); - - // Compute the residuals - residuals[0] = comp_dist_sci_pix[0] - m_dist_sci_pix[0]; - residuals[1] = comp_dist_sci_pix[1] - m_dist_sci_pix[1]; return true; } // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* Create(Eigen::Vector2d const& dist_sci_pix, double alpha, - std::vector const& block_sizes, - camera::CameraParameters const& sci_cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction( - new SciError(dist_sci_pix, alpha, block_sizes, sci_cam_params)); + static ceres::CostFunction* Create(double weight, + Eigen::Vector3d const& meas_depth_xyz, + Eigen::Vector3d const& mesh_xyz, + double left_ref_stamp, double right_ref_stamp, + double cam_stamp, std::vector const& block_sizes) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction + (new BracketedDepthMeshError(weight, meas_depth_xyz, mesh_xyz, + left_ref_stamp, right_ref_stamp, + cam_stamp, block_sizes)); // The residual size is always the same. 
- cost_function->SetNumResiduals(NUM_RESIDUALS); + cost_function->SetNumResiduals(NUM_XYZ_PARAMS); - // The camera wrapper knows all of the block sizes to add. - for (size_t i = 0; i < block_sizes.size(); i++) { + for (size_t i = 0; i < block_sizes.size(); i++) cost_function->AddParameterBlock(block_sizes[i]); - } + return cost_function; } private: - Eigen::Vector2d m_dist_sci_pix; // The sci cam pixel observation - double m_alpha; + double m_weight; // How much weight to give to this constraint + Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement + Eigen::Vector3d m_mesh_xyz; // Point on preexisting mesh + double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps + double m_cam_stamp; // Current cam timestamp std::vector m_block_sizes; - camera::CameraParameters m_sci_cam_params; - int m_num_focal_lengths; -}; // End class SciError +}; // End class BracketedDepthMeshError // An error function minimizing a weight times the distance from a // variable xyz point to a fixed reference xyz point. @@ -662,7 +681,8 @@ struct XYZError { // TODO(oalexan1): May want to use the analytical Ceres cost function bool operator()(double const* const* parameters, double* residuals) const { // Compute the residuals - for (int it = 0; it < NUM_XYZ_PARAMS; it++) residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]); + for (int it = 0; it < NUM_XYZ_PARAMS; it++) + residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]); return true; } @@ -672,7 +692,8 @@ struct XYZError { std::vector const& block_sizes, double weight) { ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction(new XYZError(ref_xyz, block_sizes, weight)); + new ceres::DynamicNumericDiffCostFunction + (new XYZError(ref_xyz, block_sizes, weight)); // The residual size is always the same cost_function->SetNumResiduals(NUM_XYZ_PARAMS); @@ -690,8 +711,6 @@ struct XYZError { double m_weight; }; // End class XYZError -enum imgType { NAV_CAM, SCI_CAM, HAZ_CAM }; - // Calculate the rmse residual for each residual type. 
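// [Editor's note, not part of the patch] A compilable sketch of the percentile
// convention used by the function below: sort the absolute residuals, then
// index at a fraction of the vector length.
//
//   #include <algorithm>
//   #include <vector>
//
//   double percentileSketch(std::vector<double> vals, double frac) {
//     if (vals.empty()) return 0.0;  // the real code prints "none" instead
//     std::sort(vals.begin(), vals.end());
//     int i = static_cast<int>(frac * vals.size());
//     i = std::min(i, static_cast<int>(vals.size()) - 1);
//     return vals[i];
//   }
//
// With frac = 0.25, 0.50, 0.75, plus the last element, this yields the four
// columns printed for each residual type.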
void calc_median_residuals(std::vector const& residuals, std::vector const& residual_names, @@ -702,12 +721,13 @@ void calc_median_residuals(std::vector const& residuals, LOG(FATAL) << "There must be as many residuals as residual names."; std::map > stats; - for (size_t it = 0; it < residuals.size(); it++) stats[residual_names[it]] = std::vector(); // initialize + for (size_t it = 0; it < residuals.size(); it++) + stats[residual_names[it]] = std::vector(); // initialize for (size_t it = 0; it < residuals.size(); it++) stats[residual_names[it]].push_back(std::abs(residuals[it])); - std::cout << "The 25, 50 and 75 percentile residual stats " << tag << std::endl; + std::cout << "The 25, 50, 75, and 100th percentile residual stats " << tag << std::endl; for (auto it = stats.begin(); it != stats.end(); it++) { std::string const& name = it->first; std::vector vals = stats[name]; // make a copy @@ -716,1111 +736,966 @@ void calc_median_residuals(std::vector const& residuals, int len = vals.size(); int it1 = static_cast(0.25 * len); - int it2 = static_cast(0.5 * len); + int it2 = static_cast(0.50 * len); int it3 = static_cast(0.75 * len); + int it4 = static_cast(len - 1); - if (len = 0) - std::cout << name << ": " - << "none" << std::endl; + if (len == 0) + std::cout << name << ": " << "none"; else - std::cout << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' << vals[it3] << std::endl; + std::cout << std::setprecision(5) + << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' + << vals[it3] << ' ' << vals[it4]; + std::cout << " (" << len << " residuals)" << std::endl; } } - // Intersect ray with mesh. Return true on success. - bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, camera::CameraParameters const& cam_params, - Eigen::Affine3d const& world_to_cam, mve::TriangleMesh::Ptr const& mesh, - std::shared_ptr const& bvh_tree, - double min_ray_dist, double max_ray_dist, - // Output - Eigen::Vector3d& intersection) { - // Initialize the output - intersection = Eigen::Vector3d(0.0, 0.0, 0.0); - - // Ray from camera going through the pixel - Eigen::Vector3d cam_ray(undist_pix.x() / cam_params.GetFocalVector()[0], - undist_pix.y() / cam_params.GetFocalVector()[1], 1.0); - cam_ray.normalize(); - - Eigen::Affine3d cam_to_world = world_to_cam.inverse(); - Eigen::Vector3d world_ray = cam_to_world.linear() * cam_ray; - Eigen::Vector3d cam_ctr = cam_to_world.translation(); - - // Set up the ray structure for the mesh - BVHTree::Ray bvh_ray; - bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); - bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); - bvh_ray.dir.normalize(); - - bvh_ray.tmin = min_ray_dist; - bvh_ray.tmax = max_ray_dist; - - // Intersect the ray with the mesh - BVHTree::Hit hit; - if (bvh_tree->intersect(bvh_ray, &hit)) { - double cam_to_mesh_dist = hit.t; - intersection = cam_ctr + cam_to_mesh_dist * world_ray; - return true; - } +// Sort by timestamps adjusted to be relative to the ref camera clock +bool timestampLess(cameraImage i, cameraImage j) { + return (i.ref_timestamp < j.ref_timestamp); +} - return false; - } +// Find the haz cam depth measurement. Use nearest neighbor interpolation +// to look into the depth cloud. 
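// [Editor's note, not part of the patch] Typical use of the helper below, for
// illustration: given a distorted interest point in the haz cam image, fetch
// the matching depth measurement and skip the point when the cloud has no
// valid reading there:
//
//   Eigen::Vector3d depth_xyz;
//   if (!dense_map::depthValue(depth_cloud, Eigen::Vector2d(ip.x, ip.y), depth_xyz))
//     continue;  // no depth at this pixel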
+bool depthValue( // Inputs + cv::Mat const& depth_cloud, Eigen::Vector2d const& dist_ip, + // Output + Eigen::Vector3d& depth_xyz) { + depth_xyz = Eigen::Vector3d(0, 0, 0); // initialize - // Prevent the linter from messing up with the beautiful formatting below - void add_haz_nav_cost // NOLINT - (// Inputs // NOLINT - int haz_it, int nav_it, int nav_cam_start, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - MATCH_PAIR const & match_pair, // NOLINT - std::vector const & haz_cam_intensity_timestamps, // NOLINT - std::vector const & sparse_map_timestamps, // NOLINT - std::map const & haz_cam_to_left_nav_cam_index, // NOLINT - std::map const & haz_cam_to_right_nav_cam_index, // NOLINT - camera::CameraParameters const & nav_cam_params, // NOLINT - camera::CameraParameters const & haz_cam_params, // NOLINT - std::vector const & depth_to_nav_block_sizes, // NOLINT - std::vector const & depth_to_haz_block_sizes, // NOLINT - std::vector const & nav_cam_affines, // NOLINT - std::vector const & depth_clouds, // NOLINT - // Outputs // NOLINT - std::vector & residual_names, // NOLINT - double & hazcam_depth_to_image_scale, // NOLINT - std::vector & nav_cam_vec, // NOLINT - std::vector & hazcam_to_navcam_vec, // NOLINT - std::vector & hazcam_depth_to_image_vec, // NOLINT - ceres::Problem & problem) { // NOLINT - // Figure out the two nav cam indices bounding the current haz cam - // Must have sparse_map_timestamp + navcam_to_hazcam_timestamp_offset <= haz_timestamp - // which must be < next sparse_map_timestamp + navcam_to_hazcam_timestamp_offset. - bool match_left = false; - if (haz_cam_intensity_timestamps[haz_it] >= - sparse_map_timestamps[nav_cam_start + nav_it] + navcam_to_hazcam_timestamp_offset) { - match_left = true; - } else { - match_left = false; // match right then - } + if (depth_cloud.cols == 0 && depth_cloud.rows == 0) return false; // empty cloud - auto left_it = haz_cam_to_left_nav_cam_index.find(haz_it); - auto right_it = haz_cam_to_right_nav_cam_index.find(haz_it); - if (left_it == haz_cam_to_left_nav_cam_index.end() || - right_it == haz_cam_to_right_nav_cam_index.end()) - LOG(FATAL) << "Book-keeping error in add_haz_nav_cost."; + int col = round(dist_ip[0]); + int row = round(dist_ip[1]); - int left_nav_it = left_it->second; - int right_nav_it = right_it->second; + if (col < 0 || row < 0 || col > depth_cloud.cols || row > depth_cloud.rows) + LOG(FATAL) << "Out of range in depth cloud."; - if (nav_cam_start + right_nav_it >= static_cast(sparse_map_timestamps.size()) || - haz_it >= static_cast(haz_cam_intensity_timestamps.size())) - LOG(FATAL) << "Book-keeping error 2."; + // After rounding one may hit the bound + if (col == depth_cloud.cols || row == depth_cloud.rows) + return false; - // Left and right nav cam image time, in haz_cam's time measurement - double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] - + navcam_to_hazcam_timestamp_offset; - double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] - + navcam_to_hazcam_timestamp_offset; - double haz_time = haz_cam_intensity_timestamps[haz_it]; + cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); - bool good = (left_time <= haz_time && haz_time < right_time); + // Skip invalid measurements + if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) + return false; - if (!good) LOG(FATAL) << "Book-keeping error 3."; + depth_xyz = Eigen::Vector3d(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); - // The current nav it better be either the left or right kind - if (nav_it != left_nav_it && nav_it 
!= right_nav_it) LOG(FATAL) << "Book-keeping error 4.";

+  return true;
+}

-  // Find the transform from the world to nav cam at the haz cam time
-  Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it];
-  Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it];
-  double alpha = (haz_time - left_time) / (right_time - left_time);
-  if (right_time == left_time) alpha = 0.0;  // handle division by zero

+// Project given images with optimized cameras onto mesh. It is
+// assumed that the most up-to-date cameras were copied/interpolated
+// from the optimizer structures into the world_to_cam vector.
+void meshProjectCameras(std::vector<std::string> const& cam_names,
+                        std::vector<camera::CameraParameters> const& cam_params,
+                        std::vector<cameraImage> const& cam_images,
+                        std::vector<Eigen::Affine3d> const& world_to_cam,
+                        mve::TriangleMesh::Ptr const& mesh,
+                        std::shared_ptr<BVHTree> const& bvh_tree,
+                        int ref_camera_type, int nav_cam_num_exclude_boundary_pixels,
+                        std::string const& out_dir) {
+  if (cam_names.size() != cam_params.size())
+    LOG(FATAL) << "There must be as many camera names as sets of camera parameters.\n";
+  if (cam_images.size() != world_to_cam.size())
+    LOG(FATAL) << "There must be as many camera images as camera poses.\n";
+  if (out_dir.empty())
+    LOG(FATAL) << "The output directory is empty.\n";
+
+  char filename_buffer[1000];
+
+  for (size_t cid = 0; cid < cam_images.size(); cid++) {
+    double timestamp = cam_images[cid].timestamp;
+    int cam_type = cam_images[cid].camera_type;
+
+    int num_exclude_boundary_pixels = 0;
+    if (cam_type == ref_camera_type)
+      num_exclude_boundary_pixels = nav_cam_num_exclude_boundary_pixels;
+
+    // Must use the 10.7f format for the timestamp as everywhere else in the code
+    snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s",
+             out_dir.c_str(), timestamp, cam_names[cam_type].c_str());
+    std::string out_prefix = filename_buffer;  // convert to string
+
+    std::cout << "Creating texture for: " << out_prefix << std::endl;
+    meshProject(mesh, bvh_tree, cam_images[cid].image, world_to_cam[cid], cam_params[cam_type],
+                num_exclude_boundary_pixels, out_prefix);
+  }
+}

-  if (!FLAGS_timestamp_interpolation) alpha = round(alpha);

+// Rebuild the map.
+// TODO(oalexan1): This must be integrated in astrobee.
+void RebuildMap(std::string const& map_file,  // Will be used for temporary saving of aux data
+                camera::CameraParameters const& cam_params,
+                boost::shared_ptr<sparse_mapping::SparseMap> sparse_map) {
+  std::string rebuild_detector = "SURF";
+  std::cout << "Rebuilding map with " << rebuild_detector << " detector.";
+
+  // Programmatically set the command line option for Astrobee's map-building
+  // min angle based on the corresponding refiner flag.
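// [Editor's note, not part of the patch] gflags expects both the flag name and
// its new value as strings, hence the double is first streamed into a string.
// A standalone illustration of the same call:
//
//   std::ostringstream oss;
//   oss << 0.5;  // min angle, in degrees
//   google::SetCommandLineOption("min_valid_angle", oss.str().c_str());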
+ std::ostringstream oss; + oss << FLAGS_refiner_min_angle; + std::string min_valid_angle = oss.str(); + google::SetCommandLineOption("min_valid_angle", min_valid_angle.c_str()); + if (!gflags::GetCommandLineOption("min_valid_angle", &min_valid_angle)) + LOG(FATAL) << "Failed to get the value of --min_valid_angle in Astrobee " + << "map-building software.\n"; + // The newline below is due to the sparse map software not putting a newline + std::cout << "\nSetting --min_valid_angle " << min_valid_angle << ".\n"; + + // Copy some data to make sure it does not get lost on resetting things below + std::vector<Eigen::Affine3d> world_to_ref_t = sparse_map->cid_to_cam_t_global_; + std::vector<std::map<int, int>> pid_to_cid_fid = sparse_map->pid_to_cid_fid_; + + // Ensure the new camera parameters are set + sparse_map->SetCameraParameters(cam_params); + + std::cout << "Detecting features."; + sparse_map->DetectFeatures(); + + std::cout << "Matching features."; + // Borrow from the original map which images should be matched with which. + sparse_map->cid_to_cid_.clear(); + for (size_t p = 0; p < pid_to_cid_fid.size(); p++) { + std::map<int, int> const& track = pid_to_cid_fid[p]; // alias + for (std::map<int, int>::const_iterator it1 = track.begin(); + it1 != track.end() ; it1++) { + for (std::map<int, int>::const_iterator it2 = it1; + it2 != track.end() ; it2++) { + if (it1->first != it2->first) { + // Never match an image with itself + sparse_map->cid_to_cid_[it1->first].insert(it2->first); + } + } + } + } - Eigen::Affine3d world_to_nav_trans_at_haz_time - = dense_map::linearInterp(alpha, left_nav_trans, right_nav_trans); + sparse_mapping::MatchFeatures(sparse_mapping::EssentialFile(map_file), + sparse_mapping::MatchesFile(map_file), sparse_map.get()); + for (size_t i = 0; i < world_to_ref_t.size(); i++) + sparse_map->SetFrameGlobalTransform(i, world_to_ref_t[i]); + + // Wipe file that is no longer needed + try { + std::remove(sparse_mapping::EssentialFile(map_file).c_str()); + } catch (...) {} + + std::cout << "Building tracks."; + bool rm_invalid_xyz = true; // by now cameras are good, so filter out bad stuff + sparse_mapping::BuildTracks(rm_invalid_xyz, + sparse_mapping::MatchesFile(map_file), + sparse_map.get()); + + // It is essential that during re-building we do not vary the + // cameras. Those were usually computed with a lot of SURF features, + // while rebuilding is usually done with many fewer ORGBRISK + // features. + bool fix_cameras = true; + if (fix_cameras) + std::cout << "Performing bundle adjustment with fixed cameras."; + else + std::cout << "Performing bundle adjustment while floating cameras."; - std::vector<InterestPoint> const& haz_ip_vec = match_pair.first; // alias - std::vector<InterestPoint> const& nav_ip_vec = match_pair.second; // alias + sparse_mapping::BundleAdjust(fix_cameras, sparse_map.get()); +} - cv::Mat const& depth_cloud = depth_clouds[haz_it]; // alias +// Compute the transforms from the world to every camera, using pose interpolation +// if necessary.
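+// (For a camera at time t bracketed by reference times t0 <= t < t1, the +// interpolation weight has the form alpha = (t - t0) / (t1 - t0), matching +// the alpha used by the bracketing logic in the code removed above.)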
+void calc_world_to_cam_transforms( // Inputs + std::vector<dense_map::cameraImage> const& cams, + std::vector<double> const& world_to_ref_vec, + std::vector<double> const& ref_timestamps, + std::vector<double> const& ref_to_cam_vec, + std::vector<double> const& ref_to_cam_timestamp_offsets, + // Output + std::vector<Eigen::Affine3d>& world_to_cam) { + if (ref_to_cam_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_to_cam_timestamp_offsets.size()) + LOG(FATAL) << "Must have as many transforms to reference as timestamp offsets.\n"; + if (world_to_ref_vec.size() / dense_map::NUM_RIGID_PARAMS != ref_timestamps.size()) + LOG(FATAL) << "Must have as many reference timestamps as reference cameras.\n"; + + world_to_cam.resize(cams.size()); + + for (size_t it = 0; it < cams.size(); it++) { + int beg_index = cams[it].beg_ref_index; + int end_index = cams[it].end_ref_index; + int cam_type = cams[it].camera_type; + world_to_cam[it] = dense_map::calc_world_to_cam_trans + (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_index], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_index], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], + ref_timestamps[beg_index], ref_timestamps[end_index], + ref_to_cam_timestamp_offsets[cam_type], + cams[it].timestamp); + } + return; +} - for (size_t ip_it = 0; ip_it < haz_ip_vec.size(); ip_it++) { - // Find the haz cam depth measurement. Use nearest neighbor interpolation - // to look into the depth cloud. - int col = round(haz_ip_vec[ip_it].x); - int row = round(haz_ip_vec[ip_it].y); +// Look up a map value and throw an error when not found +template <class A, class B> +B mapVal(std::map<A, B> const& map, A const& a) { + auto it = map.find(a); + if (it == map.end()) + LOG(FATAL) << "Cannot look up expected map value.\n"; - if (col < 0 || row < 0 || col >= depth_cloud.cols || row >= depth_cloud.rows) - LOG(FATAL) << "Book-keeping error 5."; + return it->second; +} - // Skip any point that goes out of bounds due to rounding - if (col == depth_cloud.cols || row == depth_cloud.rows) continue; +// Get a map value while being const-correct and also checking that the value exists. +template <class T> +T getMapValue(std::vector<std::map<int, std::map<int, T>>> const& pid_cid_fid, size_t pid, int cid, + int fid) { + if (pid_cid_fid.size() <= pid) + LOG(FATAL) << "Current pid is out of range.\n"; - cv::Vec3f cv_depth_xyz = depth_cloud.at<cv::Vec3f>(row, col); + auto& cid_fid_map = pid_cid_fid[pid]; // alias + auto cid_it = cid_fid_map.find(cid); + if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid is out of range.\n"; - // Skip invalid measurements - if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) continue; + auto& fid_map = cid_it->second; // alias + auto fid_it = fid_map.find(fid); + if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; - // Convert to Eigen - Eigen::Vector3d depth_xyz(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); + return fid_it->second; +} - Eigen::Vector2d undist_haz_ip; - Eigen::Vector2d undist_nav_ip; - { - // Make sure we don't use the distorted pixels from now on - Eigen::Vector2d haz_ip(haz_ip_vec[ip_it].x, haz_ip_vec[ip_it].y); - Eigen::Vector2d nav_ip(nav_ip_vec[ip_it].x, nav_ip_vec[ip_it].y); - haz_cam_params.Convert(haz_ip, &undist_haz_ip); - nav_cam_params.Convert(nav_ip, &undist_nav_ip); - } +// Set a map value while taking care that the place for it exists.
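+// (For instance, the outlier filtering further down calls +// setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0) to flag a feature as an +// outlier once the storage for its flag exists.)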
+void setMapValue(std::vector<std::map<int, std::map<int, int>>> & pid_cid_fid, + size_t pid, int cid, int fid, int val) { + if (pid_cid_fid.size() <= pid) + LOG(FATAL) << "Current pid is out of range.\n"; - // Ensure the depth point projects well into the haz cam interest point - ceres::CostFunction* depth_to_haz_cost_function = - dense_map::DepthToHazError::Create(undist_haz_ip, depth_xyz, depth_to_haz_block_sizes, - haz_cam_params); - ceres::LossFunction* depth_to_haz_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - problem.AddResidualBlock(depth_to_haz_cost_function, depth_to_haz_loss_function, - &hazcam_depth_to_image_vec[0], - &hazcam_depth_to_image_scale); - residual_names.push_back("haznavhaz1"); - residual_names.push_back("haznavhaz2"); - - // Ensure that the depth points projects well in the nav cam interest point - ceres::CostFunction* depth_to_nav_cost_function - = dense_map::DepthToNavError::Create(undist_nav_ip, depth_xyz, alpha, match_left, - depth_to_nav_block_sizes, nav_cam_params); - ceres::LossFunction* depth_to_nav_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - int left_nav_index = NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it); - int right_nav_index = NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it); - problem.AddResidualBlock(depth_to_nav_cost_function, depth_to_nav_loss_function, - &nav_cam_vec[left_nav_index], - &nav_cam_vec[right_nav_index], - &hazcam_to_navcam_vec[0], &hazcam_depth_to_image_vec[0], - &hazcam_depth_to_image_scale); - residual_names.push_back("haznavnav1"); - residual_names.push_back("haznavnav2"); - - if (FLAGS_fix_map) { - problem.SetParameterBlockConstant(&nav_cam_vec[left_nav_index]); - problem.SetParameterBlockConstant(&nav_cam_vec[right_nav_index]); - } - } + auto& cid_fid_map = pid_cid_fid[pid]; // alias + auto cid_it = cid_fid_map.find(cid); + if (cid_it == cid_fid_map.end()) LOG(FATAL) << "Current cid is out of range.\n"; - return; - } + auto& fid_map = cid_it->second; // alias + auto fid_it = fid_map.find(fid); + if (fid_it == fid_map.end()) LOG(FATAL) << "Current fid is out of range.\n"; - // Prevent the linter from messing up with the beautiful formatting below - void add_haz_sci_cost(// Inputs // NOLINT - int haz_it, int sci_it, int nav_cam_start, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - double scicam_to_hazcam_timestamp_offset, // NOLINT - MATCH_PAIR const & match_pair, // NOLINT - std::vector<double> const & haz_cam_intensity_timestamps, // NOLINT - std::vector<double> const & sparse_map_timestamps, // NOLINT - std::vector<double> const & sci_cam_timestamps, // NOLINT - std::map<int, int> const & haz_cam_to_left_nav_cam_index, // NOLINT - std::map<int, int> const & haz_cam_to_right_nav_cam_index, // NOLINT - std::map<int, int> const & sci_cam_to_left_nav_cam_index, // NOLINT - std::map<int, int> const & sci_cam_to_right_nav_cam_index, // NOLINT - camera::CameraParameters const & sci_cam_params, // NOLINT - camera::CameraParameters const & haz_cam_params, // NOLINT - std::vector<int> const & depth_to_sci_block_sizes, // NOLINT - std::vector<int> const & depth_to_haz_block_sizes, // NOLINT - std::vector<Eigen::Affine3d> const & nav_cam_affines, // NOLINT - std::vector<cv::Mat> const & depth_clouds, // NOLINT - // Outputs // NOLINT - std::vector<std::string> & residual_names, // NOLINT - double & hazcam_depth_to_image_scale, // NOLINT - std::vector<double> & nav_cam_vec, // NOLINT - std::vector<double> & hazcam_to_navcam_vec, // NOLINT - std::vector<double> & scicam_to_hazcam_vec, // NOLINT - std::vector<double> & hazcam_depth_to_image_vec, // NOLINT - Eigen::Vector2d & sci_cam_focal_vector, // NOLINT -
Eigen::Vector2d & sci_cam_optical_center, // NOLINT - Eigen::VectorXd & sci_cam_distortion, // NOLINT - ceres::Problem & problem) { // NOLINT - auto left_it = haz_cam_to_left_nav_cam_index.find(haz_it); - auto right_it = haz_cam_to_right_nav_cam_index.find(haz_it); - if (left_it == haz_cam_to_left_nav_cam_index.end() || - right_it == haz_cam_to_right_nav_cam_index.end()) - LOG(FATAL) << "Book-keeping error 1 in add_haz_sci_cost."; - - int left_nav_it = left_it->second; - int right_nav_it = right_it->second; - - if (nav_cam_start + right_nav_it >= static_cast(sparse_map_timestamps.size()) || - haz_it >= static_cast(haz_cam_intensity_timestamps.size())) - LOG(FATAL) << "Book-keeping error 2 in add_haz_sci_cost."; - - // The haz and sci images must be bracketed by the same two nav images - { - auto left_it2 = sci_cam_to_left_nav_cam_index.find(sci_it); - auto right_it2 = sci_cam_to_right_nav_cam_index.find(sci_it); - if (left_it2 == sci_cam_to_left_nav_cam_index.end() || - right_it2 == sci_cam_to_right_nav_cam_index.end()) - LOG(FATAL) << "Book-keeping error 3 in add_haz_sci_cost."; - - int left_nav_it2 = left_it2->second; - int right_nav_it2 = right_it2->second; - - if (left_nav_it2 != left_nav_it || right_nav_it2 != right_nav_it) - LOG(FATAL) << "Book-keeping error 4 in add_haz_sci_cost."; - } - // Left and right nav cam image time, in haz_cam's time measurement - double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] - + navcam_to_hazcam_timestamp_offset; - double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] - + navcam_to_hazcam_timestamp_offset; - - // Find the haz and sci time and convert them to haz cam's clock - double haz_time = haz_cam_intensity_timestamps[haz_it]; - double sci_time = sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset; - - bool good1 = (left_time <= haz_time && haz_time < right_time); - if (!good1) LOG(FATAL) << "Book-keeping error 5 in add_haz_sci_cost."; - - bool good2 = (left_time <= sci_time && sci_time < right_time); - if (!good2) LOG(FATAL) << "Book-keeping error 6 in add_haz_sci_cost."; - - // Find the transform from the world to nav cam at the haz cam time - Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it]; - Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it]; - - double alpha_haz = (haz_time - left_time) / (right_time - left_time); - if (right_time == left_time) alpha_haz = 0.0; // handle division by zero - - if (!FLAGS_timestamp_interpolation) alpha_haz = round(alpha_haz); - - Eigen::Affine3d world_to_nav_trans_at_haz_time = - dense_map::linearInterp(alpha_haz, left_nav_trans, right_nav_trans); - - double alpha_sci = (sci_time - left_time) / (right_time - left_time); - if (right_time == left_time) alpha_sci = 0.0; // handle division by zero - - if (!FLAGS_timestamp_interpolation) - alpha_sci = round(alpha_sci); - - Eigen::Affine3d world_to_nav_trans_at_sci_time = - dense_map::linearInterp(alpha_sci, left_nav_trans, right_nav_trans); - - std::vector const& haz_ip_vec = match_pair.first; - std::vector const& sci_ip_vec = match_pair.second; - - cv::Mat const& depth_cloud = depth_clouds[haz_it]; - - for (size_t ip_it = 0; ip_it < haz_ip_vec.size(); ip_it++) { - // Find the haz cam depth measurement. Use nearest neighbor interpolation - // to look into the depth cloud. 
- int col = round(haz_ip_vec[ip_it].x); - int row = round(haz_ip_vec[ip_it].y); - - if (col < 0 || row < 0 || col >= depth_cloud.cols || row >= depth_cloud.rows) - LOG(FATAL) << "Book-keeping error 7 in add_haz_sci_cost."; - - // Skip any point that goes out of bounds due to rounding - if (col == depth_cloud.cols || row == depth_cloud.rows) continue; - - cv::Vec3f cv_depth_xyz = depth_cloud.at(row, col); - - // Skip invalid measurements - if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) continue; - - // Convert to Eigen - Eigen::Vector3d depth_xyz(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]); - - // Apply undistortion. Must take great care to not mix up - // distorted and undistorted pixels. - Eigen::Vector2d dist_haz_ip(haz_ip_vec[ip_it].x, haz_ip_vec[ip_it].y); - Eigen::Vector2d dist_sci_ip(sci_ip_vec[ip_it].x, sci_ip_vec[ip_it].y); - Eigen::Vector2d undist_haz_ip; - Eigen::Vector2d undist_sci_ip; - haz_cam_params.Convert(dist_haz_ip, &undist_haz_ip); - sci_cam_params.Convert(dist_sci_ip, &undist_sci_ip); - - // Ensure the depth point projects well into the haz cam interest point - ceres::CostFunction* depth_to_haz_cost_function = - dense_map::DepthToHazError::Create(undist_haz_ip, depth_xyz, depth_to_haz_block_sizes, haz_cam_params); - ceres::LossFunction* depth_to_haz_loss_function = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - problem.AddResidualBlock(depth_to_haz_cost_function, depth_to_haz_loss_function, &hazcam_depth_to_image_vec[0], - &hazcam_depth_to_image_scale); - residual_names.push_back("hazscihaz1"); - residual_names.push_back("hazscihaz2"); - - // Ensure that the depth points projects well in the sci cam interest point. - // Note how we pass a distorted sci cam pix, as in that error function we will - // take the difference of distorted pixels. 
- ceres::CostFunction* depth_to_sci_cost_function - = dense_map::DepthToSciError::Create(dist_sci_ip, depth_xyz, alpha_haz, - alpha_sci, depth_to_sci_block_sizes, sci_cam_params); - ceres::LossFunction* depth_to_sci_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - problem.AddResidualBlock(depth_to_sci_cost_function, depth_to_sci_loss_function, - &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)], - &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)], - &hazcam_to_navcam_vec[0], &scicam_to_hazcam_vec[0], &hazcam_depth_to_image_vec[0], - &hazcam_depth_to_image_scale, &sci_cam_focal_vector[0], &sci_cam_optical_center[0], - &sci_cam_distortion[0]); - - residual_names.push_back("hazscisci1"); - residual_names.push_back("hazscisci2"); - - if (FLAGS_fix_map) { - problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)]); - problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)]); - } - } + fid_it->second = val; +} - return; - } +void parameterValidation() { + if (FLAGS_ros_bag.empty()) + LOG(FATAL) << "The bag file was not specified."; + if (FLAGS_sparse_map.empty()) + LOG(FATAL) << "The input sparse map was not specified."; - // Prevent the linter from messing up with the beautiful formatting below - void add_nav_sci_cost - (// Inputs // NOLINT - int nav_it, int sci_it, int nav_cam_start, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - double scicam_to_hazcam_timestamp_offset, // NOLINT - MATCH_PAIR const & match_pair, // NOLINT - std::vector const & sparse_map_timestamps, // NOLINT - std::vector const & sci_cam_timestamps, // NOLINT - std::map const & sci_cam_to_left_nav_cam_index, // NOLINT - std::map const & sci_cam_to_right_nav_cam_index, // NOLINT - Eigen::Affine3d const & hazcam_to_navcam_aff_trans, // NOLINT - Eigen::Affine3d const & scicam_to_hazcam_aff_trans, // NOLINT - camera::CameraParameters const & nav_cam_params, // NOLINT - camera::CameraParameters const & sci_cam_params, // NOLINT - std::vector const & nav_block_sizes, // NOLINT - std::vector const & sci_block_sizes, // NOLINT - std::vector const & mesh_block_sizes, // NOLINT - std::vector const & nav_cam_affines, // NOLINT - std::vector const & depth_clouds, // NOLINT - mve::TriangleMesh::Ptr const & mesh, // NOLINT - std::shared_ptr const & bvh_tree, // NOLINT - // Outputs // NOLINT - int & nav_sci_xyz_count, // NOLINT - std::vector & residual_names, // NOLINT - std::vector & nav_cam_vec, // NOLINT - std::vector & hazcam_to_navcam_vec, // NOLINT - std::vector & scicam_to_hazcam_vec, // NOLINT - Eigen::Vector2d & sci_cam_focal_vector, // NOLINT - Eigen::Vector2d & sci_cam_optical_center, // NOLINT - Eigen::VectorXd & sci_cam_distortion, // NOLINT - std::vector & initial_nav_sci_xyz, // NOLINT - std::vector & nav_sci_xyz, // NOLINT - ceres::Problem & problem) { // NOLINT - auto left_it = sci_cam_to_left_nav_cam_index.find(sci_it); - auto right_it = sci_cam_to_right_nav_cam_index.find(sci_it); - if (left_it == sci_cam_to_left_nav_cam_index.end() || - right_it == sci_cam_to_right_nav_cam_index.end()) - LOG(FATAL) << "Book-keeping error 1 in add_sci_sci_cost."; - - int left_nav_it = left_it->second; - int right_nav_it = right_it->second; - - if (nav_cam_start + right_nav_it >= static_cast(sparse_map_timestamps.size())) - LOG(FATAL) << "Book-keeping error 1 in add_nav_sci_cost."; - - // Figure out the two nav cam indices bounding the current sci cam - bool match_left = false; - 
if (sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset >= - sparse_map_timestamps[nav_cam_start + nav_it] + navcam_to_hazcam_timestamp_offset) { - match_left = true; - } else { - match_left = false; // match right then - } + if (FLAGS_output_map.empty()) + LOG(FATAL) << "The output sparse map was not specified."; - // Left and right nav cam image time, and sci cam time, in haz_cam's time measurement - double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] + navcam_to_hazcam_timestamp_offset; - double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] + navcam_to_hazcam_timestamp_offset; - double sci_time = sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset; + if (FLAGS_robust_threshold <= 0.0) + LOG(FATAL) << "The robust threshold must be positive.\n"; - bool good = (left_time <= sci_time && sci_time < right_time); + if (FLAGS_bracket_len <= 0.0) LOG(FATAL) << "Bracket length must be positive."; - if (!good) LOG(FATAL) << "Book-keeping 2 in add_nav_sci_cost."; + if (FLAGS_num_overlaps < 1) LOG(FATAL) << "Number of overlaps must be positive."; - // Find the transform from the world to nav cam at the sci cam time - Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it]; - Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it]; - double alpha = (sci_time - left_time) / (right_time - left_time); - if (right_time == left_time) alpha = 0.0; // handle division by zero + if (FLAGS_timestamp_offsets_max_change < 0) + LOG(FATAL) << "The timestamp offsets max change must be non-negative."; - if (!FLAGS_timestamp_interpolation) alpha = round(alpha); + if (FLAGS_refiner_min_angle <= 0.0) + LOG(FATAL) << "The min triangulation angle must be positive.\n"; - Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(alpha, left_nav_trans, right_nav_trans); + if (FLAGS_depth_tri_weight < 0.0) + LOG(FATAL) << "The depth weight must be non-negative.\n"; - // Find the sci cam to world transform - Eigen::Affine3d interp_world_to_sci_trans = - scicam_to_hazcam_aff_trans.inverse() * - hazcam_to_navcam_aff_trans.inverse() * - interp_world_to_nav_trans; + if (FLAGS_mesh_tri_weight < 0.0) + LOG(FATAL) << "The mesh weight must be non-negative.\n"; - Eigen::Affine3d world_to_nav_trans; - if (match_left) { - world_to_nav_trans = left_nav_trans; - } else { - world_to_nav_trans = right_nav_trans; - } + if (FLAGS_depth_mesh_weight < 0.0) + LOG(FATAL) << "The depth mesh weight must be non-negative.\n"; - std::vector<InterestPoint> const& sci_ip_vec = match_pair.first; - std::vector<InterestPoint> const& nav_ip_vec = match_pair.second; + if (FLAGS_nav_cam_num_exclude_boundary_pixels < 0) + LOG(FATAL) << "Must have a non-negative value for --nav_cam_num_exclude_boundary_pixels.\n"; - for (size_t ip_it = 0; ip_it < sci_ip_vec.size(); ip_it++) { - Eigen::Vector2d dist_sci_ip(sci_ip_vec[ip_it].x, sci_ip_vec[ip_it].y); - Eigen::Vector2d dist_nav_ip(nav_ip_vec[ip_it].x, nav_ip_vec[ip_it].y); - Eigen::Vector2d undist_nav_ip; - Eigen::Vector2d undist_sci_ip; - nav_cam_params.Convert(dist_nav_ip, &undist_nav_ip); - sci_cam_params.Convert(dist_sci_ip, &undist_sci_ip); + if (FLAGS_nav_cam_distortion_replacement != "") { + if (FLAGS_haz_cam_intrinsics_to_float != "" || + FLAGS_sci_cam_intrinsics_to_float != "" || + FLAGS_extrinsics_to_float != "" || + FLAGS_float_sparse_map || + FLAGS_float_scale || + FLAGS_float_timestamp_offsets) + LOG(FATAL) << "If replacing and optimizing the nav_cam model distortion, the rest " "of the variables must be kept fixed.
Once this model is found and saved, " + "a subsequent call to this tool may do various co-optimizations."; + } - Eigen::Vector3d X = - dense_map::TriangulatePair(sci_cam_params.GetFocalLength(), nav_cam_params.GetFocalLength(), - interp_world_to_sci_trans, world_to_nav_trans, undist_sci_ip, undist_nav_ip); + if (FLAGS_registration && (FLAGS_xyz_file.empty() || FLAGS_hugin_file.empty())) + LOG(FATAL) << "In order to register the map, the hugin and xyz file must be specified."; - bool have_mesh_intersection = false; - if (FLAGS_mesh != "") { - // Try to make the intersection point be on the mesh and the nav cam ray - // to make the sci cam to conform to that. - // TODO(oalexan1): Think more of the range of the ray below - double min_ray_dist = 0.0; - double max_ray_dist = 10.0; - Eigen::Vector3d intersection(0.0, 0.0, 0.0); - have_mesh_intersection - = dense_map::ray_mesh_intersect(undist_nav_ip, nav_cam_params, - world_to_nav_trans, mesh, bvh_tree, - min_ray_dist, max_ray_dist, - // Output - intersection); - - // Overwrite the triangulation result above with the intersection - if (have_mesh_intersection) X = intersection; - } + if (FLAGS_float_scale && FLAGS_affine_depth_to_image) + LOG(FATAL) << "The options --float_scale and --affine_depth_to_image should not be used " + << "together. If the latter is used, the scale is always floated.\n"; +} - // Record the triangulated positions. These will be optimized. - for (int i = 0; i < dense_map::NUM_XYZ_PARAMS; i++) - nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count + i] = X[i]; +void set_up_block_sizes(int num_depth_params, + std::vector & bracketed_cam_block_sizes, + std::vector & bracketed_depth_block_sizes, + std::vector & bracketed_depth_mesh_block_sizes, + std::vector & mesh_block_sizes) { + // Wipe the outputs + bracketed_cam_block_sizes.clear(); + bracketed_depth_block_sizes.clear(); + bracketed_depth_mesh_block_sizes.clear(); + mesh_block_sizes.clear(); + + int num_focal_lengths = 1; // The x and y focal length are assumed to be the same + int num_distortion_params = 1; // will be overwritten later + + // Set up the variable blocks to optimize for BracketedCamError + + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + bracketed_cam_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_cam_block_sizes.push_back(num_focal_lengths); + bracketed_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); + bracketed_cam_block_sizes.push_back(num_distortion_params); + + // Set up variable blocks to optimize for BracketedDepthError + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_block_sizes.push_back(num_depth_params); + bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + + // Set up the variable blocks to optimize for BracketedDepthMeshError + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); + 
bracketed_depth_mesh_block_sizes.push_back(num_depth_params); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + bracketed_depth_mesh_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); + + // Set up the variable blocks to optimize for the mesh xyz + mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); +} - // A copy of the triangulated positions which won't be optimized. - initial_nav_sci_xyz[nav_sci_xyz_count] = X; +typedef std::map<std::string, std::vector<rosbag::MessageInstance>> StrToMsgMap; + +// Look up each ref cam image by timestamp. In between any two ref cam timestamps, +// which are no further from each other than the bracket length, look up an image +// of each of the other camera types. If there is more than one choice, try to stay as close +// as possible to the midpoint of the two bracketing ref cam timestamps. This way +// there's more wiggle room later if one attempts to modify the timestamp offset. +void lookupImagesAndBrackets( // Inputs + int ref_cam_type, double bracket_len, double timestamp_offsets_max_change, + double max_haz_cam_image_to_depth_timestamp_diff, bool float_timestamp_offsets, + std::vector<std::string> const& cam_names, std::vector<double> const& ref_timestamps, + std::vector<std::string> const& image_topics, std::vector<std::string> const& depth_topics, + StrToMsgMap const& bag_map, std::vector<std::set<double>> const& cam_timestamps_to_use, + std::vector<double> const& ref_to_cam_timestamp_offsets, + // Outputs + std::vector<dense_map::cameraImage>& cams, std::vector<double>& min_timestamp_offset, + std::vector<double>& max_timestamp_offset) { + std::cout << "Looking up images and timestamp bracketing." << std::endl; + + int num_ref_cams = ref_timestamps.size(); + int num_cam_types = cam_names.size(); + + // Initialize the outputs + cams.clear(); + min_timestamp_offset.resize(num_cam_types); + max_timestamp_offset.resize(num_cam_types); + + // A lot of care is needed with positions. These record how far we have + // traveled in time for each camera type, so fewer messages need to be + // searched. A mistake below will corrupt this bookkeeping. + std::vector<int> image_start_positions(num_cam_types, 0); + std::vector<int> cloud_start_positions(num_cam_types, 0); + + // Populate the data for each camera image + for (int ref_it = 0; ref_it < num_ref_cams; ref_it++) { + if (ref_cam_type != 0) + LOG(FATAL) << "It is assumed that the ref cam type is 0."; + + bool save_grayscale = true; // for matching we will need grayscale + + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + dense_map::cameraImage cam; + bool success = false; + if (cam_type == ref_cam_type) { + cam.camera_type = cam_type; + cam.timestamp = ref_timestamps[ref_it]; + cam.ref_timestamp = cam.timestamp; // the time offset is 0 between ref and itself + cam.beg_ref_index = ref_it; + cam.end_ref_index = ref_it; + + // Start looking up the image timestamp from this position. Some care + // is needed here.
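+      // lookupImage() searches forward from the position it is given (see the +      // note further down), so the saved position must be advanced only when a +      // lookup is known to be good.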
+ int start_pos = image_start_positions[cam_type]; // care here + double found_time = -1.0; + // this has to succeed since we picked the ref images in the map + if (!dense_map::lookupImage(cam.timestamp, mapVal(bag_map, image_topics[cam_type]), + save_grayscale, + // outputs + cam.image, image_start_positions[cam_type], // care here + found_time)) + LOG(FATAL) << std::fixed << std::setprecision(17) + << "Cannot look up camera at time " << cam.timestamp << ".\n"; + + // The exact time is expected + if (found_time != cam.timestamp) + LOG(FATAL) << std::fixed << std::setprecision(17) + << "Cannot look up camera at time " << cam.timestamp << ".\n"; + + success = true; + image_start_positions[cam_type] = start_pos; // save for next time + + } else { + if (ref_it + 1 >= num_ref_cams) break; // Arrived at the end, cannot do a bracket + + // Convert the bracketing timestamps to current cam's time + double ref_to_cam_offset = ref_to_cam_timestamp_offsets[cam_type]; + double left_timestamp = ref_timestamps[ref_it] + ref_to_cam_offset; + double right_timestamp = ref_timestamps[ref_it + 1] + ref_to_cam_offset; + + if (right_timestamp <= left_timestamp) + LOG(FATAL) << "Ref timestamps must be in strictly increasing order.\n"; + + if (right_timestamp - left_timestamp > bracket_len) + continue; // Must respect the bracket length + + // Find the image timestamp closest to the midpoint of the brackets. This will give + // more room to vary the timestamp later. + double mid_timestamp = (left_timestamp + right_timestamp)/2.0; + + // Search forward in time from image_start_positions[cam_type]. + // We will update that too later. One has to be very careful + // with it so it does not go so far forward in time + // that at the next iteration we are already past what we + // search for. + int start_pos = image_start_positions[cam_type]; // care here + double curr_timestamp = left_timestamp; // start here + cv::Mat best_image; + double best_dist = 1.0e+100; + double best_time = -1.0, found_time = -1.0; + while (1) { + if (found_time >= right_timestamp) break; // out of range + + cv::Mat image; + if (!dense_map::lookupImage(curr_timestamp, mapVal(bag_map, image_topics[cam_type]), + save_grayscale, + // outputs + image, + start_pos, // care here + found_time)) + break; // Need not succeed, but then there's no need to go on, as we are at the end + + double curr_dist = std::abs(found_time - mid_timestamp); + if (curr_dist < best_dist) { + best_dist = curr_dist; + best_time = found_time; + // Update the start position for the future only if this is a good + // solution. Otherwise we may have moved too far. + image_start_positions[cam_type] = start_pos; + image.copyTo(best_image); + } + + // Go forward in time. We count on the fact that + // lookupImage() looks forward from given guess. + curr_timestamp = std::nextafter(found_time, 1.01 * found_time); + } - // The cost function of projecting in the sci cam. Note that we use dist_sci_ip, - // as in the cost function below we will do differences of distorted sci cam pixels.
- ceres::CostFunction* sci_cost_function = - dense_map::SciError::Create(dist_sci_ip, alpha, sci_block_sizes, sci_cam_params); - ceres::LossFunction* sci_loss_function = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + if (best_time < 0.0) continue; // bracketing failed - problem.AddResidualBlock(sci_cost_function, sci_loss_function, - &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)], - &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)], - &hazcam_to_navcam_vec[0], &scicam_to_hazcam_vec[0], - &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count], &sci_cam_focal_vector[0], - &sci_cam_optical_center[0], &sci_cam_distortion[0]); + // Note how we allow best_time == left_timestamp if there's no other choice + if (best_time < left_timestamp || best_time >= right_timestamp) continue; // no luck - residual_names.push_back("navscisci1"); - residual_names.push_back("navscisci2"); + cam.camera_type = cam_type; + cam.timestamp = best_time; + cam.ref_timestamp = best_time - ref_to_cam_offset; + cam.beg_ref_index = ref_it; + cam.end_ref_index = ref_it + 1; + cam.image = best_image; - if (FLAGS_fix_map) { - problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)]); - problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)]); + // So far these are relative offsets, this will be adjusted further down + max_timestamp_offset[cam_type] + = std::min(max_timestamp_offset[cam_type], best_time - left_timestamp); + min_timestamp_offset[cam_type] + = std::max(min_timestamp_offset[cam_type], best_time - right_timestamp); + success = true; } - // The nav cam cost function - ceres::CostFunction* nav_cost_function = - dense_map::NavError::Create(undist_nav_ip, nav_block_sizes, nav_cam_params); - ceres::LossFunction* nav_loss_function = - dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + // See if to skip this timestamp + if (!cam_timestamps_to_use[cam_type].empty() && + cam_timestamps_to_use[cam_type].find(cam.timestamp) == + cam_timestamps_to_use[cam_type].end()) { + std::cout << std::setprecision(17) << "For " << cam_names[cam_type] + << " skipping timestamp: " << cam.timestamp << std::endl; + continue; + } - problem.AddResidualBlock(nav_cost_function, nav_loss_function, - &nav_cam_vec[(nav_cam_start + nav_it) * NUM_RIGID_PARAMS], - &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count]); - residual_names.push_back("navscinav1"); - residual_names.push_back("navscinav2"); + if (!success) continue; + + // This can be useful in checking if all the sci cams were bracketed successfully. + // std::cout << std::setprecision(17) << "For camera " + // << cam_names[cam_type] << " pick timestamp " + // << cam.timestamp << ".\n"; + + if (depth_topics[cam_type] != "") { + cam.cloud_timestamp = -1.0; // will change + cv::Mat cloud; + // Look up the closest cloud in time (either before or after cam.timestamp) + // This need not succeed. 
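+        // (If no cloud lies within max_haz_cam_image_to_depth_timestamp_diff +        // seconds, cam.depth_cloud should remain empty; empty clouds are +        // handled explicitly, e.g., in depthValue() above.)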
+ dense_map::lookupCloud(cam.timestamp, + mapVal(bag_map, depth_topics[cam_type]), + max_haz_cam_image_to_depth_timestamp_diff, + // Outputs + cam.depth_cloud, + cloud_start_positions[cam_type], // care here + cam.cloud_timestamp); + } - if (FLAGS_fix_map) - problem.SetParameterBlockConstant(&nav_cam_vec[(nav_cam_start + nav_it) - * NUM_RIGID_PARAMS]); + cams.push_back(cam); + } // end loop over camera types + } // end loop over ref images - if (have_mesh_intersection) { - // Constrain the sci cam texture to agree with the nav cam texture on the mesh + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + if (cam_type == ref_cam_type) continue; // bounds don't make sense here - ceres::CostFunction* mesh_cost_function = - dense_map::XYZError::Create(initial_nav_sci_xyz[nav_sci_xyz_count], - mesh_block_sizes, FLAGS_mesh_weight); + // So far we had the relative change. Now add the actual offset to get the max allowed offset. + min_timestamp_offset[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; + max_timestamp_offset[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; + } - ceres::LossFunction* mesh_loss_function = - dense_map::GetLossFunction("cauchy", FLAGS_mesh_robust_threshold); + std::cout << "Timestamp offset allowed ranges:\n"; + for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { + if (cam_type == ref_cam_type) continue; // bounds don't make sense here + min_timestamp_offset[cam_type] = std::max(min_timestamp_offset[cam_type], + ref_to_cam_timestamp_offsets[cam_type] + - timestamp_offsets_max_change); + max_timestamp_offset[cam_type] = std::min(max_timestamp_offset[cam_type], + ref_to_cam_timestamp_offsets[cam_type] + + timestamp_offsets_max_change); + + // Tighten a bit to ensure we don't exceed things when we add + // and subtract timestamps later. Note that timestamps are + // measured in seconds and fractions of a second since epoch and + // can be quite large so precision loss can easily happen. 
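+    // (At current epoch times of about 1.5e9 seconds, a double retains only a +    // few times 1.0e-7 seconds of absolute precision, so the 1.0e-5 margin +    // below sits comfortably above the round-off.)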
+ min_timestamp_offset[cam_type] += 1.0e-5; + max_timestamp_offset[cam_type] -= 1.0e-5; + std::cout << std::setprecision(8) << cam_names[cam_type] + << ": [" << min_timestamp_offset[cam_type] + << ", " << max_timestamp_offset[cam_type] << "]\n"; + } +} - problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, - &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count]); +void multiViewTriangulation( // Inputs + std::vector<camera::CameraParameters> const& cam_params, + std::vector<dense_map::cameraImage> const& cams, std::vector<Eigen::Affine3d> const& world_to_cam, + std::vector<std::map<int, int>> const& pid_to_cid_fid, + std::vector<std::vector<std::pair<float, float>>> const& keypoint_vec, + // Outputs + std::vector<std::map<int, std::map<int, int>>>& pid_cid_fid_inlier, + std::vector<Eigen::Vector3d>& xyz_vec) { + xyz_vec.resize(pid_to_cid_fid.size()); + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + std::vector<double> focal_length_vec; + std::vector<Eigen::Affine3d> world_to_cam_vec; + std::vector<Eigen::Vector2d> pix_vec; + + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; - residual_names.push_back("mesh_x"); - residual_names.push_back("mesh_y"); - residual_names.push_back("mesh_z"); + // Triangulate inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + continue; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + Eigen::Vector2d undist_ip; + cam_params[cams[cid].camera_type].Convert + (dist_ip, &undist_ip); + + focal_length_vec.push_back(cam_params[cams[cid].camera_type].GetFocalLength()); + world_to_cam_vec.push_back(world_to_cam[cid]); + pix_vec.push_back(undist_ip); + } + + if (pix_vec.size() < 2) { + // If after outlier filtering fewer than two rays are left, we can't triangulate. + // Must set all features for this pid to outliers. + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); } - // Very important, go forward in the vector of xyz. Do it at the end.
- nav_sci_xyz_count++; + // Nothing else to do + continue; } - return; + // Triangulate n rays emanating from given undistorted and centered pixels + xyz_vec[pid] = dense_map::Triangulate(focal_length_vec, world_to_cam_vec, pix_vec); } - void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) { - int raw_image_cols = image.cols; - int raw_image_rows = image.rows; - int calib_image_cols = cam_params.GetDistortedSize()[0]; - int calib_image_rows = cam_params.GetDistortedSize()[1]; + return; +} - int factor = raw_image_cols / calib_image_cols; +void meshTriangulations( // Inputs + std::vector const& cam_params, + std::vector const& cams, std::vector const& world_to_cam, + std::vector> const& pid_to_cid_fid, + std::vector>> const& pid_cid_fid_inlier, + std::vector>> const& keypoint_vec, + Eigen::Vector3d const& bad_xyz, double min_ray_dist, double max_ray_dist, + mve::TriangleMesh::Ptr const& mesh, std::shared_ptr const& bvh_tree, + // Outputs + std::vector>>& pid_cid_fid_mesh_xyz, + std::vector& pid_mesh_xyz) { + // Initialize the outputs + pid_cid_fid_mesh_xyz.resize(pid_to_cid_fid.size()); + pid_mesh_xyz.resize(pid_to_cid_fid.size()); + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + Eigen::Vector3d avg_mesh_xyz(0, 0, 0); + int num_intersections = 0; + + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; - if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) { - LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n" - << "Calibrated image width and height are: " - << calib_image_cols << ' ' << calib_image_rows << "\n" - << "These must be equal up to an integer factor.\n"; - } + // Initialize this + pid_cid_fid_mesh_xyz[pid][cid][fid] = bad_xyz; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + continue; - if (factor != 1) { - // TODO(oalexan1): This kind of resizing may be creating aliased images. 
- cv::Mat local_image; - cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA); - local_image.copyTo(image); + // Intersect the ray with the mesh + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + Eigen::Vector3d mesh_xyz(0.0, 0.0, 0.0); + bool have_mesh_intersection + = dense_map::ray_mesh_intersect(dist_ip, cam_params[cams[cid].camera_type], + world_to_cam[cid], mesh, bvh_tree, + min_ray_dist, max_ray_dist, + // Output + mesh_xyz); + + if (have_mesh_intersection) { + pid_cid_fid_mesh_xyz[pid][cid][fid] = mesh_xyz; + avg_mesh_xyz += mesh_xyz; + num_intersections += 1; + } } - // Check - if (image.cols != calib_image_cols || image.rows != calib_image_rows) - LOG(FATAL) << "Sci cam images have the wrong size."; + // Average the intersections of all rays with the mesh + if (num_intersections >= 1) + avg_mesh_xyz /= num_intersections; + else + avg_mesh_xyz = bad_xyz; + + pid_mesh_xyz[pid] = avg_mesh_xyz; } - void select_images_to_match(// Inputs // NOLINT - double haz_cam_start_time, // NOLINT - double navcam_to_hazcam_timestamp_offset, // NOLINT - double scicam_to_hazcam_timestamp_offset, // NOLINT - std::vector const& sparse_map_timestamps, // NOLINT - std::vector const& all_haz_cam_inten_timestamps, // NOLINT - std::vector const& all_sci_cam_timestamps, // NOLINT - std::set const& sci_cam_timestamps_to_use, // NOLINT - dense_map::RosBagHandle const& nav_cam_handle, // NOLINT - dense_map::RosBagHandle const& sci_cam_handle, // NOLINT - dense_map::RosBagHandle const& haz_cam_points_handle, // NOLINT - dense_map::RosBagHandle const& haz_cam_intensity_handle, // NOLINT - camera::CameraParameters const& sci_cam_params, // NOLINT - // Outputs // NOLINT - int& nav_cam_start, // NOLINT - std::vector& cid_to_image_type, // NOLINT - std::vector& haz_cam_intensity_timestamps, // NOLINT - std::vector& sci_cam_timestamps, // NOLINT - std::vector& images, // NOLINT - std::vector& depth_clouds) { // NOLINT - // Wipe the outputs - nav_cam_start = -1; - cid_to_image_type.clear(); - haz_cam_intensity_timestamps.clear(); - sci_cam_timestamps.clear(); - images.clear(); - depth_clouds.clear(); - - bool stop_early = false; - double found_time = -1.0; - bool save_grayscale = true; // feature detection needs grayscale - - double navcam_to_scicam_timestamp_offset - = navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; - - // Use these to keep track where in the bags we are. After one - // traversal forward in time they need to be reset. - int nav_cam_pos = 0, haz_cam_intensity_pos = 0, haz_cam_cloud_pos = 0, sci_cam_pos = 0; - - for (size_t map_it = 0; map_it + 1 < sparse_map_timestamps.size(); map_it++) { - if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) { - // The case when we would like to start later. Note the second - // comparison after "&&". When FLAG_start is 0, we want to - // make sure if the first nav image from the bag is in the map - // we use it, so we don't skip it even if based on - // navcam_to_hazcam_timestamp_offset we should. 
- if (sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset - < FLAGS_start + haz_cam_start_time && - sparse_map_timestamps[map_it] < FLAGS_start + haz_cam_start_time) - continue; - } + return; +} - if (nav_cam_start < 0) nav_cam_start = map_it; +void flagOutlierByExclusionDist( // Inputs + int ref_cam_type, int nav_cam_num_exclude_boundary_pixels, + std::vector<camera::CameraParameters> const& cam_params, + std::vector<dense_map::cameraImage> const& cams, + std::vector<std::map<int, int>> const& pid_to_cid_fid, + std::vector<std::vector<std::pair<float, float>>> const& keypoint_vec, + // Outputs + std::vector<std::map<int, std::map<int, int>>>& pid_cid_fid_inlier) { + // Initialize the output + pid_cid_fid_inlier.resize(pid_to_cid_fid.size()); + + // Iterate through interest point matches + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; - images.push_back(cv::Mat()); - if (!dense_map::lookupImage(sparse_map_timestamps[map_it], nav_cam_handle.bag_msgs, - save_grayscale, images.back(), - nav_cam_pos, found_time)) { - LOG(FATAL) << std::fixed << std::setprecision(17) - << "Cannot look up nav cam at time " << sparse_map_timestamps[map_it] << ".\n"; - } - cid_to_image_type.push_back(dense_map::NAV_CAM); - - if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) { - // If we would like to end earlier, then save the last nav cam image so far - // and quit - if (sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset > - FLAGS_start + FLAGS_duration + haz_cam_start_time) { - stop_early = true; - break; - } - } + // Initially there are inliers only + pid_cid_fid_inlier[pid][cid][fid] = 1; - // Do not look up sci cam and haz cam images in time intervals bigger than this - if (std::abs(sparse_map_timestamps[map_it + 1] - sparse_map_timestamps[map_it]) > FLAGS_bracket_len) continue; - - // Append at most two haz cam images between consecutive sparse - // map timestamps, close to these sparse map timestamps.
- std::vector local_haz_timestamps; - dense_map::pickTimestampsInBounds(all_haz_cam_inten_timestamps, - sparse_map_timestamps[map_it], - sparse_map_timestamps[map_it + 1], - -navcam_to_hazcam_timestamp_offset, - local_haz_timestamps); - - for (size_t samp_it = 0; samp_it < local_haz_timestamps.size(); samp_it++) { - haz_cam_intensity_timestamps.push_back(local_haz_timestamps[samp_it]); - - double nav_start = sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset - - haz_cam_start_time; - double haz_time = local_haz_timestamps[samp_it] - haz_cam_start_time; - double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset - - haz_cam_start_time; - - std::cout << "nav_start haz nav_end times " - << nav_start << ' ' << haz_time << ' ' << nav_end << std::endl; - std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl; - - // Read the image - images.push_back(cv::Mat()); - if (!dense_map::lookupImage(haz_cam_intensity_timestamps.back(), - haz_cam_intensity_handle.bag_msgs, - save_grayscale, images.back(), haz_cam_intensity_pos, - found_time)) - LOG(FATAL) << "Cannot look up haz cam image at given time"; - cid_to_image_type.push_back(dense_map::HAZ_CAM); - - double cloud_time = -1.0; - depth_clouds.push_back(cv::Mat()); - if (!dense_map::lookupCloud(haz_cam_intensity_timestamps.back(), - haz_cam_points_handle.bag_msgs, - FLAGS_max_haz_cam_image_to_depth_timestamp_diff, - depth_clouds.back(), - haz_cam_cloud_pos, cloud_time)) { - // This need not succeed always + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + + if (cams[cid].camera_type == ref_cam_type) { + // Flag as outliers pixels at the nav_cam boundary, if desired. This + // is especially important when the nav_cam uses the radtan + // model instead of fisheye. + Eigen::Vector2i dist_size = cam_params[cams[cid].camera_type].GetDistortedSize(); + int excl = nav_cam_num_exclude_boundary_pixels; + if (dist_ip.x() < excl || dist_ip.x() > dist_size[0] - 1 - excl || + dist_ip.y() < excl || dist_ip.y() > dist_size[1] - 1 - excl) { + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); } } + } + } + return; +} - // Append at most two sci cam images between consecutive sparse - // map timestamps, close to these sparse map timestamps. - - std::vector local_sci_timestamps; - dense_map::pickTimestampsInBounds(all_sci_cam_timestamps, sparse_map_timestamps[map_it], - sparse_map_timestamps[map_it + 1], - -navcam_to_scicam_timestamp_offset, - local_sci_timestamps); - - // Append to the vector of sampled timestamps - for (size_t samp_it = 0; samp_it < local_sci_timestamps.size(); samp_it++) { - // See if to use only specified timestamps - if (!sci_cam_timestamps_to_use.empty() && - sci_cam_timestamps_to_use.find(local_sci_timestamps[samp_it]) == - sci_cam_timestamps_to_use.end()) +// Flag outliers by triangulation angle and reprojection error. It is +// assumed that the cameras in world_to_cam are up-to-date given the +// current state of optimization, and that the residuals (including +// the reprojection errors) have also been updated beforehand. 
+void flagOutliersByTriAngleAndReprojErr( // Inputs + double refiner_min_angle, double max_reprojection_error, + std::vector> const& pid_to_cid_fid, + std::vector>> const& keypoint_vec, + std::vector const& world_to_cam, std::vector const& xyz_vec, + std::vector>> const& pid_cid_fid_to_residual_index, + std::vector const& residuals, + // Outputs + std::vector>>& pid_cid_fid_inlier) { + // Must deal with outliers by triangulation angle before + // removing outliers by reprojection error, as the latter will + // exclude some rays which form the given triangulated points. + int num_outliers_by_angle = 0, num_total_features = 0; + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + // Find the largest angle among any two intersecting rays + double max_rays_angle = 0.0; + + for (auto cid_fid1 = pid_to_cid_fid[pid].begin(); + cid_fid1 != pid_to_cid_fid[pid].end(); cid_fid1++) { + int cid1 = cid_fid1->first; + int fid1 = cid_fid1->second; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid1, fid1)) continue; + + num_total_features++; + + Eigen::Vector3d cam_ctr1 = (world_to_cam[cid1].inverse()) * Eigen::Vector3d(0, 0, 0); + Eigen::Vector3d ray1 = xyz_vec[pid] - cam_ctr1; + ray1.normalize(); + + for (auto cid_fid2 = pid_to_cid_fid[pid].begin(); + cid_fid2 != pid_to_cid_fid[pid].end(); cid_fid2++) { + int cid2 = cid_fid2->first; + int fid2 = cid_fid2->second; + + // Look at each cid and next cids + if (cid2 <= cid1) continue; - sci_cam_timestamps.push_back(local_sci_timestamps[samp_it]); - - double nav_start = sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset - - haz_cam_start_time; - double sci_time = local_sci_timestamps[samp_it] + scicam_to_hazcam_timestamp_offset - - haz_cam_start_time; - double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset - - haz_cam_start_time; - std::cout << "nav_start sci nav_end times " - << nav_start << ' ' << sci_time << ' ' << nav_end << std::endl; - std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl; - - // Read the sci cam image, and perhaps adjust its size - images.push_back(cv::Mat()); - cv::Mat local_img; - if (!dense_map::lookupImage(sci_cam_timestamps.back(), sci_cam_handle.bag_msgs, - save_grayscale, local_img, - sci_cam_pos, found_time)) - LOG(FATAL) << "Cannot look up sci cam image at given time."; - adjustImageSize(sci_cam_params, local_img); - local_img.copyTo(images.back()); - - // Sanity check - Eigen::Vector2i sci_cam_size = sci_cam_params.GetDistortedSize(); - if (images.back().cols != sci_cam_size[0] || images.back().rows != sci_cam_size[1]) - LOG(FATAL) << "Sci cam images have the wrong size."; - - cid_to_image_type.push_back(dense_map::SCI_CAM); + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid2, fid2)) continue; + + Eigen::Vector3d cam_ctr2 = (world_to_cam[cid2].inverse()) * Eigen::Vector3d(0, 0, 0); + Eigen::Vector3d ray2 = xyz_vec[pid] - cam_ctr2; + ray2.normalize(); + + double curr_angle = (180.0 / M_PI) * acos(ray1.dot(ray2)); + + if (std::isnan(curr_angle) || std::isinf(curr_angle)) continue; + + max_rays_angle = std::max(max_rays_angle, curr_angle); } - } // End iterating over nav cam timestamps - - // Add the last nav cam image from the map, unless we stopped early and this was done - if (!stop_early) { - images.push_back(cv::Mat()); - if (!dense_map::lookupImage(sparse_map_timestamps.back(), nav_cam_handle.bag_msgs, - save_grayscale, images.back(), - nav_cam_pos, found_time)) - LOG(FATAL) 
<< "Cannot look up nav cam image at given time."; - cid_to_image_type.push_back(dense_map::NAV_CAM); } - if (images.size() > sparse_map_timestamps.size() + haz_cam_intensity_timestamps.size() - + sci_cam_timestamps.size()) - LOG(FATAL) << "Book-keeping error in select_images_to_match."; + if (max_rays_angle >= refiner_min_angle) + continue; // This is a good triangulated point, with large angle of convergence - return; - } + // Flag as outliers all the features for this cid + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; - void set_up_block_sizes(int num_scicam_focal_lengths, int num_scicam_distortions, - std::vector & depth_to_haz_block_sizes, - std::vector & nav_block_sizes, - std::vector & depth_to_nav_block_sizes, - std::vector & depth_to_sci_block_sizes, - std::vector & sci_block_sizes, - std::vector & mesh_block_sizes) { - // Wipe the outputs - depth_to_haz_block_sizes.clear(); - nav_block_sizes.clear(); - depth_to_nav_block_sizes.clear(); - depth_to_sci_block_sizes.clear(); - sci_block_sizes.clear(); - mesh_block_sizes.clear(); - - // Set up the variable blocks to optimize for DepthToHazError - depth_to_haz_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_haz_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - - // Set up the variable blocks to optimize for NavError - nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - nav_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - - // Set up the variable blocks to optimize for DepthToNavError - depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_nav_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - - // Set up the variable blocks to optimize for DepthToSciError - depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - depth_to_sci_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - depth_to_sci_block_sizes.push_back(num_scicam_focal_lengths); // focal length - depth_to_sci_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); // optical center - depth_to_sci_block_sizes.push_back(num_scicam_distortions); // distortion - - // Set up the variable blocks to optimize for SciError - sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - sci_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - sci_block_sizes.push_back(num_scicam_focal_lengths); // focal length - sci_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); // optical center - sci_block_sizes.push_back(num_scicam_distortions); // distortion - - // Set up the variable blocks to optimize for the mesh xyz error - mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); + num_outliers_by_angle++; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); + } } + 
std::cout << std::setprecision(4) << "Removed " << num_outliers_by_angle + << " outlier features with small angle of convergence, out of " + << num_total_features << " (" + << (100.0 * num_outliers_by_angle) / num_total_features << " %)\n"; + + int num_outliers_reproj = 0; + num_total_features = 0; // reusing this variable + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; - typedef std::map, dense_map::MATCH_PAIR> MATCH_MAP; + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) continue; - // Wrapper to find the value of a const map at a given key - int mapVal(std::map const& map, int key) { - auto ptr = map.find(key); - if (ptr == map.end()) - LOG(FATAL) << "Cannot find map value for given index."; - return ptr->second; - } + num_total_features++; - // Find nav images close in time. Match sci and haz images in between to each - // other and to these nave images - // TODO(oalexan1): Reword the above explanation - void detect_match_features(// Inputs // NOLINT - std::vector const& images, // NOLINT - std::vector const& cid_to_image_type, // NOLINT - // Outputs // NOLINT - std::map & image_to_nav_it, // NOLINT - std::map & image_to_haz_it, // NOLINT - std::map & image_to_sci_it, // NOLINT - std::map & haz_cam_to_left_nav_cam_index, // NOLINT - std::map & haz_cam_to_right_nav_cam_index, // NOLINT - std::map & sci_cam_to_left_nav_cam_index, // NOLINT - std::map & sci_cam_to_right_nav_cam_index, // NOLINT - MATCH_MAP & matches) { // NOLINT - // Wipe the outputs - image_to_nav_it.clear(); - image_to_haz_it.clear(); - image_to_sci_it.clear(); - haz_cam_to_left_nav_cam_index.clear(); - haz_cam_to_right_nav_cam_index.clear(); - sci_cam_to_left_nav_cam_index.clear(); - sci_cam_to_right_nav_cam_index.clear(); - matches.clear(); - - int nav_it = 0, haz_it = 0, sci_it = 0; - for (int image_it = 0; image_it < static_cast(images.size()); image_it++) { - if (cid_to_image_type[image_it] == dense_map::NAV_CAM) { - image_to_nav_it[image_it] = nav_it; - nav_it++; - } else if (cid_to_image_type[image_it] == dense_map::HAZ_CAM) { - image_to_haz_it[image_it] = haz_it; - haz_it++; - } else if (cid_to_image_type[image_it] == dense_map::SCI_CAM) { - image_to_sci_it[image_it] = sci_it; - sci_it++; + // Find the pixel residuals + size_t residual_index = dense_map::getMapValue(pid_cid_fid_to_residual_index, pid, cid, fid); + if (residuals.size() <= residual_index + 1) LOG(FATAL) << "Too few residuals.\n"; + + double res_x = residuals[residual_index + 0]; + double res_y = residuals[residual_index + 1]; + // NaN values will never be inliers if the comparison is set as below + bool is_good = (Eigen::Vector2d(res_x, res_y).norm() <= max_reprojection_error); + if (!is_good) { + num_outliers_reproj++; + dense_map::setMapValue(pid_cid_fid_inlier, pid, cid, fid, 0); } } + } - std::vector > image_pairs; - - // Look at two neighboring nav images. Find left_img_it and - // right_img_it so cid_to_image_type for these are nav images. 
- // Look at two neighboring nav images. Find left_img_it and - // right_img_it so cid_to_image_type for these are nav images. - for (int left_img_it = 0; left_img_it < static_cast<int>(images.size()); left_img_it++) { - if (cid_to_image_type[left_img_it] != dense_map::NAV_CAM) continue; // Not nav cam + std::cout << std::setprecision(4) << "Removed " << num_outliers_reproj + << " outlier features using reprojection error, out of " << num_total_features + << " (" << (100.0 * num_outliers_reproj) / num_total_features << " %)\n"; - // now find right_img_it - int right_img_it = -1; - for (int local_it = left_img_it + 1; local_it < static_cast<int>(images.size()); local_it++) { - if (cid_to_image_type[local_it] == dense_map::NAV_CAM) { - right_img_it = local_it; - break; - } - } + return; +} - if (right_img_it < 0) continue; +// Evaluate the residuals before and after optimization +void evalResiduals( // Inputs + std::string const& tag, std::vector<std::string> const& residual_names, + std::vector<double> const& residual_scales, + // Outputs + ceres::Problem& problem, std::vector<double>& residuals) { + double total_cost = 0.0; + ceres::Problem::EvaluateOptions eval_options; + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - // Now look at sci and haz images in between - std::vector<int> nav_cam_indices, haz_cam_indices, sci_cam_indices; + // Sanity checks, after the residuals are created + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "There must be as many residual names as residual values."; + if (residuals.size() != residual_scales.size()) + LOG(FATAL) << "There must be as many residual values as residual scales."; - nav_cam_indices.push_back(left_img_it); - nav_cam_indices.push_back(right_img_it); + // Compensate for the scale + for (size_t it = 0; it < residuals.size(); it++) + residuals[it] /= residual_scales[it]; - for (int local_img_it = left_img_it + 1; local_img_it < right_img_it; local_img_it++) { - int left_nav_it = mapVal(image_to_nav_it, left_img_it); - int right_nav_it = mapVal(image_to_nav_it, right_img_it); + dense_map::calc_median_residuals(residuals, residual_names, tag); + return; +} - if (cid_to_image_type[local_img_it] == dense_map::HAZ_CAM) { - int haz_it = mapVal(image_to_haz_it, local_img_it); - haz_cam_indices.push_back(local_img_it); - haz_cam_to_left_nav_cam_index[haz_it] = left_nav_it; - haz_cam_to_right_nav_cam_index[haz_it] = right_nav_it; +// Given all the merged and filtered tracks in pid_cid_fid, for each +// image pair cid1 and cid2 with cid1 < cid2 < cid1 + num_overlaps + 1, +// save the matches of this pair which occur in the set of tracks.
+void saveInlierMatches( // Inputs + std::vector<std::string> const& image_files, int num_overlaps, + std::vector<std::map<int, int>> const& pid_to_cid_fid, + std::vector<std::vector<std::pair<float, float>>> const& keypoint_vec, + std::vector<std::map<int, std::map<int, int>>> const& pid_cid_fid_inlier) { + MATCH_MAP matches; + + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid1 = pid_to_cid_fid[pid].begin(); + cid_fid1 != pid_to_cid_fid[pid].end(); cid_fid1++) { + int cid1 = cid_fid1->first; + int fid1 = cid_fid1->second; + + for (auto cid_fid2 = pid_to_cid_fid[pid].begin(); + cid_fid2 != pid_to_cid_fid[pid].end(); cid_fid2++) { + int cid2 = cid_fid2->first; + int fid2 = cid_fid2->second; + + bool is_good = (cid1 < cid2 && cid2 < cid1 + num_overlaps + 1); + if (!is_good) + continue; - } else if (cid_to_image_type[local_img_it] == dense_map::SCI_CAM) { - int sci_it = mapVal(image_to_sci_it, local_img_it); - sci_cam_indices.push_back(local_img_it); - sci_cam_to_left_nav_cam_index[image_to_sci_it[local_img_it]] = left_nav_it; - sci_cam_to_right_nav_cam_index[image_to_sci_it[local_img_it]] = right_nav_it; - } - } + // Consider inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid1, fid1) || + !dense_map::getMapValue(pid_cid_fid_inlier, pid, cid2, fid2)) + continue; - // Match haz to nav - for (size_t haz_it = 0; haz_it < haz_cam_indices.size(); haz_it++) { - for (size_t nav_it = 0; nav_it < nav_cam_indices.size(); nav_it++) { - image_pairs.push_back(std::make_pair(haz_cam_indices[haz_it], nav_cam_indices[nav_it])); - } - } + auto index_pair = std::make_pair(cid1, cid2); - // Match haz to sci - for (size_t haz_it = 0; haz_it < haz_cam_indices.size(); haz_it++) { - for (size_t sci_it = 0; sci_it < sci_cam_indices.size(); sci_it++) { - image_pairs.push_back(std::make_pair(haz_cam_indices[haz_it], sci_cam_indices[sci_it])); - } - } + InterestPoint ip1(keypoint_vec[cid1][fid1].first, keypoint_vec[cid1][fid1].second); + InterestPoint ip2(keypoint_vec[cid2][fid2].first, keypoint_vec[cid2][fid2].second); - // Match sci to nav - for (size_t nav_it = 0; nav_it < nav_cam_indices.size(); nav_it++) { - for (size_t sci_it = 0; sci_it < sci_cam_indices.size(); sci_it++) { - image_pairs.push_back(std::make_pair(sci_cam_indices[sci_it], nav_cam_indices[nav_it])); - } + matches[index_pair].first.push_back(ip1); + matches[index_pair].second.push_back(ip2); } } + } // End iterations over pid - // Detect features using multiple threads - std::vector<cv::Mat> cid_to_descriptor_map; - std::vector<Eigen::Matrix2Xd> cid_to_keypoint_map; - cid_to_descriptor_map.resize(images.size()); - cid_to_keypoint_map.resize(images.size()); - ff_common::ThreadPool thread_pool1; - for (size_t it = 0; it < images.size(); it++) - thread_pool1.AddTask(&dense_map::detectFeatures, images[it], FLAGS_verbose, - &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]); - thread_pool1.Join(); - - // Create the matches among nav, haz, and sci images. Note that we - // have a starting and an ending nav cam image, and match all the - // haz and sci images to these two nav cam images and to each - // other.
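saveInlierMatches above, like the rest of the refiner, reads the per-feature inlier flag through dense_map::getMapValue on a vector of nested maps. A minimal sketch of what such an accessor might look like (illustrative only; the actual dense_map helper may do more validation):

  #include <glog/logging.h>
  #include <map>
  #include <vector>

  // Fetch map[pid][cid][fid], failing loudly rather than silently inserting
  // default-constructed entries the way chained operator[] would.
  int getMapValue(std::vector<std::map<int, std::map<int, int>>> const& pid_cid_fid,
                  size_t pid, int cid, int fid) {
    if (pid >= pid_cid_fid.size()) LOG(FATAL) << "Out of bounds pid.";
    auto cid_it = pid_cid_fid[pid].find(cid);
    if (cid_it == pid_cid_fid[pid].end()) LOG(FATAL) << "Cannot find cid.";
    auto fid_it = cid_it->second.find(fid);
    if (fid_it == cid_it->second.end()) LOG(FATAL) << "Cannot find fid.";
    return fid_it->second;
  }

Using find() on a const structure, instead of operator[], is what makes it safe to pass these structures by const reference as in the function signature above.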
- - // Find the matches using multiple threads - ff_common::ThreadPool thread_pool2; - std::mutex match_mutex; - for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) { - auto pair = image_pairs[pair_it]; - int left_image_it = pair.first, right_image_it = pair.second; - thread_pool2.AddTask(&dense_map::matchFeatures, &match_mutex, left_image_it, right_image_it, - cid_to_descriptor_map[left_image_it], - cid_to_descriptor_map[right_image_it], - cid_to_keypoint_map[left_image_it], cid_to_keypoint_map[right_image_it], - FLAGS_verbose, &matches[pair]); - } - thread_pool2.Join(); - - return; - } -} // namespace dense_map - -int main(int argc, char** argv) { - ff_common::InitFreeFlyerApplication(&argc, &argv); - GOOGLE_PROTOBUF_VERIFY_VERSION; + for (auto it = matches.begin(); it != matches.end(); it++) { + auto & index_pair = it->first; + dense_map::MATCH_PAIR const& match_pair = it->second; - std::cout.precision(17); // to be able to print timestamps + int left_index = index_pair.first; + int right_index = index_pair.second; - if (FLAGS_ros_bag.empty()) - LOG(FATAL) << "The bag file was not specified."; - if (FLAGS_sparse_map.empty()) - LOG(FATAL) << "The input sparse map was not specified."; + auto & left_image = image_files[left_index]; // alias + auto& right_image = image_files[right_index]; // alias - if (FLAGS_output_map.empty()) - LOG(FATAL) << "The output sparse map was not specified."; + std::string left_stem = boost::filesystem::path(left_image).stem().string(); + std::string right_stem = boost::filesystem::path(right_image).stem().string(); - if (!FLAGS_skip_registration) { - if (FLAGS_xyz_file.empty() || FLAGS_hugin_file.empty()) - LOG(FATAL) << "Either the hugin or xyz file was not specified."; + std::string match_file = left_stem + "__" + right_stem + "-inliers.match"; + std::cout << "Writing: " << left_image << ' ' << right_image << ' ' + << match_file << std::endl; + dense_map::writeMatchFile(match_file, match_pair.first, match_pair.second); } +} - if (FLAGS_opt_map_only && FLAGS_fix_map) - LOG(FATAL) << "Cannot both float the sparse map and keep it fixed."; +} // namespace dense_map - if (FLAGS_robust_threshold <= 0.0) - LOG(FATAL) << "The robust threshold must be positive.\n"; +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + GOOGLE_PROTOBUF_VERIFY_VERSION; - // Set up handles for reading data at given time stamp without - // searching through the whole bag each time. 
- dense_map::RosBagHandle nav_cam_handle(FLAGS_ros_bag, FLAGS_nav_cam_topic); - dense_map::RosBagHandle sci_cam_handle(FLAGS_ros_bag, FLAGS_sci_cam_topic); - dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic); - dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic); - - if (nav_cam_handle.bag_msgs.empty()) LOG(FATAL) << "No nav cam images found."; - if (sci_cam_handle.bag_msgs.empty()) LOG(FATAL) << "No sci cam images found."; - if (haz_cam_intensity_handle.bag_msgs.empty()) LOG(FATAL) << "No haz cam images found."; - - // Read the config file - double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; - Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::Affine3d hazcam_depth_to_image_transform; - hazcam_depth_to_image_transform.setIdentity(); // default value - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", // NOLINT - "scicam_to_hazcam_timestamp_offset", // NOLINT - "hazcam_to_navcam_transform", // NOLINT - "scicam_to_hazcam_transform", // NOLINT - "nav_cam_transform", // NOLINT - "hazcam_depth_to_image_transform", // NOLINT - navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, - hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, - nav_cam_params, haz_cam_params, - sci_cam_params); - - if (!std::isnan(FLAGS_scicam_to_hazcam_timestamp_offset_override_value)) { - double new_val = FLAGS_scicam_to_hazcam_timestamp_offset_override_value; - std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset - << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; - scicam_to_hazcam_timestamp_offset = new_val; + dense_map::parameterValidation(); + + // The info below will eventually come from a file + int num_cam_types = 3; + int ref_cam_type = 0; // Below we assume the starting cam is the ref cam + int haz_cam_type = 1; + + // Image and depth topics + std::vector<std::string> cam_names = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector<std::string> image_topics = {"/mgt/img_sampler/nav_cam/image_record", + "/hw/depth_haz/extended/amplitude_int", + "/hw/cam_sci/compressed"}; + std::vector<std::string> depth_topics = {"", "/hw/depth_haz/points", ""}; + + // Read the calibration so far + std::vector<camera::CameraParameters> cam_params; + std::vector<Eigen::Affine3d> ref_to_cam_trans; + std::vector<double> ref_to_cam_timestamp_offsets; + Eigen::Affine3d nav_cam_to_body_trans; + Eigen::Affine3d haz_cam_depth_to_image_transform; + dense_map::readConfigFile( // Inputs + cam_names, "nav_cam_transform", "haz_cam_depth_to_image_transform", + // Outputs + cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, nav_cam_to_body_trans, + haz_cam_depth_to_image_transform); + std::vector<camera::CameraParameters> orig_cam_params = cam_params; + + // Optionally override the timestamp offset + if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { + for (size_t it = 0; it < cam_names.size(); it++)
{ + if (cam_names[it] == "sci_cam") { + double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value; + std::cout << "Overriding the value " << ref_to_cam_timestamp_offsets[it] + << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; + ref_to_cam_timestamp_offsets[it] = new_val; + } + } } - if (FLAGS_mesh_weight <= 0.0 || FLAGS_mesh_robust_threshold <= 0.0) - LOG(FATAL) << "The mesh weight and robust threshold must be positive.\n"; - + // Optionally load the mesh mve::TriangleMesh::Ptr mesh; std::shared_ptr<mve::MeshInfo> mesh_info; std::shared_ptr<tex::Graph> graph; std::shared_ptr<BVHTree> bvh_tree; if (FLAGS_mesh != "") dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); -#if 0 - std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; - std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; - std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() - << "\n"; -#endif - std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; - - // Convert hazcam_to_navcam_trans to Affine3d - Eigen::Affine3d hazcam_to_navcam_aff_trans; - hazcam_to_navcam_aff_trans.matrix() = hazcam_to_navcam_trans; - - // Convert scicam_to_hazcam_trans to Affine3d - Eigen::Affine3d scicam_to_hazcam_aff_trans; - scicam_to_hazcam_aff_trans.matrix() = scicam_to_hazcam_trans; - - // Read the sparse map - boost::shared_ptr<sparse_mapping::SparseMap> sparse_map = - boost::shared_ptr<sparse_mapping::SparseMap>(new sparse_mapping::SparseMap(FLAGS_sparse_map)); - - // TODO(oalexan1): All this timestamp reading logic below should be in a function - - // Find the minimum and maximum timestamps in the sparse map - double min_map_timestamp = std::numeric_limits<double>::max(); - double max_map_timestamp = -min_map_timestamp; - std::vector<double> sparse_map_timestamps; - const std::vector<std::string>& sparse_map_images = sparse_map->cid_to_filename_; - sparse_map_timestamps.resize(sparse_map_images.size()); - for (size_t cid = 0; cid < sparse_map_images.size(); cid++) { - double timestamp = dense_map::fileNameToTimestamp(sparse_map_images[cid]); - sparse_map_timestamps[cid] = timestamp; - min_map_timestamp = std::min(min_map_timestamp, sparse_map_timestamps[cid]); - max_map_timestamp = std::max(max_map_timestamp, sparse_map_timestamps[cid]); - } - if (sparse_map_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found."; - - // Read the haz cam timestamps - std::vector<rosbag::MessageInstance> const& haz_cam_intensity_msgs - = haz_cam_intensity_handle.bag_msgs; - if (haz_cam_intensity_msgs.empty()) LOG(FATAL) << "No haz cam messages are present."; - double haz_cam_start_time = -1.0; - std::vector<double> all_haz_cam_inten_timestamps; - for (size_t it = 0; it < haz_cam_intensity_msgs.size(); it++) { - sensor_msgs::Image::ConstPtr image_msg - = haz_cam_intensity_msgs[it].instantiate<sensor_msgs::Image>(); - if (image_msg) { - double haz_cam_time = image_msg->header.stamp.toSec(); - all_haz_cam_inten_timestamps.push_back(haz_cam_time); - if (haz_cam_start_time < 0) haz_cam_start_time = haz_cam_time; - } - } - - // Read the sci cam timestamps from the bag - std::vector<rosbag::MessageInstance> const& sci_cam_msgs = sci_cam_handle.bag_msgs; - std::vector<double> all_sci_cam_timestamps; - for (size_t sci_it = 0; sci_it < sci_cam_msgs.size(); sci_it++) { - sensor_msgs::Image::ConstPtr sci_image_msg = sci_cam_msgs[sci_it].instantiate<sensor_msgs::Image>(); - sensor_msgs::CompressedImage::ConstPtr comp_sci_image_msg = - sci_cam_msgs[sci_it].instantiate<sensor_msgs::CompressedImage>(); - if (sci_image_msg) -
all_sci_cam_timestamps.push_back(sci_image_msg->header.stamp.toSec()); - else if (comp_sci_image_msg) - all_sci_cam_timestamps.push_back(comp_sci_image_msg->header.stamp.toSec()); - } - // If desired to process only specific timestamps std::set<double> sci_cam_timestamps_to_use; if (FLAGS_sci_cam_timestamps != "") { @@ -1829,397 +1704,561 @@ int main(int argc, char** argv) { while (ifs >> val) sci_cam_timestamps_to_use.insert(val); } - // Will optimize the nav cam poses as part of the process - std::vector<Eigen::Affine3d>& nav_cam_affines = sparse_map->cid_to_cam_t_global_; // alias - - if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) { - double latest_start - = sparse_map_timestamps.back() + navcam_to_hazcam_timestamp_offset - haz_cam_start_time; - if (latest_start < FLAGS_start) { - std::cout << "The sparse map ended before the desired start time. " - << "Use a start time no later than " << latest_start << "." << std::endl; - return 1; - } - - double earliest_end = sparse_map_timestamps[0] + navcam_to_hazcam_timestamp_offset - - haz_cam_start_time; - if (earliest_end > FLAGS_start + FLAGS_duration) { - std::cout << "The sparse map did not start yet. The sum of start and duration " - << "must be at least " << earliest_end << "." << std::endl; - return 1; - } - } - - int nav_cam_start = -1; // Record the first nav cam image used - std::vector<dense_map::imgType> cid_to_image_type; - std::vector<double> haz_cam_intensity_timestamps, sci_cam_timestamps; // timestamps we will use - std::vector<cv::Mat> images; - std::vector<cv::Mat> depth_clouds; - // Find nav images close in time. Match sci and haz images in between to each - // other and to these nav images - // TODO(oalexan1): This selects haz cam images outside of bracket. - // TODO(oalexan1): No check for bracket size either. - select_images_to_match( // Inputs // NOLINT - haz_cam_start_time, navcam_to_hazcam_timestamp_offset, // NOLINT - scicam_to_hazcam_timestamp_offset, // NOLINT - sparse_map_timestamps, all_haz_cam_inten_timestamps, // NOLINT - all_sci_cam_timestamps, sci_cam_timestamps_to_use, // NOLINT - nav_cam_handle, sci_cam_handle, haz_cam_points_handle, // NOLINT - haz_cam_intensity_handle, // NOLINT - sci_cam_params, // NOLINT - // Outputs // NOLINT - nav_cam_start, cid_to_image_type, // NOLINT - haz_cam_intensity_timestamps, sci_cam_timestamps, // NOLINT - images, depth_clouds); // NOLINT - - // If an image in the vector of images is of nav_cam type, see where its index is - // in the list of nav cam images - std::map<int, int> image_to_nav_it, image_to_haz_it, image_to_sci_it; - - // Given the index of a haz cam or sci cam image, find the index of the left and right - // nav cam images bounding it in time.
- std::map<int, int> haz_cam_to_left_nav_cam_index, haz_cam_to_right_nav_cam_index; - std::map<int, int> sci_cam_to_left_nav_cam_index, sci_cam_to_right_nav_cam_index; - - // Map haz cam and nav cam index to the vectors of haz cam to nav cam matches - dense_map::MATCH_MAP matches; - - // TODO(oalexan1): Add explanation here - detect_match_features(// Inputs // NOLINT - images, cid_to_image_type, // NOLINT - // Outputs // NOLINT - image_to_nav_it, image_to_haz_it, // NOLINT - image_to_sci_it, // NOLINT - haz_cam_to_left_nav_cam_index, // NOLINT - haz_cam_to_right_nav_cam_index, // NOLINT - sci_cam_to_left_nav_cam_index, // NOLINT - sci_cam_to_right_nav_cam_index, // NOLINT - matches); // NOLINT - - double hazcam_depth_to_image_scale = pow(hazcam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); - - // Since we will keep the scale fixed, vary the part of the transform without - // the scale, while adding the scale each time before the transform is applied - Eigen::Affine3d hazcam_depth_to_image_noscale = hazcam_depth_to_image_transform; - hazcam_depth_to_image_noscale.linear() /= hazcam_depth_to_image_scale; - - // Form the optimization vector for hazcam_depth_to_image_transform - std::vector<double> hazcam_depth_to_image_vec(dense_map::NUM_RIGID_PARAMS); - dense_map::rigid_transform_to_array(hazcam_depth_to_image_noscale, &hazcam_depth_to_image_vec[0]); - - // Go back from the array to the original transform. This will make it into a pure - // scale*rotation + translation, removing the artifacts of it having been read - // from disk where it was stored with just a few digits of precision - dense_map::array_to_rigid_transform(hazcam_depth_to_image_transform, &hazcam_depth_to_image_vec[0]); - hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale; - - // Form the optimization vector for hazcam_to_navcam_aff_trans - std::vector<double> hazcam_to_navcam_vec(dense_map::NUM_RIGID_PARAMS); - dense_map::rigid_transform_to_array(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]); - - // Recreate hazcam_to_navcam_aff_trans as above - dense_map::array_to_rigid_transform(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]); - - // Form the optimization vector for scicam_to_hazcam_aff_trans - std::vector<double> scicam_to_hazcam_vec(dense_map::NUM_RIGID_PARAMS); - dense_map::rigid_transform_to_array(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]); - - // Recreate scicam_to_hazcam_aff_trans as above - dense_map::array_to_rigid_transform(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]); - - // See how many nav to sci matches we have and allocate space for their xyz - // TODO(oalexan1): This must be factored out as a function - int num_nav_sci_xyz = 0; - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair<int, int> index_pair = it->first; - dense_map::MATCH_PAIR const& match_pair = it->second; - - std::map<int, int>::iterator image_haz_it, image_nav_it, image_sci_it; + // Separate the initial scale. This is convenient if + // haz_cam_depth_to_image is scale * rotation + translation and if + // it is desired to keep the scale fixed. In either case, the scale + // will be multiplied back when needed.
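The comment above, and the cube root taken in the code that follows, rely on the fact that for a linear part of the form scale * rotation the determinant is the cube of the scale: det(sR) = s^3 det(R) = s^3, since a rotation has unit determinant, and the translation part of an affine transform does not affect its 4x4 determinant. A small self-contained check of this identity (illustrative, with an arbitrary rotation):

  #include <Eigen/Core>
  #include <Eigen/Geometry>
  #include <cmath>
  #include <iostream>

  int main() {
    double scale = 1.7;
    Eigen::Matrix3d R
      = Eigen::AngleAxisd(0.3, Eigen::Vector3d(1, 2, 3).normalized()).toRotationMatrix();
    Eigen::Affine3d T = Eigen::Affine3d::Identity();
    T.linear() = scale * R;      // linear part is scale * rotation
    T.translation() << 4, 5, 6;  // translation does not change the determinant
    double recovered = pow(T.matrix().determinant(), 1.0 / 3.0);
    std::cout << "recovered scale: " << recovered << "\n";  // prints 1.7
    return 0;
  }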
+ double haz_cam_depth_to_image_scale + = pow(haz_cam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); + Eigen::Affine3d haz_cam_normalized_depth_to_image = haz_cam_depth_to_image_transform; + haz_cam_normalized_depth_to_image.linear() /= haz_cam_depth_to_image_scale; - image_sci_it = image_to_sci_it.find(index_pair.first); - image_nav_it = image_to_nav_it.find(index_pair.second); - if (image_sci_it != image_to_sci_it.end() && image_nav_it != image_to_nav_it.end()) { - int sci_it = image_sci_it->second; - int nav_it = image_nav_it->second; - - // Add to the number of xyz - num_nav_sci_xyz += match_pair.first.size(); + // Read the sparse map. It has the ref cam poses. + boost::shared_ptr<sparse_mapping::SparseMap> sparse_map = + boost::shared_ptr<sparse_mapping::SparseMap>(new sparse_mapping::SparseMap(FLAGS_sparse_map)); - if (FLAGS_verbose) saveImagesAndMatches("sci", "nav", index_pair, match_pair, images); - } + // Optionally deal with using a non-FOV model for nav_cam + Eigen::VectorXd nav_cam_distortion_replacement; + if (FLAGS_nav_cam_distortion_replacement != "") { + std::vector<double> vec = dense_map::string_to_vector(FLAGS_nav_cam_distortion_replacement); + if (vec.size() != 4 && vec.size() != 5) + LOG(FATAL) << "nav_cam distortion replacement must consist of 4 or 5 values, corresponding " + << "to radial and tangential distortion coefficients.\n"; + nav_cam_distortion_replacement + = Eigen::Map<Eigen::VectorXd>(vec.data(), vec.size()); + } - image_haz_it = image_to_haz_it.find(index_pair.first); - image_sci_it = image_to_sci_it.find(index_pair.second); - if (image_haz_it != image_to_haz_it.end() && image_sci_it != image_to_sci_it.end()) { - if (FLAGS_verbose) saveImagesAndMatches("haz", "sci", index_pair, match_pair, images); - } + // Parse the ref timestamps from the sparse map. We assume the + // sparse map image names are the timestamps. + std::vector<double> ref_timestamps; + const std::vector<std::string>& sparse_map_images = sparse_map->cid_to_filename_; + ref_timestamps.resize(sparse_map_images.size()); + for (size_t cid = 0; cid < sparse_map_images.size(); cid++) + ref_timestamps[cid] = dense_map::fileNameToTimestamp(sparse_map_images[cid]); + if (ref_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found."; - image_haz_it = image_to_haz_it.find(index_pair.first); - image_nav_it = image_to_nav_it.find(index_pair.second); - if (image_haz_it != image_to_haz_it.end() && image_nav_it != image_to_nav_it.end()) { - if (FLAGS_verbose) saveImagesAndMatches("haz", "nav", index_pair, match_pair, images); + // Will optimize the nav cam poses as part of the process + std::vector<Eigen::Affine3d> & world_to_ref_t = sparse_map->cid_to_cam_t_global_; // alias + + // Put transforms of the reference cameras in a vector. We will optimize them. + int num_ref_cams = world_to_ref_t.size(); + if (world_to_ref_t.size() != ref_timestamps.size()) + LOG(FATAL) << "Must have as many ref cam timestamps as ref cameras.\n"; + std::vector<double> world_to_ref_vec(num_ref_cams * dense_map::NUM_RIGID_PARAMS); + for (int cid = 0; cid < num_ref_cams; cid++) + dense_map::rigid_transform_to_array(world_to_ref_t[cid], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + // Need the identity transform for when the cam is the ref cam, and + // have to have a placeholder for the right bracketing cam which won't be used.
+ Eigen::Affine3d identity = Eigen::Affine3d::Identity(); + std::vector<double> identity_vec(dense_map::NUM_RIGID_PARAMS); + dense_map::rigid_transform_to_array(identity, &identity_vec[0]); + + // Put all timestamps to use in a vector, in the nav_cam, haz_cam, sci_cam order + std::vector<std::set<double>> cam_timestamps_to_use = {std::set<double>(), + std::set<double>(), + sci_cam_timestamps_to_use}; + + // Which intrinsics from which cameras to float + std::vector<std::set<std::string>> intrinsics_to_float(num_cam_types); + dense_map::parse_intrinsics_to_float(FLAGS_nav_cam_intrinsics_to_float, intrinsics_to_float[0]); + dense_map::parse_intrinsics_to_float(FLAGS_haz_cam_intrinsics_to_float, intrinsics_to_float[1]); + dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, intrinsics_to_float[2]); + + std::string depth_to_image_name = "depth_to_image"; + std::set<std::string> extrinsics_to_float; + dense_map::parse_extrinsics_to_float(cam_names, cam_names[ref_cam_type], depth_to_image_name, + FLAGS_extrinsics_to_float, extrinsics_to_float); + + if (!FLAGS_affine_depth_to_image && FLAGS_float_scale && + extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + LOG(FATAL) << "Cannot float the scale of depth_to_image_transform unless this " + << "is allowed as part of --extrinsics_to_float.\n"; + + if (FLAGS_nav_cam_distortion_replacement != "") { + if (intrinsics_to_float[ref_cam_type].find("distortion") + == intrinsics_to_float[ref_cam_type].end() || + intrinsics_to_float[ref_cam_type].size() != 1) { + LOG(FATAL) << "When --nav_cam_distortion_replacement is used, must float the nav cam " + << "distortion and no other nav cam intrinsics.\n"; } } - // Prepare for floating sci cam params - int num_scicam_focal_lengths = 1; // Same focal length in x and y - std::set<std::string> sci_cam_intrinsics_to_float; - dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, sci_cam_intrinsics_to_float); - Eigen::Vector2d sci_cam_focal_vector = sci_cam_params.GetFocalVector(); - if (num_scicam_focal_lengths == 1) { - sci_cam_focal_vector[0] = sci_cam_params.GetFocalLength(); // average the two focal lengths - sci_cam_focal_vector[1] = sci_cam_focal_vector[0]; + // Put the extrinsics in arrays, so we can optimize them + std::vector<double> ref_to_cam_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::rigid_transform_to_array + (ref_to_cam_trans[cam_type], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + // Set up the variable blocks to optimize for BracketedDepthError + int num_depth_params = dense_map::NUM_RIGID_PARAMS; + if (FLAGS_affine_depth_to_image) num_depth_params = dense_map::NUM_AFFINE_PARAMS; + + // Depth to image transforms and scales + std::vector<Eigen::Affine3d> normalized_depth_to_image; + std::vector<double> depth_to_image_scales = {1.0, haz_cam_depth_to_image_scale, 1.0}; + normalized_depth_to_image.push_back(Eigen::Affine3d::Identity()); + normalized_depth_to_image.push_back(haz_cam_normalized_depth_to_image); + normalized_depth_to_image.push_back(Eigen::Affine3d::Identity()); + + // Put depth_to_image in arrays, so we can optimize them + std::vector<double> normalized_depth_to_image_vec(num_cam_types * num_depth_params); + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + if (FLAGS_affine_depth_to_image) + dense_map::affine_transform_to_array + (normalized_depth_to_image[cam_type], + &normalized_depth_to_image_vec[num_depth_params * cam_type]); + else + dense_map::rigid_transform_to_array + (normalized_depth_to_image[cam_type], +
&normalized_depth_to_image_vec[num_depth_params * cam_type]); } - std::cout << std::endl; - std::cout << "Initial focal length for sci cam: " << sci_cam_focal_vector.transpose() << std::endl; - Eigen::Vector2d sci_cam_optical_center = sci_cam_params.GetOpticalOffset(); - std::cout << "Initial optical center for sci_cam: " << sci_cam_optical_center.transpose() << std::endl; - Eigen::VectorXd sci_cam_distortion = sci_cam_params.GetDistortion(); - std::cout << "Initial distortion for sci_cam: " << sci_cam_distortion.transpose() << std::endl; - - std::vector<int> depth_to_haz_block_sizes, nav_block_sizes; - std::vector<int> depth_to_nav_block_sizes, depth_to_sci_block_sizes; - std::vector<int> sci_block_sizes, mesh_block_sizes; - dense_map::set_up_block_sizes(// Inputs // NOLINT - num_scicam_focal_lengths, sci_cam_distortion.size(), // NOLINT - // Outputs // NOLINT - depth_to_haz_block_sizes, nav_block_sizes, // NOLINT - depth_to_nav_block_sizes, depth_to_sci_block_sizes, // NOLINT - sci_block_sizes, mesh_block_sizes); // NOLINT - - // Storage for the residual names and xyz - std::vector<std::string> residual_names; - std::vector<double> nav_sci_xyz(dense_map::NUM_XYZ_PARAMS * num_nav_sci_xyz, 0); - std::vector<Eigen::Vector3d> initial_nav_sci_xyz(num_nav_sci_xyz, Eigen::Vector3d(0, 0, 0)); - - // Put the sparse map camera transforms in a vector so we can optimize them further - int num_cams = nav_cam_affines.size(); - std::vector<double> nav_cam_vec(num_cams * dense_map::NUM_RIGID_PARAMS); - for (int cid = 0; cid < num_cams; cid++) - dense_map::rigid_transform_to_array(nav_cam_affines[cid], - &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]); - - int nav_sci_xyz_count = 0; - - ceres::Problem problem; - - // Form the problem - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair<int, int> const& index_pair = it->first; // alias - dense_map::MATCH_PAIR const& match_pair = it->second; // alias - - std::map<int, int>::iterator image_haz_it, image_nav_it, image_sci_it; - - // Doing haz cam to nav cam matches - image_haz_it = image_to_haz_it.find(index_pair.first); - image_nav_it = image_to_nav_it.find(index_pair.second); - if (image_haz_it != image_to_haz_it.end() && image_nav_it != image_to_nav_it.end()) { - int haz_it = image_haz_it->second; - int nav_it = image_nav_it->second; - dense_map::add_haz_nav_cost(// Inputs // NOLINT - haz_it, nav_it, nav_cam_start, // NOLINT - navcam_to_hazcam_timestamp_offset, // NOLINT - match_pair, haz_cam_intensity_timestamps, // NOLINT - sparse_map_timestamps, haz_cam_to_left_nav_cam_index, // NOLINT - haz_cam_to_right_nav_cam_index, nav_cam_params, // NOLINT - haz_cam_params, depth_to_nav_block_sizes, // NOLINT - depth_to_haz_block_sizes, // NOLINT - nav_cam_affines, depth_clouds, // NOLINT - // Outputs // NOLINT - residual_names, hazcam_depth_to_image_scale, // NOLINT - nav_cam_vec, hazcam_to_navcam_vec, // NOLINT - hazcam_depth_to_image_vec, // NOLINT - problem); // NOLINT - } - // Doing haz cam to sci cam matches - image_haz_it = image_to_haz_it.find(index_pair.first); - image_sci_it = image_to_sci_it.find(index_pair.second); - if (image_haz_it != image_to_haz_it.end() && image_sci_it != image_to_sci_it.end()) { - int haz_it = image_haz_it->second; - int sci_it = image_sci_it->second; - add_haz_sci_cost( // Inputs // NOLINT - haz_it, sci_it, nav_cam_start, navcam_to_hazcam_timestamp_offset, // NOLINT - scicam_to_hazcam_timestamp_offset, match_pair, // NOLINT - haz_cam_intensity_timestamps, sparse_map_timestamps, // NOLINT - sci_cam_timestamps, haz_cam_to_left_nav_cam_index, // NOLINT - haz_cam_to_right_nav_cam_index,
sci_cam_to_left_nav_cam_index, // NOLINT - sci_cam_to_right_nav_cam_index, sci_cam_params, // NOLINT - haz_cam_params, depth_to_sci_block_sizes, // NOLINT - depth_to_haz_block_sizes, nav_cam_affines, // NOLINT - depth_clouds, // NOLINT - // Outputs // NOLINT - residual_names, hazcam_depth_to_image_scale, nav_cam_vec, // NOLINT - hazcam_to_navcam_vec, scicam_to_hazcam_vec, // NOLINT - hazcam_depth_to_image_vec, sci_cam_focal_vector, // NOLINT - sci_cam_optical_center, sci_cam_distortion, problem); // NOLINT - } + // Put the intrinsics in arrays + std::vector<double> focal_lengths(num_cam_types); + std::vector<Eigen::Vector2d> optical_centers(num_cam_types); + std::vector<Eigen::VectorXd> distortions(num_cam_types); + for (int it = 0; it < num_cam_types; it++) { + focal_lengths[it] = cam_params[it].GetFocalLength(); // average the two focal lengths + optical_centers[it] = cam_params[it].GetOpticalOffset(); + + if (cam_params[it].GetDistortion().size() == 0) + LOG(FATAL) << "The cameras are expected to have distortion."; + distortions[it] = cam_params[it].GetDistortion(); + } - // Doing sci cam to nav cam matches - image_nav_it = image_to_nav_it.find(index_pair.second); - image_sci_it = image_to_sci_it.find(index_pair.first); - if (image_nav_it != image_to_nav_it.end() && image_sci_it != image_to_sci_it.end()) { - int nav_it = image_nav_it->second; - int sci_it = image_sci_it->second; - - add_nav_sci_cost(// Inputs // NOLINT - nav_it, sci_it, nav_cam_start, // NOLINT - navcam_to_hazcam_timestamp_offset, // NOLINT - scicam_to_hazcam_timestamp_offset, // NOLINT - match_pair, // NOLINT - sparse_map_timestamps, sci_cam_timestamps, // NOLINT - sci_cam_to_left_nav_cam_index, sci_cam_to_right_nav_cam_index, // NOLINT - hazcam_to_navcam_aff_trans, scicam_to_hazcam_aff_trans, // NOLINT - nav_cam_params, sci_cam_params, // NOLINT - nav_block_sizes, sci_block_sizes, mesh_block_sizes, // NOLINT - nav_cam_affines, depth_clouds, mesh, // NOLINT - bvh_tree, // NOLINT - // Outputs // NOLINT - nav_sci_xyz_count, residual_names, // NOLINT - nav_cam_vec, hazcam_to_navcam_vec, // NOLINT - scicam_to_hazcam_vec, sci_cam_focal_vector, // NOLINT - sci_cam_optical_center, sci_cam_distortion, // NOLINT - initial_nav_sci_xyz, nav_sci_xyz, // NOLINT - problem); // NOLINT + // Build a map for quick access for all the messages we may need + // TODO(oalexan1): Must the view be kept open for this to work? + std::vector<std::string> topics; + for (size_t it = 0; it < image_topics.size(); it++) + if (image_topics[it] != "") topics.push_back(image_topics[it]); + for (size_t it = 0; it < depth_topics.size(); it++) + if (depth_topics[it] != "") topics.push_back(depth_topics[it]); + rosbag::Bag bag; + bag.open(FLAGS_ros_bag, rosbag::bagmode::Read); + rosbag::View view(bag, rosbag::TopicQuery(topics)); + dense_map::StrToMsgMap bag_map; + dense_map::indexMessages(view, bag_map); + + // Keep here the images, timestamps, and bracketing information + std::vector<dense_map::cameraImage> cams; + // The range of ref_to_cam_timestamp_offsets[cam_type] before + // getting out of the bracket.
+ std::vector<double> min_timestamp_offset, max_timestamp_offset; + + // Select the images to use from the bag + dense_map::lookupImagesAndBrackets( // Inputs + ref_cam_type, FLAGS_bracket_len, FLAGS_timestamp_offsets_max_change, + FLAGS_max_haz_cam_image_to_depth_timestamp_diff, FLAGS_float_timestamp_offsets, cam_names, + ref_timestamps, image_topics, depth_topics, bag_map, cam_timestamps_to_use, + ref_to_cam_timestamp_offsets, + // Outputs + cams, min_timestamp_offset, max_timestamp_offset); + + // The images from the bag may need to be resized to be the same + // size as in the calibration file. Sometimes the full-res images + // can be so blurry that interest point matching fails, hence the + // resizing. + for (size_t it = 0; it < cams.size(); it++) + dense_map::adjustImageSize(cam_params[cams[it].camera_type], cams[it].image); + + // Sort by the timestamp in reference camera time. This is essential + // for matching each image to other images close in time. + std::sort(cams.begin(), cams.end(), dense_map::timestampLess); + + // The transform from the world to every camera + std::vector<Eigen::Affine3d> world_to_cam; + calc_world_to_cam_transforms( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, + ref_to_cam_timestamp_offsets, + // Output + world_to_cam); + + // Detect and match features + std::vector<std::vector<std::pair<float, float>>> keypoint_vec; + std::vector<std::map<int, int>> pid_to_cid_fid; + std::vector<std::string> image_files; // will be filled only in verbose mode + dense_map::detectMatchFeatures( // Inputs + cams, cam_names, cam_params, world_to_cam, + FLAGS_num_overlaps, FLAGS_initial_max_reprojection_error, + FLAGS_num_match_threads, FLAGS_verbose, + // Outputs + keypoint_vec, pid_to_cid_fid, image_files); + + // Set up the block sizes + std::vector<int> bracketed_cam_block_sizes; + std::vector<int> bracketed_depth_block_sizes; + std::vector<int> bracketed_depth_mesh_block_sizes; + std::vector<int> mesh_block_sizes; + dense_map::set_up_block_sizes(num_depth_params, + bracketed_cam_block_sizes, bracketed_depth_block_sizes, + bracketed_depth_mesh_block_sizes, mesh_block_sizes);
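calc_world_to_cam_transforms, called above and again at every optimization pass below, positions each non-reference camera from its two bracketing reference poses. The exact interpolation is internal to dense_map; one plausible form, sketched here purely for illustration (slerp on rotation, linear interpolation on translation, with the camera timestamp assumed already shifted into the reference clock by its timestamp offset):

  #include <Eigen/Geometry>

  // Interpolate a world-to-ref pose at time t, given the bracketing poses at
  // times t0 and t1, then compose with the fixed ref-to-cam extrinsics.
  Eigen::Affine3d interpWorldToCam(Eigen::Affine3d const& world_to_ref0, double t0,
                                   Eigen::Affine3d const& world_to_ref1, double t1,
                                   double t, Eigen::Affine3d const& ref_to_cam) {
    double alpha = (t - t0) / (t1 - t0);  // assumes t0 <= t <= t1 and t0 < t1
    Eigen::Quaterniond q0(world_to_ref0.linear()), q1(world_to_ref1.linear());
    Eigen::Affine3d world_to_ref = Eigen::Affine3d::Identity();
    world_to_ref.linear() = q0.slerp(alpha, q1).toRotationMatrix();
    world_to_ref.translation()
      = (1.0 - alpha) * world_to_ref0.translation() + alpha * world_to_ref1.translation();
    return ref_to_cam * world_to_ref;  // world -> ref -> cam
  }

This is also why the bracketing information (beg_ref_index, end_ref_index) is stored per camera in cams: every pose of a non-ref camera is a function of its two bracketing reference poses, the ref-to-cam extrinsics, and the timestamp offset, all of which may be optimized.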
+ // For a given fid = pid_to_cid_fid[pid][cid], the value + // pid_cid_fid_inlier[pid][cid][fid] will be non-zero only if this + // pixel is an inlier. Originally all pixels are inliers. Once an + // inlier becomes an outlier, it never becomes an inlier again. + std::vector<std::map<int, std::map<int, int>>> pid_cid_fid_inlier; + dense_map::flagOutlierByExclusionDist( // Inputs + ref_cam_type, FLAGS_nav_cam_num_exclude_boundary_pixels, cam_params, cams, pid_to_cid_fid, + keypoint_vec, + // Outputs + pid_cid_fid_inlier); + + // Structures needed to intersect rays with the mesh + std::vector<std::map<int, std::map<int, Eigen::Vector3d>>> pid_cid_fid_mesh_xyz; + std::vector<Eigen::Vector3d> pid_mesh_xyz; + Eigen::Vector3d bad_xyz(1.0e+100, 1.0e+100, 1.0e+100); // use this to flag invalid xyz + + for (int pass = 0; pass < FLAGS_refiner_num_passes; pass++) { + std::cout << "\nOptimization pass " << pass + 1 << " / " << FLAGS_refiner_num_passes << "\n"; + + // The transforms from the world to all cameras must be updated + // given the current state of optimization + calc_world_to_cam_transforms( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); + + std::vector<Eigen::Vector3d> xyz_vec; + dense_map::multiViewTriangulation( // Inputs + cam_params, cams, world_to_cam, pid_to_cid_fid, + keypoint_vec, + // Outputs + pid_cid_fid_inlier, xyz_vec); + + // Compute where each ray intersects the mesh + if (FLAGS_mesh != "") + dense_map::meshTriangulations( // Inputs + cam_params, cams, world_to_cam, pid_to_cid_fid, + pid_cid_fid_inlier, keypoint_vec, bad_xyz, FLAGS_min_ray_dist, FLAGS_max_ray_dist, mesh, + bvh_tree, + // Outputs + pid_cid_fid_mesh_xyz, pid_mesh_xyz); + + if (pass == 0 && nav_cam_distortion_replacement.size() > 1) { + // At the first pass, right after triangulation is done with a + // given nav cam model, which presumably was pretty accurate, + // replace its distortion if desired, which we will then + // optimize. + std::cout << "Setting nav cam distortion to: " << nav_cam_distortion_replacement.transpose() + << ". Will proceed to optimize it.\n"; + cam_params[ref_cam_type].SetDistortion(nav_cam_distortion_replacement); + distortions[ref_cam_type] = cam_params[ref_cam_type].GetDistortion(); } - } // end iterating over matches - - if (nav_sci_xyz_count != num_nav_sci_xyz) LOG(FATAL) << "nav sci xyz book-keeping error."; - if (sparse_map->pid_to_cid_fid_.size() != sparse_map->pid_to_xyz_.size()) - LOG(FATAL) << "Book-keeping error 1 in the sparse map."; + // For a given fid = pid_to_cid_fid[pid][cid], + // the value pid_cid_fid_to_residual_index[pid][cid][fid] will be the index in the array + // of residuals (look only at pixel residuals). This structure is populated only for + // inliers, so its size changes at each pass. + std::vector<std::map<int, std::map<int, int>>> pid_cid_fid_to_residual_index; + pid_cid_fid_to_residual_index.resize(pid_to_cid_fid.size()); + + // Form the problem + ceres::Problem problem; + std::vector<std::string> residual_names; + std::vector<double> residual_scales; + for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { + for (auto cid_fid = pid_to_cid_fid[pid].begin(); + cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + // Deal with inliers only + if (!dense_map::getMapValue(pid_cid_fid_inlier, pid, cid, fid)) + continue; - for (size_t pid = 0; pid < sparse_map->pid_to_cid_fid_.size(); pid++) { - // Note that xyz is an alias. This is very important as we optimize these - // xyz in-place. - Eigen::Vector3d& xyz = sparse_map->pid_to_xyz_[pid]; + int cam_type = cams[cid].camera_type; + int beg_ref_index = cams[cid].beg_ref_index; + int end_ref_index = cams[cid].end_ref_index; + + // Left bracketing ref cam for a given cam. For a ref cam, this is itself.
+ double* left_ref_cam_ptr = &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]; + + // Transform from reference camera to given camera + double* ref_to_cam_ptr = &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]; + + // When the cam is the ref type, the right bracketing camera is nominal + double* right_ref_cam_ptr = NULL; + if (cam_type == ref_cam_type) + right_ref_cam_ptr = &identity_vec[0]; + else + right_ref_cam_ptr = &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]; + + Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); + + ceres::CostFunction* bracketed_cost_function = + dense_map::BracketedCamError::Create(dist_ip, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_cam_block_sizes, + cam_params[cam_type]); + ceres::LossFunction* bracketed_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + // Remember the index of the residuals about to be created + pid_cid_fid_to_residual_index[pid][cid][fid] = residual_names.size(); + + residual_names.push_back(cam_names[cam_type] + "_pix_x"); + residual_names.push_back(cam_names[cam_type] + "_pix_y"); + residual_scales.push_back(1.0); + residual_scales.push_back(1.0); + problem.AddResidualBlock + (bracketed_cost_function, bracketed_loss_function, + left_ref_cam_ptr, right_ref_cam_ptr, ref_to_cam_ptr, &xyz_vec[pid][0], + &ref_to_cam_timestamp_offsets[cam_type], + &focal_lengths[cam_type], &optical_centers[cam_type][0], &distortions[cam_type][0]); + + // See which intrinsics to float + if (intrinsics_to_float[cam_type].find("focal_length") == + intrinsics_to_float[cam_type].end()) + problem.SetParameterBlockConstant(&focal_lengths[cam_type]); + if (intrinsics_to_float[cam_type].find("optical_center") == + intrinsics_to_float[cam_type].end()) + problem.SetParameterBlockConstant(&optical_centers[cam_type][0]); + if (intrinsics_to_float[cam_type].find("distortion") == intrinsics_to_float[cam_type].end()) + problem.SetParameterBlockConstant(&distortions[cam_type][0]); + + // When the camera is the ref type, the right bracketing + // camera is just a placeholder which is not used, hence + // should not be optimized. Same for the ref_to_cam_vec and + // ref_to_cam_timestamp_offsets, etc., as can be seen further + // down. + + if (!FLAGS_float_sparse_map) problem.SetParameterBlockConstant(left_ref_cam_ptr); + + if (!FLAGS_float_sparse_map || cam_type == ref_cam_type) + problem.SetParameterBlockConstant(right_ref_cam_ptr); + + if (!FLAGS_float_timestamp_offsets || cam_type == ref_cam_type) { + // Either we don't float timestamp offsets at all, or the cam is the ref type, + // when it can't float anyway.
+ problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); + } else { + problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + min_timestamp_offset[cam_type]); + problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, + max_timestamp_offset[cam_type]); + } + if (extrinsics_to_float.find(cam_names[cam_type]) == extrinsics_to_float.end() || + cam_type == ref_cam_type) + problem.SetParameterBlockConstant(ref_to_cam_ptr); + + Eigen::Vector3d depth_xyz(0, 0, 0); + bool have_depth_tri_constraint + = (FLAGS_depth_tri_weight > 0 && + dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)); + + if (have_depth_tri_constraint) { + // Ensure that the depth points agree with triangulated points + ceres::CostFunction* bracketed_depth_cost_function + = dense_map::BracketedDepthError::Create(FLAGS_depth_tri_weight, + depth_xyz, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_depth_block_sizes); + + ceres::LossFunction* bracketed_depth_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back("depth_tri_x_m"); + residual_names.push_back("depth_tri_y_m"); + residual_names.push_back("depth_tri_z_m"); + residual_scales.push_back(FLAGS_depth_tri_weight); + residual_scales.push_back(FLAGS_depth_tri_weight); + residual_scales.push_back(FLAGS_depth_tri_weight); + problem.AddResidualBlock + (bracketed_depth_cost_function, bracketed_depth_loss_function, + left_ref_cam_ptr, right_ref_cam_ptr, ref_to_cam_ptr, + &normalized_depth_to_image_vec[num_depth_params * cam_type], + &depth_to_image_scales[cam_type], + &xyz_vec[pid][0], + &ref_to_cam_timestamp_offsets[cam_type]); + + // Note that above we already considered fixing some params. + // We won't repeat that code here. + // If we model an affine depth to image, fix its scale here, + // it will change anyway as part of normalized_depth_to_image_vec. 
+ if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + + if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + problem.SetParameterBlockConstant + (&normalized_depth_to_image_vec[num_depth_params * cam_type]); + } - for (auto cid_fid = sparse_map->pid_to_cid_fid_[pid].begin(); - cid_fid != sparse_map->pid_to_cid_fid_[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; + // Add the depth to mesh constraint + bool have_depth_mesh_constraint = false; + depth_xyz = Eigen::Vector3d(0, 0, 0); + Eigen::Vector3d mesh_xyz(0, 0, 0); + if (FLAGS_mesh != "") { + mesh_xyz = dense_map::getMapValue(pid_cid_fid_mesh_xyz, pid, cid, fid); + have_depth_mesh_constraint + = (FLAGS_depth_mesh_weight > 0 && mesh_xyz != bad_xyz && + dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)); + } - Eigen::Matrix2Xd& keypoints = sparse_map->cid_to_keypoint_map_[cid]; // alias + if (have_depth_mesh_constraint) { + // Try to make each mesh intersection agree with corresponding depth measurement, + // if it exists + ceres::CostFunction* bracketed_depth_mesh_cost_function + = dense_map::BracketedDepthMeshError::Create(FLAGS_depth_mesh_weight, + depth_xyz, + mesh_xyz, + ref_timestamps[beg_ref_index], + ref_timestamps[end_ref_index], + cams[cid].timestamp, + bracketed_depth_mesh_block_sizes); + + ceres::LossFunction* bracketed_depth_mesh_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + + residual_names.push_back("depth_mesh_x_m"); + residual_names.push_back("depth_mesh_y_m"); + residual_names.push_back("depth_mesh_z_m"); + residual_scales.push_back(FLAGS_depth_mesh_weight); + residual_scales.push_back(FLAGS_depth_mesh_weight); + residual_scales.push_back(FLAGS_depth_mesh_weight); + problem.AddResidualBlock + (bracketed_depth_mesh_cost_function, bracketed_depth_mesh_loss_function, + left_ref_cam_ptr, right_ref_cam_ptr, ref_to_cam_ptr, + &normalized_depth_to_image_vec[num_depth_params * cam_type], + &depth_to_image_scales[cam_type], + &ref_to_cam_timestamp_offsets[cam_type]); + + // Note that above we already fixed some of these variables. + // Repeat the fixing of depth variables, however, as the previous block + // may not take place. 
+ if (!FLAGS_float_scale || FLAGS_affine_depth_to_image) + problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); + + if (extrinsics_to_float.find(depth_to_image_name) == extrinsics_to_float.end()) + problem.SetParameterBlockConstant + (&normalized_depth_to_image_vec[num_depth_params * cam_type]); + } + } // end iterating over all cid for given pid - if (fid > keypoints.cols()) LOG(FATAL) << "Book-keeping error 2 in the sparse map."; + // This constraint will be for the pid + bool have_mesh_tri_constraint = false; + Eigen::Vector3d avg_mesh_xyz(0, 0, 0); + if (FLAGS_mesh != "") { + avg_mesh_xyz = pid_mesh_xyz.at(pid); + if (FLAGS_mesh_tri_weight > 0 && avg_mesh_xyz != bad_xyz) have_mesh_tri_constraint = true; + } + if (have_mesh_tri_constraint) { + // Try to make the triangulated point agree with the mesh intersection - Eigen::Vector2d undist_nav_ip = keypoints.col(fid); + ceres::CostFunction* mesh_cost_function = + dense_map::XYZError::Create(avg_mesh_xyz, mesh_block_sizes, FLAGS_mesh_tri_weight); - ceres::CostFunction* nav_cost_function = - dense_map::NavError::Create(undist_nav_ip, nav_block_sizes, nav_cam_params); - ceres::LossFunction* nav_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - problem.AddResidualBlock(nav_cost_function, nav_loss_function, - &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid], - &xyz[0]); - residual_names.push_back("nav1"); - residual_names.push_back("nav2"); + ceres::LossFunction* mesh_loss_function = + dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - if (FLAGS_fix_map) - problem.SetParameterBlockConstant(&nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]); + problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, + &xyz_vec[pid][0]); + + residual_names.push_back("mesh_tri_x_m"); + residual_names.push_back("mesh_tri_y_m"); + residual_names.push_back("mesh_tri_z_m"); + residual_scales.push_back(FLAGS_mesh_tri_weight); + residual_scales.push_back(FLAGS_mesh_tri_weight); + residual_scales.push_back(FLAGS_mesh_tri_weight); + } + } // end iterating over pid + + // Evaluate the residuals before optimization + std::vector<double> residuals; + dense_map::evalResiduals("before opt", residual_names, residual_scales, problem, residuals); + + // Solve the problem + ceres::Solver::Options options; + ceres::Solver::Summary summary; + options.linear_solver_type = ceres::ITERATIVE_SCHUR; + options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread + options.max_num_iterations = FLAGS_num_iterations; + options.minimizer_progress_to_stdout = true; + options.gradient_tolerance = 1e-16; + options.function_tolerance = 1e-16; + options.parameter_tolerance = FLAGS_parameter_tolerance; + ceres::Solve(options, &problem, &summary); + + // The optimization is done. Right away copy the optimized states + // to where they belong to keep all data in sync.
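Since Ceres operated on raw double arrays, the copy-back below must be the exact inverse of the packing done before the solve, or state would drift between passes. The actual layout behind rigid_transform_to_array / array_to_rigid_transform is defined inside dense_map; a sketch of such a pair, assuming a hypothetical translation-plus-quaternion layout, shows the round-trip property that matters:

  #include <Eigen/Geometry>

  // Hypothetical 7-parameter layout: [tx, ty, tz, qx, qy, qz, qw].
  // The real dense_map packing may differ; the point is only that pack and
  // unpack must be exact inverses of each other.
  void packRigid(Eigen::Affine3d const& T, double* arr) {
    Eigen::Quaterniond q(T.linear());
    for (int i = 0; i < 3; i++) arr[i] = T.translation()[i];
    arr[3] = q.x(); arr[4] = q.y(); arr[5] = q.z(); arr[6] = q.w();
  }
  void unpackRigid(double const* arr, Eigen::Affine3d& T) {
    Eigen::Quaterniond q(arr[6], arr[3], arr[4], arr[5]);  // (w, x, y, z)
    T.linear() = q.normalized().toRotationMatrix();
    T.translation() << arr[0], arr[1], arr[2];
  }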
+ + // Copy back the reference transforms + for (int cid = 0; cid < num_ref_cams; cid++) + dense_map::array_to_rigid_transform(world_to_ref_t[cid], + &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + // Copy back the optimized intrinsics + for (int it = 0; it < num_cam_types; it++) { + cam_params[it].SetFocalLength(Eigen::Vector2d(focal_lengths[it], focal_lengths[it])); + cam_params[it].SetOpticalOffset(optical_centers[it]); + cam_params[it].SetDistortion(distortions[it]); } - } - - if (FLAGS_opt_map_only) { - problem.SetParameterBlockConstant(&hazcam_depth_to_image_vec[0]); - problem.SetParameterBlockConstant(&hazcam_depth_to_image_scale); - problem.SetParameterBlockConstant(&hazcam_to_navcam_vec[0]); - problem.SetParameterBlockConstant(&scicam_to_hazcam_vec[0]); - } - - // Evaluate the residual before optimization - double total_cost = 0.0; - std::vector residuals; - ceres::Problem::EvaluateOptions eval_options; - eval_options.num_threads = 1; - eval_options.apply_loss_function = false; // want raw residuals - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "Book-keeping error in the number of residuals."; - - if (FLAGS_verbose) { - for (size_t it = 0; it < residuals.size(); it++) - std::cout << "initial res " << residual_names[it] << " " << residuals[it] << std::endl; - } - - // Want the RMSE residual with loss, to understand what all residuals contribute, - // but without getting distracted by the outliers - eval_options.apply_loss_function = false; - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - dense_map::calc_median_residuals(residuals, residual_names, "before opt"); - - // Experimentally it was found that it was better to keep the scale - // constant and optimize everything else. Later registration will - // happen to correct any drift in the nav cam poses. - if (!FLAGS_float_scale) problem.SetParameterBlockConstant(&hazcam_depth_to_image_scale); - - // See which sci cam intrinsics values to float or keep fixed - if (sci_cam_intrinsics_to_float.find("focal_length") == sci_cam_intrinsics_to_float.end()) { - // std::cout << "For " << sci_cam << " not floating focal_length." << std::endl; - problem.SetParameterBlockConstant(&sci_cam_focal_vector[0]); - } else { - // std::cout << "For " << sci_cam << " floating focal_length." << std::endl; - } - if (sci_cam_intrinsics_to_float.find("optical_center") == sci_cam_intrinsics_to_float.end()) { - // std::cout << "For " << sci_cam << " not floating optical_center." << std::endl; - problem.SetParameterBlockConstant(&sci_cam_optical_center[0]); - } else { - // std::cout << "For " << sci_cam << " floating optical_center." << std::endl; - } - if (sci_cam_intrinsics_to_float.find("distortion") == sci_cam_intrinsics_to_float.end()) { - // std::cout << "For " << sci_cam << " not floating distortion." << std::endl; - problem.SetParameterBlockConstant(&sci_cam_distortion[0]); - } else { - // std::cout << "For " << sci_cam << " floating distortion." 
<< std::endl; - } - // Solve the problem - ceres::Solver::Options options; - ceres::Solver::Summary summary; - options.linear_solver_type = ceres::ITERATIVE_SCHUR; - options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread - options.max_num_iterations = FLAGS_num_iterations; - options.minimizer_progress_to_stdout = true; - options.gradient_tolerance = 1e-16; - options.function_tolerance = 1e-16; - options.parameter_tolerance = FLAGS_parameter_tolerance; - ceres::Solve(options, &problem, &summary); - - // Copy back the optimized intrinsics - sci_cam_params.SetFocalLength(Eigen::Vector2d(sci_cam_focal_vector[0], - sci_cam_focal_vector[0])); - sci_cam_params.SetOpticalOffset(sci_cam_optical_center); - sci_cam_params.SetDistortion(sci_cam_distortion); - - // Update with the optimized results - dense_map::array_to_rigid_transform(hazcam_depth_to_image_transform, &hazcam_depth_to_image_vec[0]); - hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale; - dense_map::array_to_rigid_transform(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]); - dense_map::array_to_rigid_transform(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]); - for (int cid = 0; cid < num_cams; cid++) - dense_map::array_to_rigid_transform(nav_cam_affines[cid], &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]); - - // Compute the residuals without loss, want to see the outliers too - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "Book-keeping error in the number of residuals."; + // If the nav cam did not get optimized, go back to the solution + // with two focal lengths, rather than the one with one focal length + // solved by this solver (as the average of the two). The two focal + // lengths are very similar, but it is not worth modifying the + // camera model we don't plan to optimize. 
+ if (FLAGS_nav_cam_intrinsics_to_float == "" || FLAGS_num_iterations == 0) + cam_params[ref_cam_type] = orig_cam_params[ref_cam_type]; + + // Copy back the optimized extrinsics + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) + dense_map::array_to_rigid_transform + (ref_to_cam_trans[cam_type], + &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); + + // Copy back the depth to image transforms without scales + for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { + if (FLAGS_affine_depth_to_image) + dense_map::array_to_affine_transform + (normalized_depth_to_image[cam_type], + &normalized_depth_to_image_vec[num_depth_params * cam_type]); + else + dense_map::array_to_rigid_transform( + normalized_depth_to_image[cam_type], + &normalized_depth_to_image_vec[num_depth_params * cam_type]); + } - if (FLAGS_verbose) { - for (size_t it = 0; it < residuals.size(); it++) - std::cout << "final res " << residual_names[it] << " " << residuals[it] << std::endl; + // Evaluate the residuals after optimization + dense_map::evalResiduals("after opt", residual_names, residual_scales, problem, residuals); + + // Flag outliers after this pass + calc_world_to_cam_transforms + ( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); + // Must have up-to-date world_to_cam and residuals to flag the outliers + dense_map::flagOutliersByTriAngleAndReprojErr( // Inputs + FLAGS_refiner_min_angle, FLAGS_max_reprojection_error, pid_to_cid_fid, keypoint_vec, + world_to_cam, xyz_vec, pid_cid_fid_to_residual_index, residuals, + // Outputs + pid_cid_fid_inlier); + } // End optimization passes + + if (FLAGS_verbose) + dense_map::saveInlierMatches(image_files, FLAGS_num_overlaps, pid_to_cid_fid, + keypoint_vec, pid_cid_fid_inlier); + + bool map_changed = (FLAGS_num_iterations > 0 && + (FLAGS_float_sparse_map || FLAGS_nav_cam_intrinsics_to_float != "")); + if (map_changed) { + std::cout << "Either the sparse map intrinsics or cameras got modified. " + << "The map must be rebuilt." << std::endl; + dense_map::RebuildMap(FLAGS_output_map, // Will be used for temporary saving of aux data + cam_params[ref_cam_type], sparse_map); } - // Want the RMSE residual with loss, to understand what all residuals contribute, - // but without getting distracted by the outliers - eval_options.apply_loss_function = false; - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - dense_map::calc_median_residuals(residuals, residual_names, "after opt"); - - // Re-register the map, and keep track of what scale was used. - // Note that we do not update the positions of the sci and haz - // images. We don't need those any more. 
-  if (!FLAGS_skip_registration) {
+  if (FLAGS_registration) {
+    std::cout << "Redoing registration of the obtained map and adjusting all extrinsics.\n";
     std::vector<std::string> data_files;
     data_files.push_back(FLAGS_hugin_file);
     data_files.push_back(FLAGS_xyz_file);
@@ -2227,63 +2266,49 @@ int main(int argc, char** argv) {
     double map_scale = sparse_mapping::RegistrationOrVerification(data_files, verification,
                                                                   sparse_map.get());

-    // Apply the scale
-    hazcam_depth_to_image_scale *= map_scale;
-    // This transform is affine, so both the linear and translation parts need a scale
-    hazcam_depth_to_image_transform.linear() *= map_scale;
-    hazcam_depth_to_image_transform.translation() *= map_scale;
-    // These two are rotation + translation, so only the translation needs the scale
-    hazcam_to_navcam_aff_trans.translation() *= map_scale;
-    scicam_to_hazcam_aff_trans.translation() *= map_scale;
+    std::cout << "Registration resulted in a scale adjustment of: " << map_scale << ".\n";
+    // We do not change depth_to_image_scales since multiplying the
+    // affine component of normalized_depth_to_image is enough.
+    for (int cam_type = 0; cam_type < num_cam_types; cam_type++) {
+      // This transform is affine, so both the linear and translation parts need a scale
+      normalized_depth_to_image[cam_type].linear() *= map_scale;
+      normalized_depth_to_image[cam_type].translation() *= map_scale;
+      // This is a rotation + translation, so only the translation needs the scale
+      ref_to_cam_trans[cam_type].translation() *= map_scale;
+    }
   }

+  // Update the optimized depth to image (for haz cam only)
+  haz_cam_depth_to_image_scale = depth_to_image_scales[haz_cam_type];
+  haz_cam_depth_to_image_transform = normalized_depth_to_image[haz_cam_type];
+  haz_cam_depth_to_image_transform.linear() *= haz_cam_depth_to_image_scale;
+
+  // Update the config file
+  dense_map::updateConfigFile(cam_names, "haz_cam_depth_to_image_transform",
+                              cam_params, ref_to_cam_trans,
+                              ref_to_cam_timestamp_offsets,
+                              haz_cam_depth_to_image_transform);
+
   std::cout << "Writing: " << FLAGS_output_map << std::endl;
   sparse_map->Save(FLAGS_output_map);

-  if (!FLAGS_opt_map_only) {
-    // update nav and haz
-    bool update_cam1 = true, update_cam2 = true;
-    std::string cam1_name = "nav_cam", cam2_name = "haz_cam";
-    boost::shared_ptr<camera::CameraParameters>
-      cam1_params(new camera::CameraParameters(nav_cam_params));
-    boost::shared_ptr<camera::CameraParameters>
-      cam2_params(new camera::CameraParameters(haz_cam_params));
-    bool update_depth_to_image_transform = true;
-    bool update_extrinsics = true;
-    bool update_timestamp_offset = true;
-    std::string cam1_to_cam2_timestamp_offset_str = "navcam_to_hazcam_timestamp_offset";
-    // Modify in-place the robot config file
-    dense_map::update_config_file(update_cam1, cam1_name, cam1_params,
-                                  update_cam2, cam2_name, cam2_params,
-                                  update_depth_to_image_transform,
-                                  hazcam_depth_to_image_transform, update_extrinsics,
-                                  hazcam_to_navcam_aff_trans, update_timestamp_offset,
-                                  cam1_to_cam2_timestamp_offset_str,
-                                  navcam_to_hazcam_timestamp_offset);
-  }
-  if (!FLAGS_opt_map_only) {
-    // update sci and haz
-    // TODO(oalexan1): Write a single function to update all 3 of them
-    bool update_cam1 = true, update_cam2 = true;
-    std::string cam1_name = "sci_cam", cam2_name = "haz_cam";
-    boost::shared_ptr<camera::CameraParameters>
-      cam1_params(new camera::CameraParameters(sci_cam_params));
-    boost::shared_ptr<camera::CameraParameters>
-      cam2_params(new camera::CameraParameters(haz_cam_params));
-    bool update_depth_to_image_transform = true;
-    bool update_extrinsics = true;
-    bool update_timestamp_offset = true;
-    std::string
cam1_to_cam2_timestamp_offset_str = "scicam_to_hazcam_timestamp_offset"; - // Modify in-place the robot config file - dense_map::update_config_file(update_cam1, cam1_name, cam1_params, - update_cam2, cam2_name, cam2_params, - update_depth_to_image_transform, - hazcam_depth_to_image_transform, update_extrinsics, - scicam_to_hazcam_aff_trans.inverse(), - update_timestamp_offset, - cam1_to_cam2_timestamp_offset_str, - scicam_to_hazcam_timestamp_offset); + if (FLAGS_out_texture_dir != "") { + // Project each image onto the mesh + + if (FLAGS_mesh == "") + LOG(FATAL) << "Cannot project camera images onto a mesh if a mesh was not provided.\n"; + + // The transform from the world to every camera must be updated + // TODO(oalexan1): Why the call below works without dense_map:: prepended to it? + dense_map::calc_world_to_cam_transforms( // Inputs + cams, world_to_ref_vec, ref_timestamps, ref_to_cam_vec, ref_to_cam_timestamp_offsets, + // Output + world_to_cam); + dense_map::meshProjectCameras(cam_names, cam_params, cams, world_to_cam, mesh, bvh_tree, + ref_cam_type, FLAGS_nav_cam_num_exclude_boundary_pixels, + FLAGS_out_texture_dir); } + return 0; -} // NOLINT +} diff --git a/dense_map/geometry_mapper/tools/camera_refiner2.cc b/dense_map/geometry_mapper/tools/camera_refiner2.cc deleted file mode 100644 index a419be0d..00000000 --- a/dense_map/geometry_mapper/tools/camera_refiner2.cc +++ /dev/null @@ -1,2151 +0,0 @@ -/* Copyright (c) 2021, United States Government, as represented by the - * Administrator of the National Aeronautics and Space Administration. - * - * All rights reserved. - * - * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking - * platform" software is licensed under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ - -// TODO(oalexan1): Consider adding a haz cam to haz cam -// reprojection error in the camera refiner. There will be more -// haz to haz matches than haz to nav or haz to sci. -// TODO(oalexan1): Must document that the cloud timestamp -// that is looked up is what is closest to image timestamp. -// TODO(oalexan1): What if the wrong cloud is looked up -// for given image? Or if the delay is too big? - -// TODO(oalexan1): Must test the DepthError with no bracketing. 
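A note on the scale bookkeeping in the registration hunks above: registration rescales world coordinates by a factor s, i.e. X' = s X. A rigid world-to-camera transform X_cam = R X + t must then become R X' + s t, so only its translation absorbs s. A depth-to-image transform is a general affine map D(X) = A X + t from depth units to world units, so both A and t absorb s. With the scale of A defined as det(A)^(1/3) (the convention used for hazcam_depth_to_image_scale below), det(s A) = s^3 det(A), so that stored scale picks up exactly the factor s as well; this is why multiplying the affine component alone is enough and the scale need not be updated separately.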
- -// TODO(oalexan1): Move this to utils -// Get rid of warning beyond our control -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" -#pragma GCC diagnostic ignored "-Wunused-function" -#pragma GCC diagnostic push -#include -#include -#include -#include -#include -#pragma GCC diagnostic pop - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and " - "full-resolution sci_cam data."); - -DEFINE_string(sparse_map, "", - "A registered SURF sparse map made with some of the ROS bag data, " - "and including nav cam images closely bracketing the sci cam images."); - -DEFINE_string(output_map, "", "Output file containing the updated map."); - -DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); - -DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", - "The depth point cloud topic in the bag file."); - -DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", - "The depth camera intensity topic in the bag file."); - -DEFINE_string(sci_cam_topic, "/hw/cam_sci/compressed", "The sci cam topic in the bag file."); - -DEFINE_int32(num_overlaps, 20, "How many images (of all camera types) close and forward in " - "time to match to given image."); - -DEFINE_double(max_haz_cam_image_to_depth_timestamp_diff, 0.2, - "Use depth haz cam clouds that are within this distance in " - "time from the nearest haz cam intensity image."); - -DEFINE_double(robust_threshold, 3.0, - "Pixel errors much larger than this will be exponentially attenuated " - "to affect less the cost function."); - -DEFINE_int32(num_iterations, 20, "How many solver iterations to perform in calibration."); - -DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables change by " - "less than this."); - -DEFINE_double(bracket_len, 2.0, - "Lookup sci and haz cam images only between consecutive nav cam images " - "whose distance in time is no more than this (in seconds), after adjusting " - "for the timestamp offset between these cameras. It is assumed the robot " - "moves slowly and uniformly during this time."); - -DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization."); - -DEFINE_string(sci_cam_timestamps, "", - "Use only these sci cam timestamps. Must be " - "a file with one timestamp per line."); - -DEFINE_bool(float_sparse_map, false, "Optimize the sparse map, hence only the camera params."); - -DEFINE_bool(float_scale, false, - "If to optimize the scale of the clouds (use it if the " - "sparse map is kept fixed), or else rescaling and registration is needed."); - -DEFINE_bool(float_timestamp_offsets, false, - "If to optimize the timestamp offsets among the cameras."); - -DEFINE_double(timestamp_offsets_max_change, 1.0, - "If floating the timestamp offsets, do not let them change by more than this " - "(measured in seconds). Existing image bracketing acts as an additional constraint."); - -DEFINE_string(nav_cam_intrinsics_to_float, "", - "Refine 0 or more of the following intrinsics for nav_cam: focal_length, " - "optical_center, distortion. Specify as a quoted list. 
" - "For example: 'focal_length optical_center'."); - -DEFINE_string(haz_cam_intrinsics_to_float, "", - "Refine 0 or more of the following intrinsics for haz_cam: focal_length, " - "optical_center, distortion. Specify as a quoted list. " - "For example: 'focal_length optical_center'."); - -DEFINE_string(sci_cam_intrinsics_to_float, "", - "Refine 0 or more of the following intrinsics for sci_cam: focal_length, " - "optical_center, distortion. Specify as a quoted list. " - "For example: 'focal_length optical_center'."); - -DEFINE_double(scicam_to_hazcam_timestamp_offset_override_value, - std::numeric_limits::quiet_NaN(), - "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " - "file with this value."); - -DEFINE_double(depth_weight, 10.0, - "The weight to give to depth measurements. Use a bigger number as " - "depth errors are usually small fractions of a meter."); - -DEFINE_string(mesh, "", - "Refine the sci cam so that the sci cam texture agrees with the nav cam " - "texture when projected on this mesh."); - -DEFINE_double(mesh_weight, 25.0, - "A larger value will give more weight to the mesh constraint. " - "Use a bigger number as depth errors are usually small fractions of a meter."); - -DEFINE_bool(verbose, false, - "Print the residuals and save the images and match files." - "Stereo Pipeline's viewer can be used for visualizing these."); - -namespace dense_map { - - // Calculate interpolated world to camera trans - Eigen::Affine3d calc_world_to_cam_trans(const double* beg_world_to_ref_t, - const double* end_world_to_ref_t, - const double* ref_to_cam_trans, - double beg_ref_stamp, - double end_ref_stamp, - double ref_to_cam_offset, - double cam_stamp) { - Eigen::Affine3d beg_world_to_ref_aff; - array_to_rigid_transform(beg_world_to_ref_aff, beg_world_to_ref_t); - - Eigen::Affine3d end_world_to_ref_aff; - array_to_rigid_transform(end_world_to_ref_aff, end_world_to_ref_t); - - Eigen::Affine3d ref_to_cam_aff; - array_to_rigid_transform(ref_to_cam_aff, ref_to_cam_trans); - - // std::cout.precision(18); - // std::cout << "--beg stamp " << beg_ref_stamp << std::endl; - // std::cout << "--end stamp " << end_ref_stamp << std::endl; - // std::cout << "cam stamp " << cam_stamp << std::endl; - // std::cout.precision(18); - // std::cout << "ref to cam off " << ref_to_cam_offset << std::endl; - // std::cout << "--ref to cam trans\n" << ref_to_cam_aff.matrix() << std::endl; - - // Covert from cam time to ref time and normalize. It is very - // important that below we subtract the big numbers from each - // other first, which are the timestamps, then subtract whatever - // else is necessary. Otherwise we get problems with numerical - // precision with CERES. - double alpha = ((cam_stamp - beg_ref_stamp) - ref_to_cam_offset) - / (end_ref_stamp - beg_ref_stamp); - - if (beg_ref_stamp == end_ref_stamp) - alpha = 0.0; // handle division by zero - - // std::cout << "--alpha " << alpha << std::endl; - if (alpha < 0.0 || alpha > 1.0) LOG(FATAL) << "Out of bounds in interpolation.\n"; - - // Interpolate at desired time - Eigen::Affine3d interp_world_to_ref_aff - = dense_map::linearInterp(alpha, beg_world_to_ref_aff, end_world_to_ref_aff); - - Eigen::Affine3d interp_world_to_cam_afff = ref_to_cam_aff * interp_world_to_ref_aff; - - // std::cout << "final trans\n" << interp_world_to_cam_afff.matrix() << std::endl; - - return interp_world_to_cam_afff; - } - - // TODO(oalexan1): Store separately matches which end up being - // squashed in a pid_cid_to_fid. 
- -ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) { - // Convert to lower-case - std::transform(cost_fun.begin(), cost_fun.end(), cost_fun.begin(), ::tolower); - - ceres::LossFunction* loss_function = NULL; - if (cost_fun == "l2") - loss_function = NULL; - else if (cost_fun == "huber") - loss_function = new ceres::HuberLoss(th); - else if (cost_fun == "cauchy") - loss_function = new ceres::CauchyLoss(th); - else if (cost_fun == "l1") - loss_function = new ceres::SoftLOneLoss(th); - else - LOG(FATAL) << "Unknown cost function: " + cost_fun; - - return loss_function; -} - -// An error function minimizing the error of projecting -// an xyz point into a camera that is bracketed by -// two reference cameras. The precise timestamp offset -// between them is also floated. -struct BracketedCamError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - BracketedCamError(Eigen::Vector2d const& meas_dist_pix, - double left_ref_stamp, double right_ref_stamp, double cam_stamp, - std::vector const& block_sizes, - camera::CameraParameters const& cam_params): - m_meas_dist_pix(meas_dist_pix), - m_left_ref_stamp(left_ref_stamp), - m_right_ref_stamp(right_ref_stamp), - m_cam_stamp(cam_stamp), - m_block_sizes(block_sizes), - m_cam_params(cam_params), - m_num_focal_lengths(1) { - // Sanity check - if (m_block_sizes.size() != 8 || m_block_sizes[0] != NUM_RIGID_PARAMS || - m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_RIGID_PARAMS || - m_block_sizes[3] != NUM_XYZ_PARAMS || m_block_sizes[4] != NUM_SCALAR_PARAMS || - m_block_sizes[5] != m_num_focal_lengths || m_block_sizes[6] != NUM_OPT_CTR_PARAMS || - m_block_sizes[7] != 1 // This will be overwritten shortly - ) { - LOG(FATAL) << "BracketedCamError: The block sizes were not set up properly.\n"; - } - - // Set the correct distortion size. This cannot be done in the interface for now. - // TODO(oalexan1): Make individual block sizes for each camera, then this issue will go away. - m_block_sizes[7] = m_cam_params.GetDistortion().size(); - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. 
- bool operator()(double const* const* parameters, double* residuals) const { - Eigen::Affine3d world_to_cam_trans = - calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t - parameters[1], // end_world_to_ref_t - parameters[2], // ref_to_cam_trans - m_left_ref_stamp, m_right_ref_stamp, - parameters[4][0], // ref_to_cam_offset - m_cam_stamp); - - // World point - Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); - // std::cout << "--bracketX is " << X.transpose() << std::endl; - - // Make a deep copy which we will modify - camera::CameraParameters cam_params = m_cam_params; - Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[5][0], parameters[5][0]); - Eigen::Vector2d optical_center(parameters[6][0], parameters[6][1]); - Eigen::VectorXd distortion(m_block_sizes[7]); - for (int i = 0; i < m_block_sizes[7]; i++) distortion[i] = parameters[7][i]; - cam_params.SetFocalLength(focal_vector); - cam_params.SetOpticalOffset(optical_center); - cam_params.SetDistortion(distortion); - - // std::cout << "--focal vector " << focal_vector.transpose() << std::endl; - // std::cout << "--opt ctr " << optical_center.transpose() << std::endl; - // std::cout << "-dist " << distortion.transpose() << std::endl; - - // Convert world point to given cam coordinates - X = world_to_cam_trans * X; - - // std::cout << "--trans X " << X.transpose() << std::endl; - - // Project into the image - Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - Eigen::Vector2d curr_dist_pix; - cam_params.Convert(undist_pix, &curr_dist_pix); - - // Compute the residuals - residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; - residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; - - // std::cout << "--residuals " << residuals[0] << ' ' << residuals[1] << std::endl; - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* - Create(Eigen::Vector2d const& meas_dist_pix, double left_ref_stamp, double right_ref_stamp, - double cam_stamp, std::vector const& block_sizes, - camera::CameraParameters const& cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction - (new BracketedCamError(meas_dist_pix, left_ref_stamp, right_ref_stamp, - cam_stamp, block_sizes, cam_params)); - - cost_function->SetNumResiduals(NUM_RESIDUALS); - - // The camera wrapper knows all of the block sizes to add, except - // for distortion, which is last - for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 - cost_function->AddParameterBlock(block_sizes[i]); - - // The distortion block size is added separately as it is variable - cost_function->AddParameterBlock(cam_params.GetDistortion().size()); - - return cost_function; - } - - private: - Eigen::Vector2d m_meas_dist_pix; // Measured distorted current camera pixel - double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps - double m_cam_stamp; // Current cam timestamp - std::vector m_block_sizes; - camera::CameraParameters m_cam_params; - int m_num_focal_lengths; -}; // End class BracketedCamError - -// An error function minimizing the error of projecting an xyz point -// into a reference camera. Bracketing, timestamps, and transform to -// ref cam are not needed. 
-struct RefCamError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - RefCamError(Eigen::Vector2d const& meas_dist_pix, - std::vector const& block_sizes, - camera::CameraParameters const& cam_params): - m_meas_dist_pix(meas_dist_pix), - m_block_sizes(block_sizes), - m_cam_params(cam_params), - m_num_focal_lengths(1) { - // Sanity check - if (m_block_sizes.size() != 5 || - m_block_sizes[0] != NUM_RIGID_PARAMS || - m_block_sizes[1] != NUM_XYZ_PARAMS || - m_block_sizes[2] != m_num_focal_lengths || - m_block_sizes[3] != NUM_OPT_CTR_PARAMS || - m_block_sizes[4] != 1 // This will be overwritten shortly - ) { - LOG(FATAL) << "RefCamError: The block sizes were not set up properly.\n"; - } - - // Set the correct distortion size. This cannot be done in the interface for now. - - // TODO(oalexan1): Make individual block sizes for each camera, - // then this issue will go away. - m_block_sizes[4] = m_cam_params.GetDistortion().size(); - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. - bool operator()(double const* const* parameters, double* residuals) const { - Eigen::Affine3d world_to_ref_t; - array_to_rigid_transform(world_to_ref_t, parameters[0]); - - // World point - Eigen::Vector3d X; - for (int it = 0; it < NUM_XYZ_PARAMS; it++) X[it] = parameters[1][it]; - // std::cout << "--refX is " << X.transpose() << std::endl; - - // Make a deep copy which we will modify - camera::CameraParameters cam_params = m_cam_params; - Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[2][0], parameters[2][0]); - Eigen::Vector2d optical_center(parameters[3][0], parameters[3][1]); - Eigen::VectorXd distortion(m_block_sizes[4]); - for (int i = 0; i < m_block_sizes[4]; i++) distortion[i] = parameters[4][i]; - cam_params.SetFocalLength(focal_vector); - cam_params.SetOpticalOffset(optical_center); - cam_params.SetDistortion(distortion); - - // std::cout << "--focal vector " << focal_vector.transpose() << std::endl; - // std::cout << "--opt ctr " << optical_center.transpose() << std::endl; - // std::cout << "-dist " << distortion.transpose() << std::endl; - - // Convert world point to given cam coordinates - X = world_to_ref_t * X; - - // std::cout << "--trans X " << X.transpose() << std::endl; - - // Project into the image - Eigen::Vector2d undist_pix = cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); - Eigen::Vector2d curr_dist_pix; - cam_params.Convert(undist_pix, &curr_dist_pix); - - // Compute the residuals - residuals[0] = curr_dist_pix[0] - m_meas_dist_pix[0]; - residuals[1] = curr_dist_pix[1] - m_meas_dist_pix[1]; - - // std::cout << "--residuals " << residuals[0] << ' ' << residuals[1] << std::endl; - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. - static ceres::CostFunction* - Create(Eigen::Vector2d const& meas_dist_pix, - std::vector const& block_sizes, - camera::CameraParameters const& cam_params) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction - (new RefCamError(meas_dist_pix, block_sizes, cam_params)); - - // The residual size is always the same. 
- cost_function->SetNumResiduals(NUM_RESIDUALS); - - // The camera wrapper knows all of the block sizes to add, except - // for distortion, which is last - for (size_t i = 0; i + 1 < block_sizes.size(); i++) // note the i + 1 - cost_function->AddParameterBlock(block_sizes[i]); - - // The distortion block size is added separately as it is variable - cost_function->AddParameterBlock(cam_params.GetDistortion().size()); - - return cost_function; - } - - private: - Eigen::Vector2d m_meas_dist_pix; - std::vector m_block_sizes; - camera::CameraParameters m_cam_params; - int m_num_focal_lengths; -}; // End class RefCamError - -// An error function minimizing the product of a given weight and the -// error between a triangulated point and a measured depth point. The -// depth point needs to be transformed to world coordinates first. For -// that one has to do pose interpolation. -struct BracketedDepthError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - BracketedDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, - double left_ref_stamp, double right_ref_stamp, double cam_stamp, - std::vector const& block_sizes): - m_weight(weight), - m_meas_depth_xyz(meas_depth_xyz), - m_left_ref_stamp(left_ref_stamp), - m_right_ref_stamp(right_ref_stamp), - m_cam_stamp(cam_stamp), - m_block_sizes(block_sizes) { - // Sanity check - if (m_block_sizes.size() != 7 || - m_block_sizes[0] != NUM_RIGID_PARAMS || - m_block_sizes[1] != NUM_RIGID_PARAMS || - m_block_sizes[2] != NUM_RIGID_PARAMS || - m_block_sizes[3] != NUM_RIGID_PARAMS || - m_block_sizes[4] != NUM_SCALAR_PARAMS || - m_block_sizes[5] != NUM_XYZ_PARAMS || - m_block_sizes[6] != NUM_SCALAR_PARAMS) { - LOG(FATAL) << "BracketedDepthError: The block sizes were not set up properly.\n"; - } - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. - bool operator()(double const* const* parameters, double* residuals) const { - // Current world to camera transform - Eigen::Affine3d world_to_cam_trans = - calc_world_to_cam_trans(parameters[0], // beg_world_to_ref_t - parameters[1], // end_world_to_ref_t - parameters[2], // ref_to_cam_trans - m_left_ref_stamp, m_right_ref_stamp, - parameters[6][0], // ref_to_cam_offset - m_cam_stamp); - - // The current transform from the depth point cloud to the camera image - Eigen::Affine3d depth_to_image; - array_to_rigid_transform(depth_to_image, parameters[3]); - - // Apply the scale - double depth_to_image_scale = parameters[4][0]; - depth_to_image.linear() *= depth_to_image_scale; - // std::cout << "--depth to image:\n" << depth_to_image.matrix() << std::endl; - - // std::cout << "--meas pt " << m_meas_depth_xyz.transpose() << std::endl; - - // Convert from depth cloud coordinates to cam coordinates - Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; - - // std::cout << "--image meas pt " << M.transpose() << std::endl; - - // Convert to world coordinates - M = world_to_cam_trans.inverse() * M; - // std::cout << "--depth in world coords " << M.transpose() << std::endl; - - // Triangulated world point - Eigen::Vector3d X(parameters[5][0], parameters[5][1], parameters[5][2]); - // std::cout << "--triangulated X is " << X.transpose() << std::endl; - - // std::cout << "--weight " << m_weight << std::endl; - - // Compute the residuals - for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { - residuals[it] = m_weight * (X[it] - M[it]); - // std::cout << "--residual " << residuals[it] << std::endl; - } - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. 
- static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz, - double left_ref_stamp, double right_ref_stamp, - double cam_stamp, std::vector const& block_sizes) { - ceres::DynamicNumericDiffCostFunction* cost_function = - new ceres::DynamicNumericDiffCostFunction - (new BracketedDepthError(weight, meas_depth_xyz, left_ref_stamp, right_ref_stamp, - cam_stamp, block_sizes)); - - // The residual size is always the same. - cost_function->SetNumResiduals(NUM_XYZ_PARAMS); - - for (size_t i = 0; i < block_sizes.size(); i++) - cost_function->AddParameterBlock(block_sizes[i]); - - return cost_function; - } - - private: - double m_weight; // How much weight to give to this constraint - Eigen::Vector3d m_meas_depth_xyz; // Measured depth measurement - double m_left_ref_stamp, m_right_ref_stamp; // left and right ref cam timestamps - double m_cam_stamp; // Current cam timestamp - std::vector m_block_sizes; -}; // End class BracketedDepthError - -// An error function minimizing the product of a given weight and the -// error between a triangulated point and a measured depth point for -// the ref camera. The depth point needs to be transformed to world -// coordinates first. -struct RefDepthError { - EIGEN_MAKE_ALIGNED_OPERATOR_NEW - RefDepthError(double weight, Eigen::Vector3d const& meas_depth_xyz, - std::vector const& block_sizes): - m_weight(weight), - m_meas_depth_xyz(meas_depth_xyz), - m_block_sizes(block_sizes) { - // Sanity check - if (m_block_sizes.size() != 4 || m_block_sizes[0] != NUM_RIGID_PARAMS || - m_block_sizes[1] != NUM_RIGID_PARAMS || m_block_sizes[2] != NUM_SCALAR_PARAMS || - m_block_sizes[3] != NUM_XYZ_PARAMS) { - LOG(FATAL) << "RefDepthError: The block sizes were not set up properly.\n"; - } - } - - // Call to work with ceres::DynamicNumericDiffCostFunction. - bool operator()(double const* const* parameters, double* residuals) const { - // Current world to camera transform - Eigen::Affine3d world_to_cam; - array_to_rigid_transform(world_to_cam, parameters[0]); - - // The current transform from the depth point cloud to the camera image - Eigen::Affine3d depth_to_image; - array_to_rigid_transform(depth_to_image, parameters[1]); - - // Apply the scale - double depth_to_image_scale = parameters[2][0]; - depth_to_image.linear() *= depth_to_image_scale; - // std::cout << "--depth to image:\n" << depth_to_image.matrix() << std::endl; - - // std::cout << "--meas pt " << m_meas_depth_xyz.transpose() << std::endl; - - // Convert from depth cloud coordinates to cam coordinates - Eigen::Vector3d M = depth_to_image * m_meas_depth_xyz; - - // std::cout << "--image meas pt " << M.transpose() << std::endl; - - // Convert to world coordinates - M = world_to_cam.inverse() * M; - // std::cout << "--depth in world coords " << M.transpose() << std::endl; - - // Triangulated world point - Eigen::Vector3d X(parameters[3][0], parameters[3][1], parameters[3][2]); - // std::cout << "--triangulated X is " << X.transpose() << std::endl; - - // std::cout << "--weight " << m_weight << std::endl; - - // Compute the residuals - for (size_t it = 0; it < NUM_XYZ_PARAMS; it++) { - residuals[it] = m_weight * (X[it] - M[it]); - // std::cout << "--residual " << residuals[it] << std::endl; - } - - return true; - } - - // Factory to hide the construction of the CostFunction object from the client code. 
-  static ceres::CostFunction* Create(double weight, Eigen::Vector3d const& meas_depth_xyz,
-                                     std::vector<int> const& block_sizes) {
-    ceres::DynamicNumericDiffCostFunction<RefDepthError>* cost_function =
-      new ceres::DynamicNumericDiffCostFunction<RefDepthError>
-      (new RefDepthError(weight, meas_depth_xyz, block_sizes));
-
-    cost_function->SetNumResiduals(NUM_XYZ_PARAMS);
-
-    for (size_t i = 0; i < block_sizes.size(); i++)
-      cost_function->AddParameterBlock(block_sizes[i]);
-
-    return cost_function;
-  }
-
- private:
-  double m_weight;                   // How much weight to give to this constraint
-  Eigen::Vector3d m_meas_depth_xyz;  // Measured depth measurement
-  std::vector<int> m_block_sizes;
-};  // End class RefDepthError
-
-// An error function minimizing a weight times the distance from a
-// variable xyz point to a fixed reference xyz point.
-struct XYZError {
-  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
-  XYZError(Eigen::Vector3d const& ref_xyz, std::vector<int> const& block_sizes, double weight)
-    : m_ref_xyz(ref_xyz), m_block_sizes(block_sizes), m_weight(weight) {
-    // Sanity check
-    if (m_block_sizes.size() != 1 || m_block_sizes[0] != NUM_XYZ_PARAMS)
-      LOG(FATAL) << "XYZError: The block sizes were not set up properly.\n";
-  }
-
-  // Call to work with ceres::DynamicNumericDiffCostFunction.
-  // Takes array of arrays as parameters.
-  // TODO(oalexan1): May want to use the analytical Ceres cost function
-  bool operator()(double const* const* parameters, double* residuals) const {
-    // Compute the residuals
-    for (int it = 0; it < NUM_XYZ_PARAMS; it++)
-      residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]);
-
-    return true;
-  }
-
-  // Factory to hide the construction of the CostFunction object from the client code.
-  static ceres::CostFunction* Create(Eigen::Vector3d const& ref_xyz,
-                                     std::vector<int> const& block_sizes,
-                                     double weight) {
-    ceres::DynamicNumericDiffCostFunction<XYZError>* cost_function =
-      new ceres::DynamicNumericDiffCostFunction<XYZError>(new XYZError(ref_xyz, block_sizes, weight));
-
-    // The residual size is always the same
-    cost_function->SetNumResiduals(NUM_XYZ_PARAMS);
-
-    // The camera wrapper knows all of the block sizes to add.
-    for (size_t i = 0; i < block_sizes.size(); i++) {
-      cost_function->AddParameterBlock(block_sizes[i]);
-    }
-    return cost_function;
-  }
-
- private:
-  Eigen::Vector3d m_ref_xyz;  // reference xyz
-  std::vector<int> m_block_sizes;
-  double m_weight;
-};  // End class XYZError
-
-enum imgType { NAV_CAM, SCI_CAM, HAZ_CAM };
-
-// Calculate the 25, 50, and 75 percentile residuals for each residual type.
-void calc_median_residuals(std::vector<double> const& residuals,
-                           std::vector<std::string> const& residual_names,
-                           std::string const& tag) {
-  size_t num = residuals.size();
-
-  if (num != residual_names.size())
-    LOG(FATAL) << "There must be as many residuals as residual names.";
-
-  std::map<std::string, std::vector<double> > stats;
-  for (size_t it = 0; it < residuals.size(); it++)
-    stats[residual_names[it]] = std::vector<double>();  // initialize
-
-  for (size_t it = 0; it < residuals.size(); it++)
-    stats[residual_names[it]].push_back(std::abs(residuals[it]));
-
-  std::cout << "The 25, 50 and 75 percentile residual stats " << tag << std::endl;
-  for (auto it = stats.begin(); it != stats.end(); it++) {
-    std::string const& name = it->first;
-    std::vector<double> vals = stats[name];  // make a copy
-    std::sort(vals.begin(), vals.end());
-
-    int len = vals.size();
-
-    int it1 = static_cast<int>(0.25 * len);
-    int it2 = static_cast<int>(0.50 * len);
-    int it3 = static_cast<int>(0.75 * len);
-
-    if (len == 0)
-      std::cout << name << ": " << "none";
-    else
-      std::cout << std::setprecision(8)
-                << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' << vals[it3];
-    std::cout << " (" << len << " residuals)" << std::endl;
-  }
-}
-
-  // TODO(oalexan1): This needs to be handled better.
-  void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) {
-    int raw_image_cols = image.cols;
-    int raw_image_rows = image.rows;
-    int calib_image_cols = cam_params.GetDistortedSize()[0];
-    int calib_image_rows = cam_params.GetDistortedSize()[1];
-
-    int factor = raw_image_cols / calib_image_cols;
-
-    if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) {
-      LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n"
-                 << "Calibrated image width and height are: "
-                 << calib_image_cols << ' ' << calib_image_rows << "\n"
-                 << "These must be equal up to an integer factor.\n";
-    }
-
-    if (factor != 1) {
-      // TODO(oalexan1): This kind of resizing may be creating aliased images.
-      cv::Mat local_image;
-      cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA);
-      local_image.copyTo(image);
-    }
-
-    // Check
-    if (image.cols != calib_image_cols || image.rows != calib_image_rows)
-      LOG(FATAL) << "Sci cam images have the wrong size.";
-  }
-
-  typedef std::map<std::pair<int, int>, dense_map::MATCH_PAIR> MATCH_MAP;
-
-  // A class to encompass all known information about a camera
-  // This is work in progress and will replace some of the logic further down.
-  struct cameraImage {
-    // An index to look up the type of camera. This will equal the
-    // value ref_camera_type if and only if this is a reference
-    // camera.
-    int camera_type;
-
-    // The timestamp for this camera (in floating point seconds since epoch)
-    double timestamp;
-
-    // The timestamp with an adjustment added to it to be in
-    // reference camera time
-    double ref_timestamp;
-
-    // Indices to look up the reference cameras bracketing this camera
-    // in time. The two indices will have the same value if and only if
-    // this is a reference camera.
-    int beg_ref_index;
-    int end_ref_index;
-
-    // The image for this camera, in grayscale
-    cv::Mat image;
-
-    // The corresponding depth cloud, for an image + depth camera
-    cv::Mat depth_cloud;
-  };
-
-  // Sort by timestamps in the ref camera clock
-  bool timestampLess(cameraImage i, cameraImage j) {
-    return (i.ref_timestamp < j.ref_timestamp);
-  }
-
-  // Find the haz cam depth measurement. Use nearest neighbor interpolation
-  // to look into the depth cloud.
-  bool depthValue(  // Inputs
-                  cv::Mat const& depth_cloud, Eigen::Vector2d const& dist_ip,
-                  // Output
-                  Eigen::Vector3d& depth_xyz) {
-    depth_xyz = Eigen::Vector3d(0, 0, 0);  // initialize
-
-    if (depth_cloud.cols == 0 && depth_cloud.rows == 0) return false;  // empty cloud
-
-    int col = round(dist_ip[0]);
-    int row = round(dist_ip[1]);
-
-    if (col < 0 || row < 0 || col > depth_cloud.cols || row > depth_cloud.rows)
-      LOG(FATAL) << "Out of range in depth cloud.";
-
-    // After rounding one may hit the bound
-    if (col == depth_cloud.cols || row == depth_cloud.rows)
-      return false;
-
-    cv::Vec3f cv_depth_xyz = depth_cloud.at<cv::Vec3f>(row, col);
-
-    // Skip invalid measurements
-    if (cv_depth_xyz == cv::Vec3f(0, 0, 0))
-      return false;
-
-    depth_xyz = Eigen::Vector3d(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]);
-
-    return true;
-  }
-
-  // Rebuild the map.
-  // TODO(oalexan1): This must be integrated in astrobee.
-  void RebuildMap(std::string const& map_file,  // Will be used for temporary saving of aux data
-                  camera::CameraParameters const& cam_params,
-                  boost::shared_ptr<sparse_mapping::SparseMap> sparse_map) {
-    std::string rebuild_detector = "SURF";
-    std::cout << "Rebuilding map with " << rebuild_detector << " detector.";
-
-    // Copy some data to make sure it does not get lost on resetting things below
-    std::vector<Eigen::Affine3d> world_to_ref_t = sparse_map->cid_to_cam_t_global_;
-    std::vector<std::map<int, int> > pid_to_cid_fid = sparse_map->pid_to_cid_fid_;
-
-    // Ensure the new camera parameters are set
-    sparse_map->SetCameraParameters(cam_params);
-
-    std::cout << "Detecting features.";
-    sparse_map->DetectFeatures();
-
-    std::cout << "Matching features.";
-    // Borrow from the original map which images should be matched with which.
-    sparse_map->cid_to_cid_.clear();
-    for (size_t p = 0; p < pid_to_cid_fid.size(); p++) {
-      std::map<int, int> const& track = pid_to_cid_fid[p];  // alias
-      for (std::map<int, int>::const_iterator it1 = track.begin();
-           it1 != track.end(); it1++) {
-        for (std::map<int, int>::const_iterator it2 = it1;
-             it2 != track.end(); it2++) {
-          if (it1->first != it2->first) {
-            // Never match an image with itself
-            sparse_map->cid_to_cid_[it1->first].insert(it2->first);
-          }
-        }
-      }
-    }
-
-    sparse_mapping::MatchFeatures(sparse_mapping::EssentialFile(map_file),
-                                  sparse_mapping::MatchesFile(map_file), sparse_map.get());
-    for (size_t i = 0; i < world_to_ref_t.size(); i++)
-      sparse_map->SetFrameGlobalTransform(i, world_to_ref_t[i]);
-
-    // Wipe file that is no longer needed
-    try {
-      std::remove(sparse_mapping::EssentialFile(map_file).c_str());
-    } catch (...) {}
-
-    std::cout << "Building tracks.";
-    bool rm_invalid_xyz = true;  // by now cameras are good, so filter out bad stuff
-    sparse_mapping::BuildTracks(rm_invalid_xyz,
-                                sparse_mapping::MatchesFile(map_file),
-                                sparse_map.get());
-
-    // It is essential that during re-building we do not vary the
-    // cameras. Those were usually computed with a lot of SURF features,
-    // while rebuilding is usually done with many fewer ORGBRISK
-    // features.
-    bool fix_cameras = true;
-    if (fix_cameras)
-      std::cout << "Performing bundle adjustment with fixed cameras.";
-    else
-      std::cout << "Performing bundle adjustment while floating cameras.";
-
-    sparse_mapping::BundleAdjust(fix_cameras, sparse_map.get());
-  }
-
-  // TODO(oalexan1): Move this to utils.
-  // Intersect ray with mesh. Return true on success.
- bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, camera::CameraParameters const& cam_params, - Eigen::Affine3d const& world_to_cam, mve::TriangleMesh::Ptr const& mesh, - std::shared_ptr const& bvh_tree, - double min_ray_dist, double max_ray_dist, - // Output - Eigen::Vector3d& intersection) { - // Initialize the output - intersection = Eigen::Vector3d(0.0, 0.0, 0.0); - - // Ray from camera going through the pixel - Eigen::Vector3d cam_ray(undist_pix.x() / cam_params.GetFocalVector()[0], - undist_pix.y() / cam_params.GetFocalVector()[1], 1.0); - cam_ray.normalize(); - - Eigen::Affine3d cam_to_world = world_to_cam.inverse(); - Eigen::Vector3d world_ray = cam_to_world.linear() * cam_ray; - Eigen::Vector3d cam_ctr = cam_to_world.translation(); - - // Set up the ray structure for the mesh - BVHTree::Ray bvh_ray; - bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); - bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); - bvh_ray.dir.normalize(); - - bvh_ray.tmin = min_ray_dist; - bvh_ray.tmax = max_ray_dist; - - // Intersect the ray with the mesh - BVHTree::Hit hit; - if (bvh_tree->intersect(bvh_ray, &hit)) { - double cam_to_mesh_dist = hit.t; - intersection = cam_ctr + cam_to_mesh_dist * world_ray; - return true; - } - - return false; - } - -} // namespace dense_map - -int main(int argc, char** argv) { - ff_common::InitFreeFlyerApplication(&argc, &argv); - GOOGLE_PROTOBUF_VERIFY_VERSION; - - std::cout.precision(17); // to be able to print timestamps - - if (FLAGS_ros_bag.empty()) - LOG(FATAL) << "The bag file was not specified."; - if (FLAGS_sparse_map.empty()) - LOG(FATAL) << "The input sparse map was not specified."; - - if (FLAGS_output_map.empty()) - LOG(FATAL) << "The output sparse map was not specified."; - - if (FLAGS_robust_threshold <= 0.0) - LOG(FATAL) << "The robust threshold must be positive.\n"; - - if (FLAGS_bracket_len <= 0.0) LOG(FATAL) << "Bracket length must be positive."; - - if (FLAGS_num_overlaps < 1) LOG(FATAL) << "Number of overlaps must be positive."; - - if (FLAGS_timestamp_offsets_max_change < 0) - LOG(FATAL) << "The timestamp offsets must be non-negative."; - - // Read the config file - double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; - Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::Affine3d hazcam_depth_to_image_transform; - hazcam_depth_to_image_transform.setIdentity(); // default value - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), - Eigen::Vector2d(0, 0)); - dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", // NOLINT - "scicam_to_hazcam_timestamp_offset", // NOLINT - "hazcam_to_navcam_transform", // NOLINT - "scicam_to_hazcam_transform", // NOLINT - "nav_cam_transform", // NOLINT - "hazcam_depth_to_image_transform", // NOLINT - navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, - hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, - nav_cam_params, haz_cam_params, - sci_cam_params); - - if 
(!std::isnan(FLAGS_scicam_to_hazcam_timestamp_offset_override_value)) { - double new_val = FLAGS_scicam_to_hazcam_timestamp_offset_override_value; - std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset - << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; - scicam_to_hazcam_timestamp_offset = new_val; - } - - if (FLAGS_mesh_weight <= 0.0) - LOG(FATAL) << "The mesh weight must be positive.\n"; - - mve::TriangleMesh::Ptr mesh; - std::shared_ptr mesh_info; - std::shared_ptr graph; - std::shared_ptr bvh_tree; - if (FLAGS_mesh != "") dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); - - // If desired to process only specific timestamps - std::set sci_cam_timestamps_to_use; - if (FLAGS_sci_cam_timestamps != "") { - std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str()); - double val; - while (ifs >> val) sci_cam_timestamps_to_use.insert(val); - } - -#if 0 - std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; - std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; - std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() - << "\n"; -#endif - std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; - - double hazcam_depth_to_image_scale - = pow(hazcam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0); - - // Since we will keep the scale fixed, vary the part of the transform without - // the scale, while adding the scale each time before the transform is applied - Eigen::Affine3d hazcam_depth_to_image_noscale = hazcam_depth_to_image_transform; - hazcam_depth_to_image_noscale.linear() /= hazcam_depth_to_image_scale; - - // Convert hazcam_to_navcam_trans to Affine3d - Eigen::Affine3d hazcam_to_navcam_aff_trans; - hazcam_to_navcam_aff_trans.matrix() = hazcam_to_navcam_trans; - - // Convert scicam_to_hazcam_trans to Affine3d - Eigen::Affine3d scicam_to_hazcam_aff_trans; - scicam_to_hazcam_aff_trans.matrix() = scicam_to_hazcam_trans; - - // Read the sparse map - boost::shared_ptr sparse_map = - boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); - - // TODO(oalexan1): All this timestamp reading logic below should be in a function - - // Parse the ref timestamps from the sparse map - // We assume the sparse map image names are the timestamps. - std::vector ref_timestamps; - const std::vector& sparse_map_images = sparse_map->cid_to_filename_; - ref_timestamps.resize(sparse_map_images.size()); - for (size_t cid = 0; cid < sparse_map_images.size(); cid++) { - double timestamp = dense_map::fileNameToTimestamp(sparse_map_images[cid]); - ref_timestamps[cid] = timestamp; - } - if (ref_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found."; - - // Will optimize the nav cam poses as part of the process - std::vector & world_to_ref_t = sparse_map->cid_to_cam_t_global_; // alias - - // Put transforms of the reference cameras in a vector. We will optimize them. 
- int num_ref_cams = world_to_ref_t.size(); - if (world_to_ref_t.size() != ref_timestamps.size()) - LOG(FATAL) << "Must have as many ref cam timestamps as ref cameras.\n"; - std::vector world_to_ref_vec(num_ref_cams * dense_map::NUM_RIGID_PARAMS); - for (int cid = 0; cid < num_ref_cams; cid++) - dense_map::rigid_transform_to_array(world_to_ref_t[cid], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); - - // We assume our camera rig has n camera types. Each can be image or - // depth + image. Just one camera must be the reference camera. In - // this code it will be nav_cam. Every camera object (class - // cameraImage) knows its type (an index), which can be used to look - // up its intrinsics, image topic, depth topic (if present), - // ref_to_cam_timestamp_offset, and ref_to_cam_transform. A camera - // object also stores its image, depth cloud (if present), its - // timestamp, and indices pointing to its left and right ref - // bracketing cameras. - - // For every instance of a reference camera its - // ref_to_cam_timestamp_offset is 0 and kept fixed, - // ref_to_cam_transform is the identity and kept fixed, and the - // indices pointing to the left and right ref bracketing cameras are - // identical. - - // The info below will eventually come from a file - int num_cam_types = 3; - int ref_cam_type = 0; // Below we assume the starting cam is the ref cam - - // Image and depth topics - std::vector cam_names = {"nav_cam", "haz_cam", "sci_cam"}; - std::vector image_topics = {"/mgt/img_sampler/nav_cam/image_record", - "/hw/depth_haz/extended/amplitude_int", - "/hw/cam_sci/compressed"}; - std::vector depth_topics = {"", "/hw/depth_haz/points", ""}; - - std::vector> cam_timestamps_to_use = {std::set(), - std::set(), - sci_cam_timestamps_to_use}; - - // The timestamp offsets from ref cam to given cam - std::vector ref_to_cam_timestamp_offsets = - {0.0, - navcam_to_hazcam_timestamp_offset, - navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset}; - - for (size_t it = 0; it < ref_to_cam_timestamp_offsets.size(); it++) { - std::cout << "Ref to cam offset for " << cam_names[it] << ' ' - << ref_to_cam_timestamp_offsets[it] << std::endl; - } - - std::vector cam_params = {nav_cam_params, - haz_cam_params, - sci_cam_params}; - - // Which intrinsics from which cameras to float - std::vector> intrinsics_to_float(num_cam_types); - dense_map::parse_intrinsics_to_float(FLAGS_nav_cam_intrinsics_to_float, intrinsics_to_float[0]); - dense_map::parse_intrinsics_to_float(FLAGS_haz_cam_intrinsics_to_float, intrinsics_to_float[1]); - dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, intrinsics_to_float[2]); - - // The transform from ref to given cam - std::vector ref_to_cam_trans; - ref_to_cam_trans.push_back(Eigen::Affine3d::Identity()); - ref_to_cam_trans.push_back(hazcam_to_navcam_aff_trans.inverse()); - ref_to_cam_trans.push_back(scicam_to_hazcam_aff_trans.inverse() - * hazcam_to_navcam_aff_trans.inverse()); - - // Put in arrays, so we can optimize them - std::vector ref_to_cam_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - dense_map::rigid_transform_to_array - (ref_to_cam_trans[cam_type], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - - for (size_t it = 0; it < ref_to_cam_trans.size(); it++) { - std::cout << "Ref to cam transform for " << cam_names[it] << ":\n" - << ref_to_cam_trans[it].matrix() << std::endl; - } - - // Depth to image transforms and scales - std::vector 
depth_to_image_noscale; - std::vector depth_to_image_scales = {1.0, hazcam_depth_to_image_scale, 1.0}; - depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); - depth_to_image_noscale.push_back(hazcam_depth_to_image_noscale); - depth_to_image_noscale.push_back(Eigen::Affine3d::Identity()); - // Put in arrays, so we can optimize them - std::vector depth_to_image_noscale_vec(num_cam_types * dense_map::NUM_RIGID_PARAMS); - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - dense_map::rigid_transform_to_array - (depth_to_image_noscale[cam_type], - &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - - // Put the intrinsics in arrays - std::vector focal_lengths(num_cam_types); - std::vector optical_centers(num_cam_types); - std::vector distortions(num_cam_types); - for (int it = 0; it < num_cam_types; it++) { - focal_lengths[it] = cam_params[it].GetFocalLength(); // average the two focal lengths - optical_centers[it] = cam_params[it].GetOpticalOffset(); - distortions[it] = cam_params[it].GetDistortion(); - } - - // Build a map for quick access for all the messages we may need - // TODO(oalexan1): Must the view be kept open for this to work? - std::vector topics; - for (auto it = 0; it < image_topics.size(); it++) - if (image_topics[it] != "") topics.push_back(image_topics[it]); - for (auto it = 0; it < depth_topics.size(); it++) - if (depth_topics[it] != "") topics.push_back(depth_topics[it]); - rosbag::Bag bag; - bag.open(FLAGS_ros_bag, rosbag::bagmode::Read); - rosbag::View view(bag, rosbag::TopicQuery(topics)); - std::map> bag_map; - dense_map::indexMessages(view, bag_map); - - // A lot of care is needed here. This remembers how we travel in time - // for each camera type so we have fewer messages to search. - // But if a mistake is done below it will mess up this bookkeeping. - std::vector image_start_positions(num_cam_types, 0); - std::vector cloud_start_positions(num_cam_types, 0); - - // Cannot add a (positive) value more this to - // ref_to_cam_timestamp_offsets[cam_type] before getting out of the - // bracket. - std::vector upper_bound(num_cam_types, 1.0e+100); - - // Cannot add a (negative) value less than this to - // ref_to_cam_timestamp_offsets[cam_type] before getting out of the - // bracket. - std::vector lower_bound(num_cam_types, -1.0e+100); - - std::cout << "Bracketing the images in time." << std::endl; - - // Populate the data for each camera image - std::vector cams; - for (int ref_it = 0; ref_it < num_ref_cams; ref_it++) { - std::cout.precision(18); - - if (ref_cam_type != 0) - LOG(FATAL) << "It is assumed that the ref cam type is 0."; - - for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { - dense_map::cameraImage cam; - bool success = false; - if (cam_type == ref_cam_type) { - cam.camera_type = cam_type; - cam.timestamp = ref_timestamps[ref_it]; - cam.ref_timestamp = cam.timestamp; // the time offset is 0 between ref and itself - cam.beg_ref_index = ref_it; - cam.end_ref_index = ref_it; - - // Search the whole set of timestamps, so set start_pos = - // 0. This is slower but more robust than keeping track of how - // we move in the increasing order of time. 
- int start_pos = 0; - bool save_grayscale = true; // for matching we will need grayscale - double found_time = -1.0; - // this has to succeed since we picked the ref images in the map - if (!dense_map::lookupImage(cam.timestamp, bag_map[image_topics[cam_type]], save_grayscale, - // outputs - cam.image, image_start_positions[cam_type], // care here - found_time)) - LOG(FATAL) << std::fixed << std::setprecision(17) - << "Cannot look up camera at time " << cam.timestamp << ".\n"; - - // The exact time is expected - if (found_time != cam.timestamp) - LOG(FATAL) << std::fixed << std::setprecision(17) - << "Cannot look up camera at time " << cam.timestamp << ".\n"; - - // See if to skip this timestamp - if (!cam_timestamps_to_use[cam_type].empty() && - cam_timestamps_to_use[cam_type].find(cam.timestamp) == - cam_timestamps_to_use[cam_type].end()) - continue; - - success = true; - - } else { - if (ref_it + 1 >= num_ref_cams) break; // Arrived at the end, cannot do a bracket - - // Convert the bracketing timestamps to current cam's time - double left_timestamp = ref_timestamps[ref_it] + ref_to_cam_timestamp_offsets[cam_type]; - double right_timestamp = ref_timestamps[ref_it + 1] + ref_to_cam_timestamp_offsets[cam_type]; - - if (right_timestamp <= left_timestamp) - LOG(FATAL) << "Ref timestamps must be in increasing order.\n"; - - if (right_timestamp - left_timestamp > FLAGS_bracket_len) - continue; // Must respect the bracket length - - // Find the image timestamp closest to the midpoint of the brackets. This will give - // more room to vary the timestamp later. - double mid_timestamp = (left_timestamp + right_timestamp)/2.0; - - // Search forward in time from image_start_positions[cam_type]. - // We will update that too later. One has to be very careful - // with it so it does not go too far forward in time - // so that at the next iteration we are passed what we - // search for. - int start_pos = image_start_positions[cam_type]; // care here - bool save_grayscale = true; // for matching we will need grayscale - double curr_timestamp = left_timestamp; // start here - cv::Mat best_image; - double best_dist = 1.0e+100; - double best_time = -1.0, found_time = -1.0; - while (1) { - if (found_time >= right_timestamp) break; // out of range - - cv::Mat image; - if (!dense_map::lookupImage(curr_timestamp, bag_map[image_topics[cam_type]], save_grayscale, - // outputs - image, - start_pos, // care here - found_time)) - break; // Need not succeed, but then there's no need to go on are we are at the end - - std::cout.precision(18); - - double curr_dist = std::abs(found_time - mid_timestamp); - if (curr_dist < best_dist) { - best_dist = curr_dist; - best_time = found_time; - // Update the start position for the future only if this is a good - // solution. Otherwise we may have moved too far. - image_start_positions[cam_type] = start_pos; - image.copyTo(best_image); - } - - // Go forward in time. We count on the fact that lookupImage() looks forward from given guess. 
- curr_timestamp = std::nextafter(found_time, 1.01 * found_time); - } - - if (best_time < 0.0) continue; // bracketing failed - - // Note how we allow best_time == left_timestamp if there's no other choice - if (best_time < left_timestamp || best_time >= right_timestamp) continue; // no luck - - cam.camera_type = cam_type; - cam.timestamp = best_time; - cam.ref_timestamp = best_time - ref_to_cam_timestamp_offsets[cam_type]; - cam.beg_ref_index = ref_it; - cam.end_ref_index = ref_it + 1; - cam.image = best_image; - - upper_bound[cam_type] = std::min(upper_bound[cam_type], best_time - left_timestamp); - lower_bound[cam_type] = std::max(lower_bound[cam_type], best_time - right_timestamp); - - // TODO(oalexan1): Wipe this temporary block - if (cam_type == 1) { - // Must compare raw big timestamps before and after! - // Must compare residuals! - - double nav_start = ref_timestamps[ref_it]; - double haz_time = cam.ref_timestamp; - double nav_end = ref_timestamps[ref_it + 1]; - // std::cout << "--xxxhaz after " << haz_time - nav_start << ' ' << nav_end - haz_time << - // ' ' << nav_end - nav_start << std::endl; - } - if (cam_type == 2) { - double nav_start = ref_timestamps[ref_it]; - double sci_time = cam.ref_timestamp; - double nav_end = ref_timestamps[ref_it + 1]; - // std::cout << "--xxxsci after " << sci_time - nav_start << ' ' << nav_end - sci_time << - // ' ' << nav_end - nav_start << std::endl; - } - - // See if to skip this timestamp - if (!cam_timestamps_to_use[cam_type].empty() && - cam_timestamps_to_use[cam_type].find(cam.timestamp) == - cam_timestamps_to_use[cam_type].end()) - continue; - - success = true; - } - - if (!success) continue; - - if (depth_topics[cam_type] != "") { - double found_time = -1.0; - cv::Mat cloud; - // Look up the closest cloud in time (either before or after cam.timestamp) - // This need not succeed. - dense_map::lookupCloud(cam.timestamp, bag_map[depth_topics[cam_type]], - FLAGS_max_haz_cam_image_to_depth_timestamp_diff, - // Outputs - cam.depth_cloud, - cloud_start_positions[cam_type], // care here - found_time); - } - - cams.push_back(cam); - } // end loop over camera types - } // end loop over ref images - - std::cout << "Allowed timestamp offset range while respecting the bracket for given cameras:\n"; - for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { - if (cam_type == ref_cam_type) continue; // bounds don't make sense here - - // So far we had the relative change. Now add the actual offset to get the max allowed offset. - lower_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; - upper_bound[cam_type] += ref_to_cam_timestamp_offsets[cam_type]; - - std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type] - << ", " << upper_bound[cam_type] << "]\n"; - } - - if (FLAGS_float_timestamp_offsets) { - std::cout << "Given the user constraint the timestamp offsets will float in these ranges:\n"; - for (int cam_type = ref_cam_type; cam_type < num_cam_types; cam_type++) { - if (cam_type == ref_cam_type) continue; // bounds don't make sense here - - lower_bound[cam_type] = std::max(lower_bound[cam_type], - ref_to_cam_timestamp_offsets[cam_type] - - FLAGS_timestamp_offsets_max_change); - - upper_bound[cam_type] = std::min(upper_bound[cam_type], - ref_to_cam_timestamp_offsets[cam_type] - + FLAGS_timestamp_offsets_max_change); - - // Tighten a bit to ensure we don't exceed things when we add - // and subtract timestamps later. 
-    // Note that timestamps are
-    // measured in seconds and fractions of a second since epoch and
-    // can be quite large, so precision loss can easily happen.
-    lower_bound[cam_type] += 1.0e-5;
-    upper_bound[cam_type] -= 1.0e-5;
-
-    std::cout << std::setprecision(8) << cam_names[cam_type] << ": [" << lower_bound[cam_type]
-              << ", " << upper_bound[cam_type] << "]\n";
-    }
-  }
-
-  std::cout << "--Deal with adjustment!" << std::endl;
-  for (size_t it = 0; it < cams.size(); it++) {
-    if (cams[it].camera_type == 2) {
-      dense_map::adjustImageSize(cam_params[2], cams[it].image);
-    }
-  }
-
-  std::sort(cams.begin(), cams.end(), dense_map::timestampLess);
-
-  if (FLAGS_verbose) {
-    int count = 10000;
-    std::vector<std::string> image_files;
-    for (size_t it = 0; it < cams.size(); it++) {
-      std::ostringstream oss;
-      oss << count << "_" << cams[it].camera_type << ".jpg";
-      std::string name = oss.str();
-      std::cout << "--writing " << name << std::endl;
-      cv::imwrite(name, cams[it].image);
-      count++;
-      image_files.push_back(name);
-    }
-  }
-
-  std::cout << "Detecting features." << std::endl;
-
-  // Detect features using multiple threads
-  std::vector<cv::Mat> cid_to_descriptor_map;
-  std::vector<Eigen::Matrix2Xd> cid_to_keypoint_map;
-  cid_to_descriptor_map.resize(cams.size());
-  cid_to_keypoint_map.resize(cams.size());
-  ff_common::ThreadPool thread_pool1;
-  for (size_t it = 0; it < cams.size(); it++) {
-    thread_pool1.AddTask(&dense_map::detectFeatures, cams[it].image, FLAGS_verbose,
-                         &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]);
-  }
-  thread_pool1.Join();
-
-  dense_map::MATCH_MAP matches;
-
-  std::vector<std::pair<int, int> > image_pairs;
-  for (size_t it1 = 0; it1 < cams.size(); it1++) {
-    for (size_t it2 = it1 + 1; it2 < std::min(cams.size(), it1 + FLAGS_num_overlaps + 1); it2++) {
-      image_pairs.push_back(std::make_pair(it1, it2));
-    }
-  }
-
-  std::cout << "Matching features." << std::endl;
-  ff_common::ThreadPool thread_pool2;
-  std::mutex match_mutex;
-  for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) {
-    auto pair = image_pairs[pair_it];
-    int left_image_it = pair.first, right_image_it = pair.second;
-    bool verbose = true;
-    thread_pool2.AddTask(&dense_map::matchFeatures, &match_mutex,
-                         left_image_it, right_image_it,
-                         cid_to_descriptor_map[left_image_it],
-                         cid_to_descriptor_map[right_image_it],
-                         cid_to_keypoint_map[left_image_it],
-                         cid_to_keypoint_map[right_image_it],
-                         verbose, &matches[pair]);
-  }
-  thread_pool2.Join();
-
-  // If feature A in image I matches feature B in image J, which matches feature C in image K,
-  // then (A, B, C) belong together in a track. Build such a track.
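-  // A minimal, self-contained illustration of this track-chaining idea (not part of
-  // this tool, which delegates the job to openMVG::tracks::TracksBuilder further down):
-  // treat each (image id, feature id) pair as a node, each match as an edge, and merge
-  // connected components with union-find; every component becomes one track.
-  //
-  //   #include <map>
-  //   #include <utility>
-  //
-  //   struct TrackUnionFind {
-  //     std::map<std::pair<int, int>, std::pair<int, int>> parent;
-  //     std::pair<int, int> find(std::pair<int, int> x) {
-  //       if (parent.count(x) == 0) parent[x] = x;
-  //       if (parent[x] != x) parent[x] = find(parent[x]);  // path compression
-  //       return parent[x];
-  //     }
-  //     void unite(std::pair<int, int> a, std::pair<int, int> b) {
-  //       parent[find(a)] = find(b);
-  //     }
-  //   };
-  //
-  // After unite({I, A}, {J, B}) and unite({J, B}, {K, C}), the three features share
-  // one root, so (A, B, C) is recovered as a single track.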
- - std::vector, int>> keypoint_map(cams.size()); - - int num_total_matches = 0; // temporary - - // Give all interest points in a given image a unique id - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair const& index_pair = it->first; // alias - - int left_index = index_pair.first; - int right_index = index_pair.second; - - dense_map::MATCH_PAIR const& match_pair = it->second; // alias - std::vector const& left_ip_vec = match_pair.first; - std::vector const& right_ip_vec = match_pair.second; - for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { - auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); - auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); - keypoint_map[left_index][dist_left_ip] = 0; - keypoint_map[right_index][dist_right_ip] = 0; - num_total_matches++; - } - } - - std::cout << "--bbb num total matches " << num_total_matches << std::endl; - std::cout << "--why so many more matches than pid?" << std::endl; - std::cout << "--test adding missing pairs!" << std::endl; - std::cout << "--must do two passes!" << std::endl; - - // Give all interest points in a given image a unique id - // And put them in a vector with the id corresponding to the interest point - std::vector>> keypoint_vec(cams.size()); - for (size_t cam_it = 0; cam_it < cams.size(); cam_it++) { - int count = 0; - for (auto ip_it = keypoint_map[cam_it].begin(); ip_it != keypoint_map[cam_it].end(); ip_it++) { - ip_it->second = count; - count++; - // std::cout << "--value " << (ip_it->first).first << ' ' << (ip_it->first).second << ' ' - // << ip_it->second << std::endl; - keypoint_vec[cam_it].push_back(ip_it->first); - // std::cout << "--size is " << keypoint_vec[cam_it].size() << std::endl; - } - } - - std::cout << "--write my own function! It should just remove conflicts!" 
<< std::endl; - - openMVG::matching::PairWiseMatches match_map; - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair const& index_pair = it->first; // alias - - int left_index = index_pair.first; - int right_index = index_pair.second; - - dense_map::MATCH_PAIR const& match_pair = it->second; // alias - std::vector const& left_ip_vec = match_pair.first; - std::vector const& right_ip_vec = match_pair.second; - - std::vector mvg_matches; - - for (size_t ip_it = 0; ip_it < left_ip_vec.size(); ip_it++) { - auto dist_left_ip = std::make_pair(left_ip_vec[ip_it].x, left_ip_vec[ip_it].y); - auto dist_right_ip = std::make_pair(right_ip_vec[ip_it].x, right_ip_vec[ip_it].y); - - // std::cout << "zzz1 " << left_index << ' ' << dist_left_ip.first << ' ' << - // dist_left_ip.second << ' ' << right_index << ' ' << dist_right_ip.first << ' ' << - // dist_right_ip.second << std::endl; - - int left_id = keypoint_map[left_index][dist_left_ip]; - int right_id = keypoint_map[right_index][dist_right_ip]; - mvg_matches.push_back(openMVG::matching::IndMatch(left_id, right_id)); - } - match_map[index_pair] = mvg_matches; - } - - if (FLAGS_verbose) { - for (auto it = matches.begin(); it != matches.end(); it++) { - std::pair index_pair = it->first; - dense_map::MATCH_PAIR const& match_pair = it->second; - - int left_index = index_pair.first; - int right_index = index_pair.second; - - std::cout << "--indices " << left_index << ' ' << right_index << std::endl; - - std::ostringstream oss1; - oss1 << (10000 + left_index) << "_" << cams[left_index].camera_type; - - std::string left_stem = oss1.str(); - std::string left_image = left_stem + ".jpg"; - - std::ostringstream oss2; - oss2 << (10000 + right_index) << "_" << cams[right_index].camera_type; - - std::string right_stem = oss2.str(); - std::string right_image = right_stem + ".jpg"; - - std::string match_file = left_stem + "__" + right_stem + ".match"; - - std::cout << "Writing: " << left_image << ' ' << right_image << ' ' << match_file << std::endl; - dense_map::writeMatchFile(match_file, match_pair.first, match_pair.second); - } - } - - std::cout << "Find other things which need deallocation!" << std::endl; - std::cout << "De-allocate images and point clouds when no longer needed!" << std::endl; - - // not needed anymore and take up a lot of RAM - matches.clear(); matches = dense_map::MATCH_MAP(); - keypoint_map.clear(); keypoint_map.shrink_to_fit(); - cid_to_descriptor_map.clear(); cid_to_descriptor_map.shrink_to_fit(); - cid_to_keypoint_map.clear(); cid_to_keypoint_map.shrink_to_fit(); - - std::vector > pid_to_cid_fid; - { - // Build tracks - // De-allocate these as soon as not needed to save memory - openMVG::tracks::TracksBuilder trackBuilder; - trackBuilder.Build(match_map); // Build: Efficient fusion of correspondences - trackBuilder.Filter(); // Filter: Remove tracks that have conflict - // trackBuilder.ExportToStream(std::cout); - // Export tracks as a map (each entry is a sequence of imageId and featureIndex): - // {TrackIndex => {(imageIndex, featureIndex), ... ,(imageIndex, featureIndex)} - openMVG::tracks::STLMAPTracks map_tracks; - trackBuilder.ExportToSTL(map_tracks); - - // TODO(oalexan1): Print how many pairwise matches were there before - // and after filtering tracks. - - if (map_tracks.empty()) - LOG(FATAL) << "No tracks left after filtering. 
Perhaps images are too dis-similar?\n"; - - size_t num_elems = map_tracks.size(); - // Populate the filtered tracks - pid_to_cid_fid.clear(); - pid_to_cid_fid.resize(num_elems); - size_t curr_id = 0; - for (auto itr = map_tracks.begin(); itr != map_tracks.end(); itr++) { - for (auto itr2 = (itr->second).begin(); itr2 != (itr->second).end(); itr2++) { - pid_to_cid_fid[curr_id][itr2->first] = itr2->second; - } - curr_id++; - } - } - - // The transform from every camera to the world - std::vector world_to_cam(cams.size()); - for (size_t it = 0; it < cams.size(); it++) { - int beg_index = cams[it].beg_ref_index; - int end_index = cams[it].end_ref_index; - int cam_type = cams[it].camera_type; - // std::cout << "--ref indices " << beg_index << ' ' << end_index << std::endl; - // std::cout << "--cam type " << cam_type << std::endl; - world_to_cam[it] = dense_map::calc_world_to_cam_trans - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - ref_timestamps[beg_index], ref_timestamps[end_index], - ref_to_cam_timestamp_offsets[cam_type], - cams[it].timestamp); - - // std::cout << "--trans for camera: " << it << ' ' << world_to_cam[it].matrix() << std::endl; - } - - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - // std::cout << std::endl; - // std::cout << "pid is " << pid << std::endl; - // std::cout << "---aaa pid size is " << pid_to_cid_fid[pid].size() << std::endl; - // std::cout << "zzz2 "; - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - // std::cout << cid << ' ' << keypoint_vec[cid][fid].first << ' '; - // std::cout << keypoint_vec[cid][fid].second << " "; - } - // std::cout << std::endl; - } - - // Do multiview triangulation - std::vector xyz_vec(pid_to_cid_fid.size()); - - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - Eigen::Vector3d mesh_xyz(0, 0, 0); - int num = 0; - - std::vector focal_length_vec; - std::vector world_to_cam_vec; - std::vector pix_vec; - - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - Eigen::Vector2d undist_ip; - cam_params[cams[cid].camera_type].Convert - (dist_ip, &undist_ip); - - focal_length_vec.push_back(cam_params[cams[cid].camera_type].GetFocalLength()); - world_to_cam_vec.push_back(world_to_cam[cid]); - pix_vec.push_back(undist_ip); - } - - // Triangulate n rays emanating from given undistorted and centered pixels - Eigen::Vector3d joint_xyz = dense_map::Triangulate(focal_length_vec, world_to_cam_vec, pix_vec); - - xyz_vec[pid] = joint_xyz; - - // std::cout << "--xyz1 " << pid << ' ' << xyz_vec[pid].transpose() << std::endl; - } - - std::cout << "--must do two passes!" << std::endl; - std::cout << "--must filter by min triangulation angle and points behind camera" << std::endl; - - // std::vector> cid_fid_to_pid; - // sparse_mapping::InitializeCidFidToPid(cams.size(), pid_to_cid_fid, &cid_fid_to_pid); - - std::cout << "---too many outliers!" << std::endl; - - std::cout << "--start selecting!" 
<< std::endl; - - // Set up the variable blocks to optimize for BracketedCamError - std::vector bracketed_cam_block_sizes; - int num_focal_lengths = 1; - int num_distortion_params = 1; // will be overwritten - bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - bracketed_cam_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - bracketed_cam_block_sizes.push_back(num_focal_lengths); - bracketed_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); - std::cout << "--make bracketed block sizes individual!" << std::endl; - bracketed_cam_block_sizes.push_back(num_distortion_params); // must be last, will be modified later - - // Set up the variable blocks to optimize for RefCamError - std::vector ref_cam_block_sizes; - ref_cam_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - ref_cam_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - ref_cam_block_sizes.push_back(num_focal_lengths); - ref_cam_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS); - std::cout << "--make ref block sizes individual!" << std::endl; - ref_cam_block_sizes.push_back(num_distortion_params); // must be last, will be modified later - - // Set up the variable blocks to optimize for BracketedDepthError - std::vector bracketed_depth_block_sizes; - bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - bracketed_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - - // Set up the variable blocks to optimize for RefDepthError - std::vector ref_depth_block_sizes; - ref_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - ref_depth_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS); - ref_depth_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS); - ref_depth_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - - // Set up the variable blocks to optimize for the mesh xyz - std::vector mesh_block_sizes; - mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS); - - std::cout << "must test with the ref cam having depth!" << std::endl; - - // Form the problem - ceres::Problem problem; - std::vector residual_names; - std::vector residual_scales; - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - for (auto cid_fid = pid_to_cid_fid[pid].begin(); - cid_fid != pid_to_cid_fid[pid].end(); cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - int cam_type = cams[cid].camera_type; - int beg_ref_index = cams[cid].beg_ref_index; - int end_ref_index = cams[cid].end_ref_index; - - Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - - if (cam_type == ref_cam_type) { - // The cost function of projecting in the ref cam. 
- ceres::CostFunction* ref_cost_function = - dense_map::RefCamError::Create(dist_ip, ref_cam_block_sizes, - cam_params[cam_type]); - ceres::LossFunction* ref_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - // std::cout << "--add block" << std::endl; - residual_names.push_back(cam_names[cam_type] + "_pix_x"); - residual_names.push_back(cam_names[cam_type] + "_pix_y"); - residual_scales.push_back(1.0); - residual_scales.push_back(1.0); - problem.AddResidualBlock - (ref_cost_function, ref_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &xyz_vec[pid][0], - &focal_lengths[cam_type], - &optical_centers[cam_type][0], - &distortions[cam_type][0]); - - if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - } - - Eigen::Vector3d depth_xyz(0, 0, 0); - if (!dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) - continue; // could not look up the depth value - - // std::cout << "--depth xyz is " << depth_xyz.transpose() << std::endl; - - // The constraint that the depth points agree with triangulated points - ceres::CostFunction* ref_depth_cost_function - = dense_map::RefDepthError::Create(FLAGS_depth_weight, - depth_xyz, - ref_depth_block_sizes); - - ceres::LossFunction* ref_depth_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - residual_names.push_back(cam_names[cam_type] + "_depth_x_m"); - residual_names.push_back(cam_names[cam_type] + "_depth_y_m"); - residual_names.push_back(cam_names[cam_type] + "_depth_z_m"); - residual_scales.push_back(FLAGS_depth_weight); - residual_scales.push_back(FLAGS_depth_weight); - residual_scales.push_back(FLAGS_depth_weight); - problem.AddResidualBlock - (ref_depth_cost_function, - ref_depth_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &depth_to_image_scales[cam_type], - &xyz_vec[pid][0]); - - if (!FLAGS_float_sparse_map) - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - if (!FLAGS_float_scale) - problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); - - } else { - // Other cameras, which need bracketing - ceres::CostFunction* bracketed_cost_function = - dense_map::BracketedCamError::Create(dist_ip, - ref_timestamps[beg_ref_index], - ref_timestamps[end_ref_index], - cams[cid].timestamp, - bracketed_cam_block_sizes, - cam_params[cam_type]); - ceres::LossFunction* bracketed_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - // std::cout << "--add block" << std::endl; - residual_names.push_back(cam_names[cam_type] + "_pix_x"); - residual_names.push_back(cam_names[cam_type] + "_pix_y"); - residual_scales.push_back(1.0); - residual_scales.push_back(1.0); - problem.AddResidualBlock - (bracketed_cost_function, bracketed_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &xyz_vec[pid][0], - &ref_to_cam_timestamp_offsets[cam_type], - &focal_lengths[cam_type], - &optical_centers[cam_type][0], - &distortions[cam_type][0]); - - if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - problem.SetParameterBlockConstant - 
(&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); - } - if (!FLAGS_float_timestamp_offsets) { - problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); - } else { - problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - lower_bound[cam_type]); - problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - upper_bound[cam_type]); - } - - Eigen::Vector3d depth_xyz(0, 0, 0); - if (!dense_map::depthValue(cams[cid].depth_cloud, dist_ip, depth_xyz)) - continue; // could not look up the depth value - - // std::cout << "--depth xyz is " << depth_xyz.transpose() << std::endl; - - // Ensure that the depth points agree with triangulated points - ceres::CostFunction* bracketed_depth_cost_function - = dense_map::BracketedDepthError::Create(FLAGS_depth_weight, - depth_xyz, - ref_timestamps[beg_ref_index], - ref_timestamps[end_ref_index], - cams[cid].timestamp, - bracketed_depth_block_sizes); - - ceres::LossFunction* bracketed_depth_loss_function - = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - residual_names.push_back(cam_names[cam_type] + "_depth_x_m"); - residual_names.push_back(cam_names[cam_type] + "_depth_y_m"); - residual_names.push_back(cam_names[cam_type] + "_depth_z_m"); - residual_scales.push_back(FLAGS_depth_weight); - residual_scales.push_back(FLAGS_depth_weight); - residual_scales.push_back(FLAGS_depth_weight); - problem.AddResidualBlock - (bracketed_depth_cost_function, - bracketed_depth_loss_function, - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type], - &depth_to_image_scales[cam_type], - &xyz_vec[pid][0], - &ref_to_cam_timestamp_offsets[cam_type]); - - if (!FLAGS_float_sparse_map) { - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * beg_ref_index]); - problem.SetParameterBlockConstant - (&world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * end_ref_index]); - } - if (!FLAGS_float_scale) - problem.SetParameterBlockConstant(&depth_to_image_scales[cam_type]); - if (!FLAGS_float_timestamp_offsets) { - problem.SetParameterBlockConstant(&ref_to_cam_timestamp_offsets[cam_type]); - } else { - problem.SetParameterLowerBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - lower_bound[cam_type]); - problem.SetParameterUpperBound(&ref_to_cam_timestamp_offsets[cam_type], 0, - upper_bound[cam_type]); - } - } - } - } - - if (FLAGS_mesh != "") { - // Add the mesh constraint - - for (size_t pid = 0; pid < pid_to_cid_fid.size(); pid++) { - Eigen::Vector3d mesh_xyz(0, 0, 0); - int num = 0; - - for (auto cid_fid = pid_to_cid_fid[pid].begin(); cid_fid != pid_to_cid_fid[pid].end(); - cid_fid++) { - int cid = cid_fid->first; - int fid = cid_fid->second; - - Eigen::Vector2d dist_ip(keypoint_vec[cid][fid].first, keypoint_vec[cid][fid].second); - Eigen::Vector2d undist_ip; - cam_params[cams[cid].camera_type].Convert - (dist_ip, &undist_ip); - - bool have_mesh_intersection = false; - // Try to make the intersection point be on the mesh and the nav cam ray - // to make the sci cam to conform to that. 
- // TODO(oalexan1): Think more of the range of the ray below - double min_ray_dist = 0.0; - double max_ray_dist = 10.0; - Eigen::Vector3d intersection(0.0, 0.0, 0.0); - have_mesh_intersection - = dense_map::ray_mesh_intersect(undist_ip, cam_params[cams[cid].camera_type], - world_to_cam[cid], mesh, bvh_tree, - min_ray_dist, max_ray_dist, - // Output - intersection); - - if (have_mesh_intersection) { - mesh_xyz += intersection; - num += 1; - } - } - - if (num >= 1) { - mesh_xyz /= num; - - // std::cout << "--num and mesh xyz is " << num << ' ' << mesh_xyz.transpose() << std::endl; - - ceres::CostFunction* mesh_cost_function = - dense_map::XYZError::Create(mesh_xyz, mesh_block_sizes, FLAGS_mesh_weight); - - ceres::LossFunction* mesh_loss_function = - dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); - - problem.AddResidualBlock(mesh_cost_function, mesh_loss_function, - &xyz_vec[pid][0]); - - residual_names.push_back("mesh_x_m"); - residual_names.push_back("mesh_y_m"); - residual_names.push_back("mesh_z_m"); - - residual_scales.push_back(FLAGS_mesh_weight); - residual_scales.push_back(FLAGS_mesh_weight); - residual_scales.push_back(FLAGS_mesh_weight); - } - } - // std::cout << "--xyz1 " << pid << ' ' << xyz_vec[pid].transpose() << std::endl; - } - - // See which intrinsics from which cam to float or keep fixed - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) { - if (intrinsics_to_float[cam_type].find("focal_length") == intrinsics_to_float[cam_type].end()) { - std::cout << "For " << cam_names[cam_type] << " not floating focal_length." << std::endl; - problem.SetParameterBlockConstant(&focal_lengths[cam_type]); - } else { - std::cout << "For " << cam_names[cam_type] << " floating focal_length." << std::endl; - } - if (intrinsics_to_float[cam_type].find("optical_center") == intrinsics_to_float[cam_type].end()) { - std::cout << "For " << cam_names[cam_type] << " not floating optical_center." << std::endl; - problem.SetParameterBlockConstant(&optical_centers[cam_type][0]); - } else { - std::cout << "For " << cam_names[cam_type] << " floating optical_center." << std::endl; - } - if (intrinsics_to_float[cam_type].find("distortion") == intrinsics_to_float[cam_type].end()) { - std::cout << "For " << cam_names[cam_type] << " not floating distortion." << std::endl; - problem.SetParameterBlockConstant(&distortions[cam_type][0]); - } else { - std::cout << "For " << cam_names[cam_type] << " floating distortion." 
<< std::endl; - } - } - - // Evaluate the residual before optimization - double total_cost = 0.0; - std::vector residuals; - ceres::Problem::EvaluateOptions eval_options; - eval_options.num_threads = 1; - eval_options.apply_loss_function = false; // want raw residuals - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "There must be as many residual names as residual values."; - if (residuals.size() != residual_scales.size()) - LOG(FATAL) << "There must be as many residual values as residual scales."; - for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale - residuals[it] /= residual_scales[it]; - dense_map::calc_median_residuals(residuals, residual_names, "before opt"); - if (FLAGS_verbose) { - for (size_t it = 0; it < residuals.size(); it++) - std::cout << "initial res " << residual_names[it] << " " << residuals[it] << std::endl; - } - - // Copy the offsets to specific cameras - std::cout.precision(18); - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - std::cout << "--offset before " << ref_to_cam_timestamp_offsets[cam_type] << std::endl; - - // Solve the problem - ceres::Solver::Options options; - ceres::Solver::Summary summary; - options.linear_solver_type = ceres::ITERATIVE_SCHUR; - options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread - options.max_num_iterations = FLAGS_num_iterations; - options.minimizer_progress_to_stdout = true; - options.gradient_tolerance = 1e-16; - options.function_tolerance = 1e-16; - options.parameter_tolerance = FLAGS_parameter_tolerance; - ceres::Solve(options, &problem, &summary); - - // Evaluate the residual after optimization - eval_options.num_threads = 1; - eval_options.apply_loss_function = false; // want raw residuals - problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); - if (residuals.size() != residual_names.size()) - LOG(FATAL) << "There must be as many residual names as residual values."; - if (residuals.size() != residual_scales.size()) - LOG(FATAL) << "There must be as many residual values as residual scales."; - for (size_t it = 0; it < residuals.size(); it++) // compensate for the scale - residuals[it] /= residual_scales[it]; - dense_map::calc_median_residuals(residuals, residual_names, "after opt"); - if (FLAGS_verbose) { - for (size_t it = 0; it < residuals.size(); it++) - std::cout << "final res " << residual_names[it] << " " << residuals[it] << std::endl; - } - - // Copy back the reference transforms - std::cout.precision(18); - std::cout << "--sparse map before\n" << sparse_map->cid_to_cam_t_global_[0].matrix() << std::endl; - - for (int cid = 0; cid < num_ref_cams; cid++) - dense_map::array_to_rigid_transform(world_to_ref_t[cid], - &world_to_ref_vec[dense_map::NUM_RIGID_PARAMS * cid]); - - std::cout << "--sparse map after\n" << sparse_map->cid_to_cam_t_global_[0].matrix() << std::endl; - - // Copy back the optimized intrinsics - for (int it = 0; it < num_cam_types; it++) { - cam_params[it].SetFocalLength(Eigen::Vector2d(focal_lengths[it], focal_lengths[it])); - cam_params[it].SetOpticalOffset(optical_centers[it]); - cam_params[it].SetDistortion(distortions[it]); - } - - // Copy back the optimized extrinsics - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - dense_map::array_to_rigid_transform - (ref_to_cam_trans[cam_type], - &ref_to_cam_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - - // Copy back the depth to image transforms without scales 
- for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - dense_map::array_to_rigid_transform - (depth_to_image_noscale[cam_type], - &depth_to_image_noscale_vec[dense_map::NUM_RIGID_PARAMS * cam_type]); - - // Copy extrinsics to specific cameras - hazcam_to_navcam_aff_trans = ref_to_cam_trans[1].inverse(); - scicam_to_hazcam_aff_trans = hazcam_to_navcam_aff_trans.inverse() * ref_to_cam_trans[2].inverse(); - - // Copy intrinsics to the specific cameras - if (FLAGS_nav_cam_intrinsics_to_float != "") nav_cam_params = cam_params[0]; - if (FLAGS_haz_cam_intrinsics_to_float != "") haz_cam_params = cam_params[1]; - if (FLAGS_sci_cam_intrinsics_to_float != "") sci_cam_params = cam_params[2]; - - // Copy the offsets to specific cameras - std::cout.precision(18); - for (int cam_type = 0; cam_type < num_cam_types; cam_type++) - std::cout << "--offset after " << ref_to_cam_timestamp_offsets[cam_type] << std::endl; - - navcam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1]; - scicam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1] - - ref_to_cam_timestamp_offsets[2]; - - std::cout.precision(18); - std::cout << "--navcam_to_hazcam_timestamp_offset " << navcam_to_hazcam_timestamp_offset << std::endl; - std::cout << "--scicam_to_hazcam_timestamp_offset " << scicam_to_hazcam_timestamp_offset << std::endl; - - // Update the optimized depth to image (for haz cam only) - int cam_type = 1; // haz cam - hazcam_depth_to_image_scale = depth_to_image_scales[cam_type]; - hazcam_depth_to_image_transform = depth_to_image_noscale[cam_type]; - hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale; - - // Update the config file - { - // update nav and haz - bool update_cam1 = true, update_cam2 = true; - std::string cam1_name = "nav_cam", cam2_name = "haz_cam"; - boost::shared_ptr - cam1_params(new camera::CameraParameters(nav_cam_params)); - boost::shared_ptr - cam2_params(new camera::CameraParameters(haz_cam_params)); - bool update_depth_to_image_transform = true; - bool update_extrinsics = true; - bool update_timestamp_offset = true; - std::string cam1_to_cam2_timestamp_offset_str = "navcam_to_hazcam_timestamp_offset"; - // Modify in-place the robot config file - dense_map::update_config_file(update_cam1, cam1_name, cam1_params, - update_cam2, cam2_name, cam2_params, - update_depth_to_image_transform, - hazcam_depth_to_image_transform, update_extrinsics, - hazcam_to_navcam_aff_trans, update_timestamp_offset, - cam1_to_cam2_timestamp_offset_str, - navcam_to_hazcam_timestamp_offset); - } - { - // update sci and haz cams - // TODO(oalexan1): Write a single function to update all 3 of them - bool update_cam1 = true, update_cam2 = true; - std::string cam1_name = "sci_cam", cam2_name = "haz_cam"; - boost::shared_ptr - cam1_params(new camera::CameraParameters(sci_cam_params)); - boost::shared_ptr - cam2_params(new camera::CameraParameters(haz_cam_params)); - bool update_depth_to_image_transform = true; - bool update_extrinsics = true; - bool update_timestamp_offset = true; - std::string cam1_to_cam2_timestamp_offset_str = "scicam_to_hazcam_timestamp_offset"; - // Modify in-place the robot config file - dense_map::update_config_file(update_cam1, cam1_name, cam1_params, - update_cam2, cam2_name, cam2_params, - update_depth_to_image_transform, - hazcam_depth_to_image_transform, update_extrinsics, - scicam_to_hazcam_aff_trans.inverse(), - update_timestamp_offset, - cam1_to_cam2_timestamp_offset_str, - scicam_to_hazcam_timestamp_offset); - } - - std::cout << "Writing: " << 
FLAGS_output_map << std::endl; - if (FLAGS_float_sparse_map || FLAGS_nav_cam_intrinsics_to_float != "") { - std::cout << "Either the sparse map intrinsics or cameras got modified. " - << "The map must be rebuilt." << std::endl; - - dense_map::RebuildMap(FLAGS_output_map, // Will be used for temporary saving of aux data - nav_cam_params, sparse_map); - } - - sparse_map->Save(FLAGS_output_map); - - return 0; -} // NOLINT - diff --git a/dense_map/geometry_mapper/tools/camera_refiner_old.cc b/dense_map/geometry_mapper/tools/camera_refiner_old.cc new file mode 100644 index 00000000..36e8520f --- /dev/null +++ b/dense_map/geometry_mapper/tools/camera_refiner_old.cc @@ -0,0 +1,2268 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. + * + * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking + * platform" software is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +// This is an old version of camera_refiner which could only match each sci and haz +// image to its two bracketing nav_cam images. It is still functional, hence it is kept, +// but the new camera_refiner performs better. It used to be invoked like this: +// +// $ISAAC_WS/devel/lib/geometry_mapper/camera_refiner \ +// --ros_bag mybag.bag --sparse_map mymap.map \ +// --num_iterations 20 --bracket_len 0.6 \ +// --nav_cam_topic /mgt/img_sampler/nav_cam/image_record \ +// --output_map output_map.map \ +// --fix_map --skip_registration --float_scale \ +// --timestamp_interpolation --robust_threshold 3 \ +// --sci_cam_intrinsics_to_float \ +// 'focal_length optical_center distortion' \ +// --mesh mesh.ply --mesh_weight 25 \ +// --mesh_robust_threshold 3 + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and full-resolution sci_cam data."); + +DEFINE_string(sparse_map, "", + "A registered SURF sparse map made with some of the ROS bag data, " + "and including nav cam images closely bracketing the sci cam images."); + +DEFINE_string(output_map, "", "Output file containing the updated map."); + +DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); + +DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); + +DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", + "The depth camera intensity topic in the bag file."); + +DEFINE_string(sci_cam_topic, "/hw/cam_sci/compressed", "The sci cam topic in the bag file."); + +DEFINE_double(start, 0.0, 
"How many seconds into the bag to start processing the data."); + +DEFINE_double(duration, -1.0, "For how many seconds to do the processing."); + +DEFINE_double(max_haz_cam_image_to_depth_timestamp_diff, 0.2, + "Use depth haz cam clouds that are within this distance in " + "time from the nearest haz cam intensity image."); + +DEFINE_double(robust_threshold, 3.0, + "Pixel errors much larger than this will be exponentially attenuated " + "to affect less the cost function."); + +DEFINE_int32(num_iterations, 20, "How many solver iterations to perform in calibration."); + +DEFINE_double(parameter_tolerance, 1e-12, "Stop when the optimization variables change by less than this."); + +DEFINE_double(bracket_len, 2.0, + "Lookup sci and haz cam images only between consecutive nav cam images " + "whose distance in time is no more than this (in seconds), after adjusting " + "for the timestamp offset between these cameras. It is assumed the robot " + "moves slowly and uniformly during this time."); + +DEFINE_int32(num_opt_threads, 16, "How many threads to use in the optimization."); + +DEFINE_string(hugin_file, "", "The path to the hugin .pto file used for sparse map registration."); + +DEFINE_string(xyz_file, "", "The path to the xyz file used for sparse map registration."); + +DEFINE_bool(timestamp_interpolation, false, + "If true, interpolate between " + "timestamps. May give better results if the robot is known to move " + "uniformly, and perhaps less so for stop-and-go robot motion."); + +DEFINE_string(sci_cam_timestamps, "", + "Use only these sci cam timestamps. Must be " + "a file with one timestamp per line."); + +DEFINE_bool(skip_registration, false, + "If true, do not re-register the optimized map. " + "Then the hugin and xyz file options need not be provided. " + "This may result in the scale not being correct if the sparse map is not fixed."); + +DEFINE_bool(opt_map_only, false, "If to optimize only the map and not the camera params."); + +DEFINE_bool(fix_map, false, "Do not optimize the sparse map, hence only the camera params."); + +DEFINE_bool(float_scale, false, + "If to optimize the scale of the clouds (use it if the " + "sparse map is kept fixed)."); + +DEFINE_string(sci_cam_intrinsics_to_float, "", + "Refine 0 or more of the following intrinsics for sci_cam: focal_length, " + "optical_center, distortion. Specify as a quoted list. " + "For example: 'focal_length optical_center'."); + +DEFINE_double(nav_cam_to_sci_cam_offset_override_value, + std::numeric_limits::quiet_NaN(), + "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config " + "file with this value."); + +DEFINE_string(mesh, "", + "Refine the sci cam so that the sci cam texture agrees with the nav cam " + "texture when projected on this mesh."); + +DEFINE_double(mesh_weight, 25.0, + "A larger value will give more weight to the mesh constraint. " + "The mesh residuals printed at the end can be used to examine " + "the effect of this weight."); + +DEFINE_double(mesh_robust_threshold, 3.0, + "A larger value will try harder to minimize large divergences from " + "the mesh (but note that some of those may be outliers)."); + +DEFINE_bool(verbose, false, + "Print the residuals and save the images and match files." + "Stereo Pipeline's viewer can be used for visualizing these."); + +namespace dense_map { + +// TODO(oalexan1): Store separately matches which end up being +// squashed in a pid_cid_to_fid. 
+
+ceres::LossFunction* GetLossFunction(std::string cost_fun, double th) {
+  // Convert to lower-case
+  std::transform(cost_fun.begin(), cost_fun.end(), cost_fun.begin(), ::tolower);
+
+  ceres::LossFunction* loss_function = NULL;
+  if (cost_fun == "l2")
+    loss_function = NULL;
+  else if (cost_fun == "huber")
+    loss_function = new ceres::HuberLoss(th);
+  else if (cost_fun == "cauchy")
+    loss_function = new ceres::CauchyLoss(th);
+  else if (cost_fun == "l1")
+    loss_function = new ceres::SoftLOneLoss(th);
+  else
+    LOG(FATAL) << "Unknown cost function: " + cost_fun;
+
+  return loss_function;
+}
+
+// An error function minimizing the error of projection of haz cam
+// depth points to the haz cam image.
+struct DepthToHazError {
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+  DepthToHazError(Eigen::Vector2d const& haz_pix, Eigen::Vector3d const& depth_xyz, std::vector<int> const& block_sizes,
+                  camera::CameraParameters const& haz_cam_params)
+      : m_haz_pix(haz_pix), m_depth_xyz(depth_xyz), m_block_sizes(block_sizes), m_haz_cam_params(haz_cam_params) {
+    // Sanity check.
+    if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_SCALAR_PARAMS) {
+      LOG(FATAL) << "DepthToHazError: The block sizes were not set up properly.\n";
+    }
+  }
+
+  // Call to work with ceres::DynamicNumericDiffCostFunction.
+  // Takes array of arrays as parameters.
+  bool operator()(double const* const* parameters, double* residuals) const {
+    // Populate the intrinsics
+
+    // The current transform from the depth point cloud to the image
+    Eigen::Affine3d depth_to_image;
+    array_to_rigid_transform(depth_to_image, parameters[0]);
+
+    // Apply the scale
+    double depth_to_image_scale = parameters[1][0];
+    depth_to_image.linear() *= depth_to_image_scale;
+
+    // Convert from depth cloud coordinates to haz cam coordinates
+    Eigen::Vector3d X = depth_to_image * m_depth_xyz;
+
+    // Project into the camera
+    Eigen::Vector2d undist_pix = m_haz_cam_params.GetFocalVector().cwiseProduct(X.hnormalized());
+    // Eigen::Vector2d dist_pix;
+    // m_haz_cam_params.Convert(undist_pix, &dist_pix);
+
+    // Compute the residuals
+    residuals[0] = undist_pix[0] - m_haz_pix[0];
+    residuals[1] = undist_pix[1] - m_haz_pix[1];
+
+    return true;
+  }
+
+  // Factory to hide the construction of the CostFunction object from the client code.
+  static ceres::CostFunction* Create(Eigen::Vector2d const& haz_pix, Eigen::Vector3d const& depth_xyz,
+                                     std::vector<int> const& block_sizes,
+                                     camera::CameraParameters const& haz_cam_params) {
+    ceres::DynamicNumericDiffCostFunction<DepthToHazError>* cost_function =
+        new ceres::DynamicNumericDiffCostFunction<DepthToHazError>(
+            new DepthToHazError(haz_pix, depth_xyz, block_sizes, haz_cam_params));
+
+    // The residual size is always the same.
+    cost_function->SetNumResiduals(NUM_PIX_PARAMS);
+
+    // The camera wrapper knows all of the block sizes to add.
+    for (size_t i = 0; i < block_sizes.size(); i++) {
+      cost_function->AddParameterBlock(block_sizes[i]);
+    }
+    return cost_function;
+  }
+
+ private:
+  Eigen::Vector2d m_haz_pix;    // The pixel observation
+  Eigen::Vector3d m_depth_xyz;  // The measured position
+  std::vector<int> m_block_sizes;
+  camera::CameraParameters m_haz_cam_params;
+};  // End class DepthToHazError
+
+// An error function minimizing the error of projection of a point in the nav cam
+// image. Both the point and the camera pose are variables of optimization.
+struct NavError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + NavError(Eigen::Vector2d const& nav_pix, std::vector const& block_sizes, + camera::CameraParameters const& nav_cam_params) + : m_nav_pix(nav_pix), m_block_sizes(block_sizes), m_nav_cam_params(nav_cam_params) { + // Sanity check + if (m_block_sizes.size() != 2 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_XYZ_PARAMS) { + LOG(FATAL) << "NavError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + // Takes array of arrays as parameters. + bool operator()(double const* const* parameters, double* residuals) const { + // Populate the intrinsics + + Eigen::Affine3d world_to_nav_cam; + array_to_rigid_transform(world_to_nav_cam, parameters[0]); + + Eigen::Vector3d xyz; + for (int it = 0; it < NUM_XYZ_PARAMS; it++) xyz[it] = parameters[1][it]; + + // Convert to camera coordinates + Eigen::Vector3d X = world_to_nav_cam * xyz; + + // Project into the camera + Eigen::Vector2d undist_pix = m_nav_cam_params.GetFocalVector().cwiseProduct(X.hnormalized()); + // Eigen::Vector2d dist_pix; + // m_nav_cam_params.Convert(undist_pix, &dist_pix); + + // Compute the residuals + residuals[0] = undist_pix[0] - m_nav_pix[0]; + residuals[1] = undist_pix[1] - m_nav_pix[1]; + + return true; + } + + // Factory to hide the construction of the CostFunction object from the client code. + static ceres::CostFunction* Create(Eigen::Vector2d const& nav_pix, std::vector const& block_sizes, + camera::CameraParameters const& nav_cam_params) { + ceres::DynamicNumericDiffCostFunction* cost_function = + new ceres::DynamicNumericDiffCostFunction(new NavError(nav_pix, block_sizes, nav_cam_params)); + + // The residual size is always the same. + cost_function->SetNumResiduals(NUM_PIX_PARAMS); + + // The camera wrapper knows all of the block sizes to add. + for (size_t i = 0; i < block_sizes.size(); i++) { + cost_function->AddParameterBlock(block_sizes[i]); + } + return cost_function; + } + + private: + Eigen::Vector2d m_nav_pix; // The pixel observation + std::vector m_block_sizes; + camera::CameraParameters m_nav_cam_params; +}; // End class NavError + +// An error function minimizing the error of transforming depth points +// first to haz cam image coordinates, then to nav cam coordinates +// (via time interpolation) then to world coordinates, then by +// projecting into the nav cam image having a match point with the +// original haz cam image. This is a little tricky to follow, because +// each haz cam image is bound in time by two nav cam images. +struct DepthToNavError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + DepthToNavError(Eigen::Vector2d const& nav_pix, Eigen::Vector3d const& depth_xyz, + double alpha, // used for interpolation + bool match_left, std::vector const& block_sizes, camera::CameraParameters const& nav_cam_params) + : m_nav_pix(nav_pix), + m_depth_xyz(depth_xyz), + m_alpha(alpha), + m_match_left(match_left), + m_block_sizes(block_sizes), + m_nav_cam_params(nav_cam_params) { + // Sanity check. + if (m_block_sizes.size() != 5 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || + m_block_sizes[4] != NUM_SCALAR_PARAMS) { + LOG(FATAL) << "DepthToNavError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. 
+  bool operator()(double const* const* parameters, double* residuals) const {
+    // As mentioned before, we have to deal with four transforms
+
+    Eigen::Affine3d left_nav_trans;
+    array_to_rigid_transform(left_nav_trans, parameters[0]);
+
+    Eigen::Affine3d right_nav_trans;
+    array_to_rigid_transform(right_nav_trans, parameters[1]);
+
+    Eigen::Affine3d hazcam_to_navcam_trans;
+    array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]);
+
+    // The haz cam depth to image transform, which has a fixed scale
+    Eigen::Affine3d hazcam_depth_to_image_trans;
+    array_to_rigid_transform(hazcam_depth_to_image_trans, parameters[3]);
+    double depth_to_image_scale = parameters[4][0];
+    hazcam_depth_to_image_trans.linear() *= depth_to_image_scale;
+
+    // Convert from depth cloud coordinates to haz cam coordinates
+    Eigen::Vector3d X = hazcam_depth_to_image_trans * m_depth_xyz;
+
+    // Convert to nav cam coordinates
+    X = hazcam_to_navcam_trans * X;
+
+    // The world to nav cam transform at the haz cam time is obtained
+    // by interpolation in time
+    Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(m_alpha, left_nav_trans, right_nav_trans);
+
+    // Convert to world coordinates
+    X = interp_world_to_nav_trans.inverse() * X;
+
+    // Transform to either the left or right nav camera coordinates,
+    // depending on which one we managed to match with the
+    // haz cam image
+    if (m_match_left) {
+      X = left_nav_trans * X;
+    } else {
+      X = right_nav_trans * X;
+    }
+
+    // Project into the image
+    Eigen::Vector2d undist_pix = m_nav_cam_params.GetFocalVector().cwiseProduct(X.hnormalized());
+    // Eigen::Vector2d dist_pix;
+    // m_nav_cam_params.Convert(undist_pix, &dist_pix);
+
+    // Compute the residuals
+    residuals[0] = undist_pix[0] - m_nav_pix[0];
+    residuals[1] = undist_pix[1] - m_nav_pix[1];
+
+    return true;
+  }
+
+  // Factory to hide the construction of the CostFunction object from the client code.
+  static ceres::CostFunction* Create(Eigen::Vector2d const& nav_pix, Eigen::Vector3d const& depth_xyz, double alpha,
+                                     bool match_left, std::vector<int> const& block_sizes,
+                                     camera::CameraParameters const& nav_cam_params) {
+    ceres::DynamicNumericDiffCostFunction<DepthToNavError>* cost_function =
+        new ceres::DynamicNumericDiffCostFunction<DepthToNavError>(
+            new DepthToNavError(nav_pix, depth_xyz, alpha, match_left, block_sizes, nav_cam_params));
+
+    // The residual size is always the same.
+    cost_function->SetNumResiduals(NUM_PIX_PARAMS);
+
+    // The camera wrapper knows all of the block sizes to add.
+ for (size_t i = 0; i < block_sizes.size(); i++) { + cost_function->AddParameterBlock(block_sizes[i]); + } + return cost_function; + } + + private: + Eigen::Vector2d m_nav_pix; // The nav cam pixel observation + Eigen::Vector3d m_depth_xyz; // The measured position in the depth camera + double m_alpha; + bool m_match_left; + std::vector m_block_sizes; + camera::CameraParameters m_nav_cam_params; +}; // End class DepthToNavError + +// An error function minimizing the error of transforming depth points at haz cam +// time through enough coordinate systems until it can project +// into the sci cam at the sci cam time +struct DepthToSciError { + EIGEN_MAKE_ALIGNED_OPERATOR_NEW + DepthToSciError(Eigen::Vector2d const& dist_sci_pix, Eigen::Vector3d const& depth_xyz, double alpha_haz, + double alpha_sci, // used for interpolation + std::vector const& block_sizes, camera::CameraParameters const& sci_cam_params) + : m_dist_sci_pix(dist_sci_pix), + m_depth_xyz(depth_xyz), + m_alpha_haz(alpha_haz), + m_alpha_sci(alpha_sci), + m_block_sizes(block_sizes), + m_sci_cam_params(sci_cam_params), + m_num_focal_lengths(1) { + // Sanity check. + if (m_block_sizes.size() != 9 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS || + m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS || + m_block_sizes[4] != NUM_RIGID_PARAMS || m_block_sizes[5] != NUM_SCALAR_PARAMS || + m_block_sizes[6] != m_num_focal_lengths || m_block_sizes[7] != NUM_OPT_CTR_PARAMS || + m_block_sizes[8] != m_sci_cam_params.GetDistortion().size()) { + LOG(FATAL) << "DepthToSciError: The block sizes were not set up properly.\n"; + } + } + + // Call to work with ceres::DynamicNumericDiffCostFunction. + bool operator()(double const* const* parameters, double* residuals) const { + // Make a deep copy which we will modify + camera::CameraParameters sci_cam_params = m_sci_cam_params; + + // We have to deal with four transforms + Eigen::Affine3d left_nav_trans; + array_to_rigid_transform(left_nav_trans, parameters[0]); + + Eigen::Affine3d right_nav_trans; + array_to_rigid_transform(right_nav_trans, parameters[1]); + + Eigen::Affine3d hazcam_to_navcam_trans; + array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]); + + Eigen::Affine3d scicam_to_hazcam_trans; + array_to_rigid_transform(scicam_to_hazcam_trans, parameters[3]); + + // The haz cam depth to image transform, which has a fixed scale + Eigen::Affine3d hazcam_depth_to_image_trans; + array_to_rigid_transform(hazcam_depth_to_image_trans, parameters[4]); + double depth_to_image_scale = parameters[5][0]; + hazcam_depth_to_image_trans.linear() *= depth_to_image_scale; + + // Intrinsics, including a single focal length + Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[6][0], parameters[6][0]); + Eigen::Vector2d optical_center(parameters[7][0], parameters[7][1]); + Eigen::VectorXd distortion(m_block_sizes[8]); + for (int i = 0; i < m_block_sizes[8]; i++) distortion[i] = parameters[8][i]; + sci_cam_params.SetFocalLength(focal_vector); + sci_cam_params.SetOpticalOffset(optical_center); + sci_cam_params.SetDistortion(distortion); + + // Convert from depth cloud coordinates to haz cam coordinates + Eigen::Vector3d X = hazcam_depth_to_image_trans * m_depth_xyz; + + // Convert to nav cam coordinates at haz cam time + X = hazcam_to_navcam_trans * X; + + // World to navcam at haz time + Eigen::Affine3d interp_world_to_nav_trans_haz_time = + dense_map::linearInterp(m_alpha_haz, left_nav_trans, right_nav_trans); + + // World to nav time at 
sci time
+    Eigen::Affine3d interp_world_to_nav_trans_sci_time =
+      dense_map::linearInterp(m_alpha_sci, left_nav_trans, right_nav_trans);
+
+    // Convert to world coordinates
+    X = interp_world_to_nav_trans_haz_time.inverse() * X;
+
+    // Convert to nav coordinates at sci cam time
+    X = interp_world_to_nav_trans_sci_time * X;
+
+    // Convert to sci cam coordinates
+    X = scicam_to_hazcam_trans.inverse() * hazcam_to_navcam_trans.inverse() * X;
+
+    // Convert to sci cam pix
+    Eigen::Vector2d undist_pix = sci_cam_params.GetFocalVector().cwiseProduct(X.hnormalized());
+
+    // Apply distortion
+    Eigen::Vector2d comp_dist_sci_pix;
+    sci_cam_params.Convert<camera::UNDISTORTED_C, camera::DISTORTED>(undist_pix, &comp_dist_sci_pix);
+
+    // Compute the residuals
+    residuals[0] = comp_dist_sci_pix[0] - m_dist_sci_pix[0];
+    residuals[1] = comp_dist_sci_pix[1] - m_dist_sci_pix[1];
+
+    return true;
+  }
+
+  // Factory to hide the construction of the CostFunction object from the client code.
+  static ceres::CostFunction* Create(Eigen::Vector2d const& dist_sci_pix, Eigen::Vector3d const& depth_xyz,
+                                     double alpha_haz, double alpha_sci, std::vector<int> const& block_sizes,
+                                     camera::CameraParameters const& sci_cam_params) {
+    ceres::DynamicNumericDiffCostFunction<DepthToSciError>* cost_function =
+      new ceres::DynamicNumericDiffCostFunction<DepthToSciError>(
+        new DepthToSciError(dist_sci_pix, depth_xyz, alpha_haz, alpha_sci, block_sizes, sci_cam_params));
+
+    // The residual size is always the same.
+    cost_function->SetNumResiduals(NUM_PIX_PARAMS);
+
+    // The camera wrapper knows all of the block sizes to add.
+    for (size_t i = 0; i < block_sizes.size(); i++) {
+      cost_function->AddParameterBlock(block_sizes[i]);
+    }
+    return cost_function;
+  }
+
+ private:
+  Eigen::Vector2d m_dist_sci_pix;  // The sci cam pixel observation
+  Eigen::Vector3d m_depth_xyz;     // The measured position in the depth camera
+  double m_alpha_haz;
+  double m_alpha_sci;
+  std::vector<int> m_block_sizes;
+  camera::CameraParameters m_sci_cam_params;
+  int m_num_focal_lengths;
+};  // End class DepthToSciError
+
+// An error function for projecting an xyz point into the sci cam,
+// whose pose is bracketed by two nav cams.
+struct SciError {
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+  SciError(Eigen::Vector2d const& dist_sci_pix,
+           double alpha,  // used for interpolation
+           std::vector<int> const& block_sizes, camera::CameraParameters const& sci_cam_params)
+      : m_dist_sci_pix(dist_sci_pix),
+        m_alpha(alpha),
+        m_block_sizes(block_sizes),
+        m_sci_cam_params(sci_cam_params),
+        m_num_focal_lengths(1) {
+    // Sanity check.
+    if (m_block_sizes.size() != 8 || m_block_sizes[0] != NUM_RIGID_PARAMS || m_block_sizes[1] != NUM_RIGID_PARAMS ||
+        m_block_sizes[2] != NUM_RIGID_PARAMS || m_block_sizes[3] != NUM_RIGID_PARAMS ||
+        m_block_sizes[4] != NUM_XYZ_PARAMS || m_block_sizes[5] != m_num_focal_lengths ||
+        m_block_sizes[6] != NUM_OPT_CTR_PARAMS || m_block_sizes[7] != m_sci_cam_params.GetDistortion().size()) {
+      LOG(FATAL) << "SciError: The block sizes were not set up properly.\n";
+    }
+  }
+
+  // Call to work with ceres::DynamicNumericDiffCostFunction.
+  bool operator()(double const* const* parameters, double* residuals) const {
+    // Make a deep copy which we will modify
+    camera::CameraParameters sci_cam_params = m_sci_cam_params;
+
+    // We have to deal with four transforms
+    Eigen::Affine3d left_nav_trans;
+    array_to_rigid_transform(left_nav_trans, parameters[0]);
+
+    Eigen::Affine3d right_nav_trans;
+    array_to_rigid_transform(right_nav_trans, parameters[1]);
+
+    Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(m_alpha, left_nav_trans, right_nav_trans);
+
+    Eigen::Affine3d hazcam_to_navcam_trans;
+    array_to_rigid_transform(hazcam_to_navcam_trans, parameters[2]);
+
+    Eigen::Affine3d scicam_to_hazcam_trans;
+    array_to_rigid_transform(scicam_to_hazcam_trans, parameters[3]);
+
+    Eigen::Vector3d X;
+    for (int it = 0; it < NUM_XYZ_PARAMS; it++) X[it] = parameters[4][it];
+
+    // Intrinsics, including a single focal length
+    Eigen::Vector2d focal_vector = Eigen::Vector2d(parameters[5][0], parameters[5][0]);
+    Eigen::Vector2d optical_center(parameters[6][0], parameters[6][1]);
+    Eigen::VectorXd distortion(m_block_sizes[7]);
+    for (int i = 0; i < m_block_sizes[7]; i++) distortion[i] = parameters[7][i];
+    sci_cam_params.SetFocalLength(focal_vector);
+    sci_cam_params.SetOpticalOffset(optical_center);
+    sci_cam_params.SetDistortion(distortion);
+
+    // Find the world to sci cam transform
+    Eigen::Affine3d interp_world_to_sci_trans =
+      scicam_to_hazcam_trans.inverse() * hazcam_to_navcam_trans.inverse() * interp_world_to_nav_trans;
+
+    // Project into the sci cam
+    Eigen::Vector3d sciX = interp_world_to_sci_trans * X;
+    Eigen::Vector2d undist_sci_pix = sci_cam_params.GetFocalVector().cwiseProduct(sciX.hnormalized());
+
+    // Convert to distorted pixel
+    Eigen::Vector2d comp_dist_sci_pix;
+    sci_cam_params.Convert<camera::UNDISTORTED_C, camera::DISTORTED>(undist_sci_pix, &comp_dist_sci_pix);
+
+    // Compute the residuals
+    residuals[0] = comp_dist_sci_pix[0] - m_dist_sci_pix[0];
+    residuals[1] = comp_dist_sci_pix[1] - m_dist_sci_pix[1];
+
+    return true;
+  }
+
+  // Factory to hide the construction of the CostFunction object from the client code.
+  static ceres::CostFunction* Create(Eigen::Vector2d const& dist_sci_pix, double alpha,
+                                     std::vector<int> const& block_sizes,
+                                     camera::CameraParameters const& sci_cam_params) {
+    ceres::DynamicNumericDiffCostFunction<SciError>* cost_function =
+      new ceres::DynamicNumericDiffCostFunction<SciError>(
+        new SciError(dist_sci_pix, alpha, block_sizes, sci_cam_params));
+
+    // The residual size is always the same.
+    cost_function->SetNumResiduals(NUM_PIX_PARAMS);
+
+    // The camera wrapper knows all of the block sizes to add.
+    for (size_t i = 0; i < block_sizes.size(); i++) {
+      cost_function->AddParameterBlock(block_sizes[i]);
+    }
+    return cost_function;
+  }
+
+ private:
+  Eigen::Vector2d m_dist_sci_pix;  // The sci cam pixel observation
+  double m_alpha;
+  std::vector<int> m_block_sizes;
+  camera::CameraParameters m_sci_cam_params;
+  int m_num_focal_lengths;
+};  // End class SciError
+
+// An error function minimizing a weight times the distance from a
+// variable xyz point to a fixed reference xyz point.
+struct XYZError {
+  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+  XYZError(Eigen::Vector3d const& ref_xyz, std::vector<int> const& block_sizes, double weight)
+      : m_ref_xyz(ref_xyz), m_block_sizes(block_sizes), m_weight(weight) {
+    // Sanity check
+    if (m_block_sizes.size() != 1 || m_block_sizes[0] != NUM_XYZ_PARAMS)
+      LOG(FATAL) << "XYZError: The block sizes were not set up properly.\n";
+  }
+
+  // Call to work with ceres::DynamicNumericDiffCostFunction.
+  // Takes array of arrays as parameters.
+  // TODO(oalexan1): May want to use the analytical Ceres cost function
+  bool operator()(double const* const* parameters, double* residuals) const {
+    // Compute the residuals
+    for (int it = 0; it < NUM_XYZ_PARAMS; it++) residuals[it] = m_weight * (parameters[0][it] - m_ref_xyz[it]);
+
+    return true;
+  }
+
+  // Factory to hide the construction of the CostFunction object from the client code.
+  static ceres::CostFunction* Create(Eigen::Vector3d const& ref_xyz,
+                                     std::vector<int> const& block_sizes,
+                                     double weight) {
+    ceres::DynamicNumericDiffCostFunction<XYZError>* cost_function =
+      new ceres::DynamicNumericDiffCostFunction<XYZError>(new XYZError(ref_xyz, block_sizes, weight));
+
+    // The residual size is always the same
+    cost_function->SetNumResiduals(NUM_XYZ_PARAMS);
+
+    // The camera wrapper knows all of the block sizes to add.
+    for (size_t i = 0; i < block_sizes.size(); i++) {
+      cost_function->AddParameterBlock(block_sizes[i]);
+    }
+    return cost_function;
+  }
+
+ private:
+  Eigen::Vector3d m_ref_xyz;  // reference xyz
+  std::vector<int> m_block_sizes;
+  double m_weight;
+};  // End class XYZError
+
+enum imgType { NAV_CAM, SCI_CAM, HAZ_CAM };
+
+// Compute the 25, 50, and 75 percentiles of the absolute residuals
+// for each residual type.
+void calc_median_residuals(std::vector<double> const& residuals,
+                           std::vector<std::string> const& residual_names,
+                           std::string const& tag) {
+  size_t num = residuals.size();
+
+  if (num != residual_names.size())
+    LOG(FATAL) << "There must be as many residuals as residual names.";
+
+  std::map<std::string, std::vector<double>> stats;
+  for (size_t it = 0; it < residuals.size(); it++)
+    stats[residual_names[it]] = std::vector<double>();  // initialize
+
+  for (size_t it = 0; it < residuals.size(); it++)
+    stats[residual_names[it]].push_back(std::abs(residuals[it]));
+
+  std::cout << "The 25, 50 and 75 percentile residual stats " << tag << std::endl;
+  for (auto it = stats.begin(); it != stats.end(); it++) {
+    std::string const& name = it->first;
+    std::vector<double> vals = stats[name];  // make a copy
+    std::sort(vals.begin(), vals.end());
+
+    int len = vals.size();
+
+    int it1 = static_cast<int>(0.25 * len);
+    int it2 = static_cast<int>(0.5 * len);
+    int it3 = static_cast<int>(0.75 * len);
+
+    if (len == 0)
+      std::cout << name << ": "
+                << "none" << std::endl;
+    else
+      std::cout << name << ": " << vals[it1] << ' ' << vals[it2] << ' ' << vals[it3] << std::endl;
+  }
+}
+
+// Intersect ray with mesh. Return true on success.
+bool ray_mesh_intersect(Eigen::Vector2d const& undist_pix, camera::CameraParameters const& cam_params,
+                        Eigen::Affine3d const& world_to_cam, mve::TriangleMesh::Ptr const& mesh,
+                        std::shared_ptr<BVHTree> const& bvh_tree,
+                        double min_ray_dist, double max_ray_dist,
+                        // Output
+                        Eigen::Vector3d& intersection) {
+  // Initialize the output
+  intersection = Eigen::Vector3d(0.0, 0.0, 0.0);
+
+  // Ray from camera going through the pixel
+  Eigen::Vector3d cam_ray(undist_pix.x() / cam_params.GetFocalVector()[0],
+                          undist_pix.y() / cam_params.GetFocalVector()[1], 1.0);
+  cam_ray.normalize();
+
+  Eigen::Affine3d cam_to_world = world_to_cam.inverse();
+  Eigen::Vector3d world_ray = cam_to_world.linear() * cam_ray;
+  Eigen::Vector3d cam_ctr = cam_to_world.translation();
+
+  // Set up the ray structure for the mesh
+  BVHTree::Ray bvh_ray;
+  bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr);
+  bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray);
+  bvh_ray.dir.normalize();
+
+  bvh_ray.tmin = min_ray_dist;
+  bvh_ray.tmax = max_ray_dist;
+
+  // Intersect the ray with the mesh
+  BVHTree::Hit hit;
+  if (bvh_tree->intersect(bvh_ray, &hit)) {
+    double cam_to_mesh_dist = hit.t;
+    intersection = cam_ctr + cam_to_mesh_dist * world_ray;
+    return true;
+  }
+
+  return false;
+}
+
+// Prevent the linter from messing with the beautiful formatting below
+void add_haz_nav_cost  // NOLINT
+(// Inputs                                                        // NOLINT
+ int haz_it, int nav_it, int nav_cam_start,                       // NOLINT
+ double navcam_to_hazcam_timestamp_offset,                        // NOLINT
+ MATCH_PAIR const & match_pair,                                   // NOLINT
+ std::vector<double> const & haz_cam_intensity_timestamps,        // NOLINT
+ std::vector<double> const & sparse_map_timestamps,               // NOLINT
+ std::map<int, int> const & haz_cam_to_left_nav_cam_index,        // NOLINT
+ std::map<int, int> const & haz_cam_to_right_nav_cam_index,       // NOLINT
+ camera::CameraParameters const & nav_cam_params,                 // NOLINT
+ camera::CameraParameters const & haz_cam_params,                 // NOLINT
+ std::vector<int> const & depth_to_nav_block_sizes,               // NOLINT
+ std::vector<int> const & depth_to_haz_block_sizes,               // NOLINT
+ std::vector<Eigen::Affine3d> const & nav_cam_affines,            // NOLINT
+ std::vector<cv::Mat> const & depth_clouds,                       // NOLINT
+ // Outputs                                                       // NOLINT
+ std::vector<std::string> & residual_names,                       // NOLINT
+ double & hazcam_depth_to_image_scale,                            // NOLINT
+ std::vector<double> & nav_cam_vec,                               // NOLINT
+ std::vector<double> & hazcam_to_navcam_vec,                      // NOLINT
+ std::vector<double> & hazcam_depth_to_image_vec,                 // NOLINT
+ ceres::Problem & problem) {                                      // NOLINT
+  // Figure out the two nav cam indices bounding the current haz cam
+  // Must have sparse_map_timestamp + navcam_to_hazcam_timestamp_offset <= haz_timestamp
+  // which must be < next sparse_map_timestamp + navcam_to_hazcam_timestamp_offset.
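+  // (Numeric illustration: with nav timestamps 10.0 and 10.5, an offset of
+  // 0.1, and a haz timestamp of 10.4, the bracket is [10.1, 10.6) and the
+  // interpolation factor below is alpha = (10.4 - 10.1) / (10.6 - 10.1) = 0.6,
+  // i.e., the pose is 60% of the way from the left nav pose to the right one.)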
+  bool match_left = false;
+  if (haz_cam_intensity_timestamps[haz_it] >=
+      sparse_map_timestamps[nav_cam_start + nav_it] + navcam_to_hazcam_timestamp_offset) {
+    match_left = true;
+  } else {
+    match_left = false;  // then match right
+  }
+
+  auto left_it = haz_cam_to_left_nav_cam_index.find(haz_it);
+  auto right_it = haz_cam_to_right_nav_cam_index.find(haz_it);
+  if (left_it == haz_cam_to_left_nav_cam_index.end() ||
+      right_it == haz_cam_to_right_nav_cam_index.end())
+    LOG(FATAL) << "Book-keeping error in add_haz_nav_cost.";
+
+  int left_nav_it = left_it->second;
+  int right_nav_it = right_it->second;
+
+  if (nav_cam_start + right_nav_it >= static_cast<int>(sparse_map_timestamps.size()) ||
+      haz_it >= static_cast<int>(haz_cam_intensity_timestamps.size()))
+    LOG(FATAL) << "Book-keeping error 2.";
+
+  // Left and right nav cam image time, in haz_cam's time measurement
+  double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] +
+                     navcam_to_hazcam_timestamp_offset;
+  double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] +
+                      navcam_to_hazcam_timestamp_offset;
+  double haz_time = haz_cam_intensity_timestamps[haz_it];
+
+  bool good = (left_time <= haz_time && haz_time < right_time);
+
+  if (!good) LOG(FATAL) << "Book-keeping error 3.";
+
+  // The current nav_it must be either the left or the right index
+  if (nav_it != left_nav_it && nav_it != right_nav_it) LOG(FATAL) << "Book-keeping error 4.";
+
+  // Find the transform from the world to nav cam at the haz cam time
+  Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it];
+  Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it];
+  double alpha = (haz_time - left_time) / (right_time - left_time);
+  if (right_time == left_time) alpha = 0.0;  // handle division by zero
+
+  if (!FLAGS_timestamp_interpolation) alpha = round(alpha);
+
+  Eigen::Affine3d world_to_nav_trans_at_haz_time
+    = dense_map::linearInterp(alpha, left_nav_trans, right_nav_trans);
+
+  std::vector<dense_map::InterestPoint> const& haz_ip_vec = match_pair.first;   // alias
+  std::vector<dense_map::InterestPoint> const& nav_ip_vec = match_pair.second;  // alias
+
+  cv::Mat const& depth_cloud = depth_clouds[haz_it];  // alias
+
+  for (size_t ip_it = 0; ip_it < haz_ip_vec.size(); ip_it++) {
+    // Find the haz cam depth measurement. Use nearest neighbor interpolation
+    // to look into the depth cloud.
+    int col = round(haz_ip_vec[ip_it].x);
+    int row = round(haz_ip_vec[ip_it].y);
+
+    if (col < 0 || row < 0 || col > depth_cloud.cols || row > depth_cloud.rows)
+      LOG(FATAL) << "Book-keeping error 5.";
+
+    // Skip any point that goes out of bounds due to rounding
+    if (col == depth_cloud.cols || row == depth_cloud.rows) continue;
+
+    cv::Vec3f cv_depth_xyz = depth_cloud.at<cv::Vec3f>(row, col);
+
+    // Skip invalid measurements
+    if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) continue;
+
+    // Convert to Eigen
+    Eigen::Vector3d depth_xyz(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]);
+
+    Eigen::Vector2d undist_haz_ip;
+    Eigen::Vector2d undist_nav_ip;
+    {
+      // Make sure we don't use the distorted pixels from now on
+      Eigen::Vector2d haz_ip(haz_ip_vec[ip_it].x, haz_ip_vec[ip_it].y);
+      Eigen::Vector2d nav_ip(nav_ip_vec[ip_it].x, nav_ip_vec[ip_it].y);
+      haz_cam_params.Convert<camera::DISTORTED, camera::UNDISTORTED_C>(haz_ip, &undist_haz_ip);
+      nav_cam_params.Convert<camera::DISTORTED, camera::UNDISTORTED_C>(nav_ip, &undist_nav_ip);
+    }
+
+    // Ensure the depth point projects well into the haz cam interest point
+    ceres::CostFunction* depth_to_haz_cost_function =
+      dense_map::DepthToHazError::Create(undist_haz_ip, depth_xyz, depth_to_haz_block_sizes,
+                                         haz_cam_params);
+    ceres::LossFunction* depth_to_haz_loss_function
+      = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold);
+    problem.AddResidualBlock(depth_to_haz_cost_function, depth_to_haz_loss_function,
+                             &hazcam_depth_to_image_vec[0],
+                             &hazcam_depth_to_image_scale);
+    residual_names.push_back("haznavhaz1");
+    residual_names.push_back("haznavhaz2");
+
+    // Ensure that the depth point projects well into the nav cam interest point
+    ceres::CostFunction* depth_to_nav_cost_function
+      = dense_map::DepthToNavError::Create(undist_nav_ip, depth_xyz, alpha, match_left,
+                                           depth_to_nav_block_sizes, nav_cam_params);
+    ceres::LossFunction* depth_to_nav_loss_function
+      = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold);
+    int left_nav_index = NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it);
+    int right_nav_index = NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it);
+    problem.AddResidualBlock(depth_to_nav_cost_function, depth_to_nav_loss_function,
+                             &nav_cam_vec[left_nav_index],
+                             &nav_cam_vec[right_nav_index],
+                             &hazcam_to_navcam_vec[0], &hazcam_depth_to_image_vec[0],
+                             &hazcam_depth_to_image_scale);
+    residual_names.push_back("haznavnav1");
+    residual_names.push_back("haznavnav2");
+
+    if (FLAGS_fix_map) {
+      problem.SetParameterBlockConstant(&nav_cam_vec[left_nav_index]);
+      problem.SetParameterBlockConstant(&nav_cam_vec[right_nav_index]);
+    }
+  }
+
+  return;
+}
+
+// Prevent the linter from messing with the beautiful formatting below
+void add_haz_sci_cost(// Inputs                                             // NOLINT
+                      int haz_it, int sci_it, int nav_cam_start,            // NOLINT
+                      double navcam_to_hazcam_timestamp_offset,             // NOLINT
+                      double scicam_to_hazcam_timestamp_offset,             // NOLINT
+                      MATCH_PAIR const & match_pair,                        // NOLINT
+                      std::vector<double> const & haz_cam_intensity_timestamps,  // NOLINT
+                      std::vector<double> const & sparse_map_timestamps,    // NOLINT
+                      std::vector<double> const & sci_cam_timestamps,       // NOLINT
+                      std::map<int, int> const & haz_cam_to_left_nav_cam_index,   // NOLINT
+                      std::map<int, int> const & haz_cam_to_right_nav_cam_index,  // NOLINT
+                      std::map<int, int> const & sci_cam_to_left_nav_cam_index,   // NOLINT
+                      std::map<int, int> const & sci_cam_to_right_nav_cam_index,  // NOLINT
+                      camera::CameraParameters const & sci_cam_params,      // NOLINT
+                      camera::CameraParameters const & haz_cam_params,      // NOLINT
+                      std::vector<int> const & depth_to_sci_block_sizes,    // NOLINT
+                      std::vector<int> const & depth_to_haz_block_sizes,    // NOLINT
+                      std::vector<Eigen::Affine3d> const & nav_cam_affines, // NOLINT
+                      std::vector<cv::Mat> const & depth_clouds,            // NOLINT
+                      // Outputs                                            // NOLINT
+                      std::vector<std::string> & residual_names,            // NOLINT
+                      double & hazcam_depth_to_image_scale,                 // NOLINT
+                      std::vector<double> & nav_cam_vec,                    // NOLINT
+                      std::vector<double> & hazcam_to_navcam_vec,           // NOLINT
+                      std::vector<double> & scicam_to_hazcam_vec,           // NOLINT
+                      std::vector<double> & hazcam_depth_to_image_vec,      // NOLINT
+                      Eigen::Vector2d & sci_cam_focal_vector,               // NOLINT
+                      Eigen::Vector2d & sci_cam_optical_center,             // NOLINT
+                      Eigen::VectorXd & sci_cam_distortion,                 // NOLINT
+                      ceres::Problem & problem) {                           // NOLINT
+  auto left_it = haz_cam_to_left_nav_cam_index.find(haz_it);
+  auto right_it = haz_cam_to_right_nav_cam_index.find(haz_it);
+  if (left_it == haz_cam_to_left_nav_cam_index.end() ||
+      right_it == haz_cam_to_right_nav_cam_index.end())
+    LOG(FATAL) << "Book-keeping error 1 in add_haz_sci_cost.";
+
+  int left_nav_it = left_it->second;
+  int right_nav_it = right_it->second;
+
+  if (nav_cam_start + right_nav_it >= static_cast<int>(sparse_map_timestamps.size()) ||
+      haz_it >= static_cast<int>(haz_cam_intensity_timestamps.size()))
+    LOG(FATAL) << "Book-keeping error 2 in add_haz_sci_cost.";
+
+  // The haz and sci images must be bracketed by the same two nav images
+  {
+    auto left_it2 = sci_cam_to_left_nav_cam_index.find(sci_it);
+    auto right_it2 = sci_cam_to_right_nav_cam_index.find(sci_it);
+    if (left_it2 == sci_cam_to_left_nav_cam_index.end() ||
+        right_it2 == sci_cam_to_right_nav_cam_index.end())
+      LOG(FATAL) << "Book-keeping error 3 in add_haz_sci_cost.";
+
+    int left_nav_it2 = left_it2->second;
+    int right_nav_it2 = right_it2->second;
+
+    if (left_nav_it2 != left_nav_it || right_nav_it2 != right_nav_it)
+      LOG(FATAL) << "Book-keeping error 4 in add_haz_sci_cost.";
+  }
+
+  // Left and right nav cam image time, in haz_cam's time measurement
+  double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] +
+                     navcam_to_hazcam_timestamp_offset;
+  double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] +
+                      navcam_to_hazcam_timestamp_offset;
+
+  // Find the haz and sci time and convert them to haz cam's clock
+  double haz_time = haz_cam_intensity_timestamps[haz_it];
+  double sci_time = sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset;
+
+  bool good1 = (left_time <= haz_time && haz_time < right_time);
+  if (!good1) LOG(FATAL) << "Book-keeping error 5 in add_haz_sci_cost.";
+
+  bool good2 = (left_time <= sci_time && sci_time < right_time);
+  if (!good2) LOG(FATAL) << "Book-keeping error 6 in add_haz_sci_cost.";
+
+  // Find the transform from the world to nav cam at the haz cam time
+  Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it];
+  Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it];
+
+  double alpha_haz = (haz_time - left_time) / (right_time - left_time);
+  if (right_time == left_time) alpha_haz = 0.0;  // handle division by zero
+
+  if (!FLAGS_timestamp_interpolation) alpha_haz = round(alpha_haz);
+
+  Eigen::Affine3d world_to_nav_trans_at_haz_time =
+    dense_map::linearInterp(alpha_haz, left_nav_trans, right_nav_trans);
+
+  double alpha_sci = (sci_time - left_time) / (right_time - left_time);
+  if (right_time == left_time) alpha_sci = 0.0;  // handle division by zero
+
+  if (!FLAGS_timestamp_interpolation)
+    alpha_sci = round(alpha_sci);
+
+  Eigen::Affine3d world_to_nav_trans_at_sci_time =
+    dense_map::linearInterp(alpha_sci, left_nav_trans, right_nav_trans);
+
+  std::vector<dense_map::InterestPoint> const& haz_ip_vec = match_pair.first;
+  std::vector<dense_map::InterestPoint> const& sci_ip_vec = match_pair.second;
+
+  cv::Mat const& depth_cloud = depth_clouds[haz_it];
+
+  for (size_t ip_it = 0; ip_it < haz_ip_vec.size(); ip_it++) {
+    // Find the haz cam depth measurement. Use nearest neighbor interpolation
+    // to look into the depth cloud.
+    int col = round(haz_ip_vec[ip_it].x);
+    int row = round(haz_ip_vec[ip_it].y);
+
+    if (col < 0 || row < 0 || col > depth_cloud.cols || row > depth_cloud.rows)
+      LOG(FATAL) << "Book-keeping error 7 in add_haz_sci_cost.";
+
+    // Skip any point that goes out of bounds due to rounding
+    if (col == depth_cloud.cols || row == depth_cloud.rows) continue;
+
+    cv::Vec3f cv_depth_xyz = depth_cloud.at<cv::Vec3f>(row, col);
+
+    // Skip invalid measurements
+    if (cv_depth_xyz == cv::Vec3f(0, 0, 0)) continue;
+
+    // Convert to Eigen
+    Eigen::Vector3d depth_xyz(cv_depth_xyz[0], cv_depth_xyz[1], cv_depth_xyz[2]);
+
+    // Apply undistortion. Must take great care to not mix up
+    // distorted and undistorted pixels.
+    Eigen::Vector2d dist_haz_ip(haz_ip_vec[ip_it].x, haz_ip_vec[ip_it].y);
+    Eigen::Vector2d dist_sci_ip(sci_ip_vec[ip_it].x, sci_ip_vec[ip_it].y);
+    Eigen::Vector2d undist_haz_ip;
+    Eigen::Vector2d undist_sci_ip;
+    haz_cam_params.Convert<camera::DISTORTED, camera::UNDISTORTED_C>(dist_haz_ip, &undist_haz_ip);
+    sci_cam_params.Convert<camera::DISTORTED, camera::UNDISTORTED_C>(dist_sci_ip, &undist_sci_ip);
+
+    // Ensure the depth point projects well into the haz cam interest point
+    ceres::CostFunction* depth_to_haz_cost_function =
+      dense_map::DepthToHazError::Create(undist_haz_ip, depth_xyz, depth_to_haz_block_sizes, haz_cam_params);
+    ceres::LossFunction* depth_to_haz_loss_function = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold);
+    problem.AddResidualBlock(depth_to_haz_cost_function, depth_to_haz_loss_function, &hazcam_depth_to_image_vec[0],
+                             &hazcam_depth_to_image_scale);
+    residual_names.push_back("hazscihaz1");
+    residual_names.push_back("hazscihaz2");
+
+    // Ensure that the depth point projects well into the sci cam interest point.
+    // Note how we pass a distorted sci cam pix, as in that error function we will
+    // take the difference of distorted pixels.
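+    // (The likely motivation: the sci cam distortion itself is being optimized,
+    // so comparing in distorted pixel space lets the distortion parameters vary,
+    // whereas undistorting the observation up front would freeze the current
+    // distortion estimate into the measurement.)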
+    ceres::CostFunction* depth_to_sci_cost_function
+      = dense_map::DepthToSciError::Create(dist_sci_ip, depth_xyz, alpha_haz,
+                                           alpha_sci, depth_to_sci_block_sizes, sci_cam_params);
+    ceres::LossFunction* depth_to_sci_loss_function
+      = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold);
+    problem.AddResidualBlock(depth_to_sci_cost_function, depth_to_sci_loss_function,
+                             &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)],
+                             &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)],
+                             &hazcam_to_navcam_vec[0], &scicam_to_hazcam_vec[0], &hazcam_depth_to_image_vec[0],
+                             &hazcam_depth_to_image_scale, &sci_cam_focal_vector[0], &sci_cam_optical_center[0],
+                             &sci_cam_distortion[0]);
+
+    residual_names.push_back("hazscisci1");
+    residual_names.push_back("hazscisci2");
+
+    if (FLAGS_fix_map) {
+      problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)]);
+      problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)]);
+    }
+  }
+
+  return;
+}
+
+// Prevent the linter from messing with the beautiful formatting below
+void add_nav_sci_cost
+(// Inputs                                                          // NOLINT
+ int nav_it, int sci_it, int nav_cam_start,                         // NOLINT
+ double navcam_to_hazcam_timestamp_offset,                          // NOLINT
+ double scicam_to_hazcam_timestamp_offset,                          // NOLINT
+ MATCH_PAIR const & match_pair,                                     // NOLINT
+ std::vector<double> const & sparse_map_timestamps,                 // NOLINT
+ std::vector<double> const & sci_cam_timestamps,                    // NOLINT
+ std::map<int, int> const & sci_cam_to_left_nav_cam_index,          // NOLINT
+ std::map<int, int> const & sci_cam_to_right_nav_cam_index,         // NOLINT
+ Eigen::Affine3d const & hazcam_to_navcam_aff_trans,                // NOLINT
+ Eigen::Affine3d const & scicam_to_hazcam_aff_trans,                // NOLINT
+ camera::CameraParameters const & nav_cam_params,                   // NOLINT
+ camera::CameraParameters const & sci_cam_params,                   // NOLINT
+ std::vector<int> const & nav_block_sizes,                          // NOLINT
+ std::vector<int> const & sci_block_sizes,                          // NOLINT
+ std::vector<int> const & mesh_block_sizes,                         // NOLINT
+ std::vector<Eigen::Affine3d> const & nav_cam_affines,              // NOLINT
+ std::vector<cv::Mat> const & depth_clouds,                         // NOLINT
+ mve::TriangleMesh::Ptr const & mesh,                               // NOLINT
+ std::shared_ptr<BVHTree> const & bvh_tree,                         // NOLINT
+ // Outputs                                                         // NOLINT
+ int & nav_sci_xyz_count,                                           // NOLINT
+ std::vector<std::string> & residual_names,                         // NOLINT
+ std::vector<double> & nav_cam_vec,                                 // NOLINT
+ std::vector<double> & hazcam_to_navcam_vec,                        // NOLINT
+ std::vector<double> & scicam_to_hazcam_vec,                        // NOLINT
+ Eigen::Vector2d & sci_cam_focal_vector,                            // NOLINT
+ Eigen::Vector2d & sci_cam_optical_center,                          // NOLINT
+ Eigen::VectorXd & sci_cam_distortion,                              // NOLINT
+ std::vector<Eigen::Vector3d> & initial_nav_sci_xyz,                // NOLINT
+ std::vector<double> & nav_sci_xyz,                                 // NOLINT
+ ceres::Problem & problem) {                                        // NOLINT
+  auto left_it = sci_cam_to_left_nav_cam_index.find(sci_it);
+  auto right_it = sci_cam_to_right_nav_cam_index.find(sci_it);
+  if (left_it == sci_cam_to_left_nav_cam_index.end() ||
+      right_it == sci_cam_to_right_nav_cam_index.end())
+    LOG(FATAL) << "Book-keeping error 1 in add_nav_sci_cost.";
+
+  int left_nav_it = left_it->second;
+  int right_nav_it = right_it->second;
+
+  if (nav_cam_start + right_nav_it >= static_cast<int>(sparse_map_timestamps.size()))
+    LOG(FATAL) << "Book-keeping error 2 in add_nav_sci_cost.";
+
+  // Figure out the two nav cam indices bounding the current sci cam
+  bool match_left = false;
+  if (sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset >=
+      sparse_map_timestamps[nav_cam_start + nav_it] + navcam_to_hazcam_timestamp_offset) {
+    match_left = true;
+  } else {
+    match_left = false;  // then match right
+  }
+
+  // Left and right nav cam image time, and sci cam time, in haz_cam's time measurement
+  double left_time = sparse_map_timestamps[nav_cam_start + left_nav_it] + navcam_to_hazcam_timestamp_offset;
+  double right_time = sparse_map_timestamps[nav_cam_start + right_nav_it] + navcam_to_hazcam_timestamp_offset;
+  double sci_time = sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset;
+
+  bool good = (left_time <= sci_time && sci_time < right_time);
+
+  if (!good) LOG(FATAL) << "Book-keeping error 3 in add_nav_sci_cost.";
+
+  // Find the transform from the world to nav cam at the sci cam time
+  Eigen::Affine3d left_nav_trans = nav_cam_affines[nav_cam_start + left_nav_it];
+  Eigen::Affine3d right_nav_trans = nav_cam_affines[nav_cam_start + right_nav_it];
+  double alpha = (sci_time - left_time) / (right_time - left_time);
+  if (right_time == left_time) alpha = 0.0;  // handle division by zero
+
+  if (!FLAGS_timestamp_interpolation) alpha = round(alpha);
+
+  Eigen::Affine3d interp_world_to_nav_trans = dense_map::linearInterp(alpha, left_nav_trans, right_nav_trans);
+
+  // Find the world to sci cam transform
+  Eigen::Affine3d interp_world_to_sci_trans =
+    scicam_to_hazcam_aff_trans.inverse() *
+    hazcam_to_navcam_aff_trans.inverse() *
+    interp_world_to_nav_trans;
+
+  Eigen::Affine3d world_to_nav_trans;
+  if (match_left) {
+    world_to_nav_trans = left_nav_trans;
+  } else {
+    world_to_nav_trans = right_nav_trans;
+  }
+
+  std::vector<dense_map::InterestPoint> const& sci_ip_vec = match_pair.first;
+  std::vector<dense_map::InterestPoint> const& nav_ip_vec = match_pair.second;
+
+  for (size_t ip_it = 0; ip_it < sci_ip_vec.size(); ip_it++) {
+    Eigen::Vector2d dist_sci_ip(sci_ip_vec[ip_it].x, sci_ip_vec[ip_it].y);
+    Eigen::Vector2d dist_nav_ip(nav_ip_vec[ip_it].x, nav_ip_vec[ip_it].y);
+    Eigen::Vector2d undist_nav_ip;
+    Eigen::Vector2d undist_sci_ip;
+    nav_cam_params.Convert<camera::DISTORTED, camera::UNDISTORTED_C>(dist_nav_ip, &undist_nav_ip);
+    sci_cam_params.Convert<camera::DISTORTED, camera::UNDISTORTED_C>(dist_sci_ip, &undist_sci_ip);
+
+    Eigen::Vector3d X =
+      dense_map::TriangulatePair(sci_cam_params.GetFocalLength(), nav_cam_params.GetFocalLength(),
+                                 interp_world_to_sci_trans, world_to_nav_trans, undist_sci_ip, undist_nav_ip);
+
+    bool have_mesh_intersection = false;
+    if (FLAGS_mesh != "") {
+      // Try to make the intersection point lie on the mesh along the nav cam ray,
+      // and make the sci cam conform to that.
+      // TODO(oalexan1): Think more of the range of the ray below
+      double min_ray_dist = 0.0;
+      double max_ray_dist = 10.0;
+      Eigen::Vector3d intersection(0.0, 0.0, 0.0);
+      have_mesh_intersection
+        = dense_map::ray_mesh_intersect(undist_nav_ip, nav_cam_params,
+                                        world_to_nav_trans, mesh, bvh_tree,
+                                        min_ray_dist, max_ray_dist,
+                                        // Output
+                                        intersection);
+
+      // Overwrite the triangulation result above with the intersection
+      if (have_mesh_intersection) X = intersection;
+    }
+
+    // Record the triangulated positions. These will be optimized.
+    for (int i = 0; i < dense_map::NUM_XYZ_PARAMS; i++)
+      nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count + i] = X[i];
+
+    // A copy of the triangulated positions which won't be optimized.
+    initial_nav_sci_xyz[nav_sci_xyz_count] = X;
+
+    // The cost function of projecting into the sci cam. Note that we use dist_sci_ip,
+    // as in the cost function below we will take differences of distorted sci cam pixels.
+    ceres::CostFunction* sci_cost_function =
+      dense_map::SciError::Create(dist_sci_ip, alpha, sci_block_sizes, sci_cam_params);
+    ceres::LossFunction* sci_loss_function = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold);
+
+    problem.AddResidualBlock(sci_cost_function, sci_loss_function,
+                             &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)],
+                             &nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)],
+                             &hazcam_to_navcam_vec[0], &scicam_to_hazcam_vec[0],
+                             &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count], &sci_cam_focal_vector[0],
+                             &sci_cam_optical_center[0], &sci_cam_distortion[0]);
+
+    residual_names.push_back("navscisci1");
+    residual_names.push_back("navscisci2");
+
+    if (FLAGS_fix_map) {
+      problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + left_nav_it)]);
+      problem.SetParameterBlockConstant(&nav_cam_vec[NUM_RIGID_PARAMS * (nav_cam_start + right_nav_it)]);
+    }
+
+    // The nav cam cost function
+    ceres::CostFunction* nav_cost_function =
+      dense_map::NavError::Create(undist_nav_ip, nav_block_sizes, nav_cam_params);
+    ceres::LossFunction* nav_loss_function =
+      dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold);
+
+    problem.AddResidualBlock(nav_cost_function, nav_loss_function,
+                             &nav_cam_vec[(nav_cam_start + nav_it) * NUM_RIGID_PARAMS],
+                             &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count]);
+    residual_names.push_back("navscinav1");
+    residual_names.push_back("navscinav2");
+
+    if (FLAGS_fix_map)
+      problem.SetParameterBlockConstant(&nav_cam_vec[(nav_cam_start + nav_it)
+                                                     * NUM_RIGID_PARAMS]);
+
+    if (have_mesh_intersection) {
+      // Constrain the sci cam texture to agree with the nav cam texture on the mesh
+      ceres::CostFunction* mesh_cost_function =
+        dense_map::XYZError::Create(initial_nav_sci_xyz[nav_sci_xyz_count],
+                                    mesh_block_sizes, FLAGS_mesh_weight);
+
+      ceres::LossFunction* mesh_loss_function =
+        dense_map::GetLossFunction("cauchy", FLAGS_mesh_robust_threshold);
+
+      problem.AddResidualBlock(mesh_cost_function, mesh_loss_function,
+                               &nav_sci_xyz[dense_map::NUM_XYZ_PARAMS * nav_sci_xyz_count]);
+
+      residual_names.push_back("mesh_x");
+      residual_names.push_back("mesh_y");
+      residual_names.push_back("mesh_z");
+    }
+
+    // Very important: advance the index into the vector of xyz. Do it at the end.
+    nav_sci_xyz_count++;
+  }
+
+  return;
+}
+
+void adjustImageSize(camera::CameraParameters const& cam_params, cv::Mat & image) {
+  int raw_image_cols = image.cols;
+  int raw_image_rows = image.rows;
+  int calib_image_cols = cam_params.GetDistortedSize()[0];
+  int calib_image_rows = cam_params.GetDistortedSize()[1];
+
+  int factor = raw_image_cols / calib_image_cols;
+
+  if ((raw_image_cols != calib_image_cols * factor) || (raw_image_rows != calib_image_rows * factor)) {
+    LOG(FATAL) << "Image width and height are: " << raw_image_cols << ' ' << raw_image_rows << "\n"
+               << "Calibrated image width and height are: "
+               << calib_image_cols << ' ' << calib_image_rows << "\n"
+               << "These must be equal up to an integer factor.\n";
+  }
+
+  if (factor != 1) {
+    // TODO(oalexan1): This kind of resizing may be creating aliased images.
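+    // (Note: cv::INTER_AREA, used below, resamples using the pixel area
+    // relation, which for an integer factor amounts to block averaging; e.g.,
+    // factor == 2 shrinks by 0.5 per dimension by averaging 2x2 blocks. This
+    // is generally the safest OpenCV mode for downsampling, though some
+    // aliasing may remain, as the TODO above notes.)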
+    cv::Mat local_image;
+    cv::resize(image, local_image, cv::Size(), 1.0/factor, 1.0/factor, cv::INTER_AREA);
+    local_image.copyTo(image);
+  }
+
+  // Check
+  if (image.cols != calib_image_cols || image.rows != calib_image_rows)
+    LOG(FATAL) << "Sci cam images have the wrong size.";
+}
+
+void select_images_to_match(// Inputs                                       // NOLINT
+                            double haz_cam_start_time,                      // NOLINT
+                            double navcam_to_hazcam_timestamp_offset,       // NOLINT
+                            double scicam_to_hazcam_timestamp_offset,       // NOLINT
+                            std::vector<double> const& sparse_map_timestamps,         // NOLINT
+                            std::vector<double> const& all_haz_cam_inten_timestamps,  // NOLINT
+                            std::vector<double> const& all_sci_cam_timestamps,        // NOLINT
+                            std::set<double> const& sci_cam_timestamps_to_use,        // NOLINT
+                            dense_map::RosBagHandle const& nav_cam_handle,  // NOLINT
+                            dense_map::RosBagHandle const& sci_cam_handle,  // NOLINT
+                            dense_map::RosBagHandle const& haz_cam_points_handle,     // NOLINT
+                            dense_map::RosBagHandle const& haz_cam_intensity_handle,  // NOLINT
+                            camera::CameraParameters const& sci_cam_params, // NOLINT
+                            // Outputs                                      // NOLINT
+                            int& nav_cam_start,                             // NOLINT
+                            std::vector<dense_map::imgType>& cid_to_image_type,       // NOLINT
+                            std::vector<double>& haz_cam_intensity_timestamps,        // NOLINT
+                            std::vector<double>& sci_cam_timestamps,        // NOLINT
+                            std::vector<cv::Mat>& images,                   // NOLINT
+                            std::vector<cv::Mat>& depth_clouds) {           // NOLINT
+  // Wipe the outputs
+  nav_cam_start = -1;
+  cid_to_image_type.clear();
+  haz_cam_intensity_timestamps.clear();
+  sci_cam_timestamps.clear();
+  images.clear();
+  depth_clouds.clear();
+
+  bool stop_early = false;
+  double found_time = -1.0;
+  bool save_grayscale = true;  // feature detection needs grayscale
+
+  double navcam_to_scicam_timestamp_offset
+    = navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset;
+
+  // Use these to keep track of where in the bags we are. After one
+  // traversal forward in time they need to be reset.
+  int nav_cam_pos = 0, haz_cam_intensity_pos = 0, haz_cam_cloud_pos = 0, sci_cam_pos = 0;
+
+  for (size_t map_it = 0; map_it + 1 < sparse_map_timestamps.size(); map_it++) {
+    if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) {
+      // The case when we would like to start later. Note the second
+      // comparison after "&&". When FLAGS_start is 0, we want to
+      // make sure that if the first nav image from the bag is in the map
+      // we use it, so we don't skip it even if based on
+      // navcam_to_hazcam_timestamp_offset we should.
+      if (sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset
+          < FLAGS_start + haz_cam_start_time &&
+          sparse_map_timestamps[map_it] < FLAGS_start + haz_cam_start_time)
+        continue;
+    }
+
+    if (nav_cam_start < 0) nav_cam_start = map_it;
+
+    images.push_back(cv::Mat());
+    if (!dense_map::lookupImage(sparse_map_timestamps[map_it], nav_cam_handle.bag_msgs,
+                                save_grayscale, images.back(),
+                                nav_cam_pos, found_time)) {
+      LOG(FATAL) << std::fixed << std::setprecision(17)
+                 << "Cannot look up nav cam at time " << sparse_map_timestamps[map_it] << ".\n";
+    }
+    cid_to_image_type.push_back(dense_map::NAV_CAM);
+
+    if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) {
+      // If we would like to end earlier, then save the last nav cam image so far
+      // and quit
+      if (sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset >
+          FLAGS_start + FLAGS_duration + haz_cam_start_time) {
+        stop_early = true;
+        break;
+      }
+    }
+
+    // Do not look up sci cam and haz cam images in time intervals bigger than this
+    if (std::abs(sparse_map_timestamps[map_it + 1] - sparse_map_timestamps[map_it]) > FLAGS_bracket_len) continue;
+
+    // Append at most two haz cam images between consecutive sparse
+    // map timestamps, close to these sparse map timestamps.
+    std::vector<double> local_haz_timestamps;
+    dense_map::pickTimestampsInBounds(all_haz_cam_inten_timestamps,
+                                      sparse_map_timestamps[map_it],
+                                      sparse_map_timestamps[map_it + 1],
+                                      -navcam_to_hazcam_timestamp_offset,
+                                      local_haz_timestamps);
+
+    for (size_t samp_it = 0; samp_it < local_haz_timestamps.size(); samp_it++) {
+      haz_cam_intensity_timestamps.push_back(local_haz_timestamps[samp_it]);
+
+      double nav_start = sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset
+        - haz_cam_start_time;
+      double haz_time = local_haz_timestamps[samp_it] - haz_cam_start_time;
+      double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset
+        - haz_cam_start_time;
+
+      std::cout << "nav_start haz nav_end times "
+                << nav_start << ' ' << haz_time << ' ' << nav_end << std::endl;
+      std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl;
+
+      // Read the image
+      images.push_back(cv::Mat());
+      if (!dense_map::lookupImage(haz_cam_intensity_timestamps.back(),
+                                  haz_cam_intensity_handle.bag_msgs,
+                                  save_grayscale, images.back(), haz_cam_intensity_pos,
+                                  found_time))
+        LOG(FATAL) << "Cannot look up haz cam image at given time";
+      cid_to_image_type.push_back(dense_map::HAZ_CAM);
+
+      double cloud_time = -1.0;
+      depth_clouds.push_back(cv::Mat());
+      if (!dense_map::lookupCloud(haz_cam_intensity_timestamps.back(),
+                                  haz_cam_points_handle.bag_msgs,
+                                  FLAGS_max_haz_cam_image_to_depth_timestamp_diff,
+                                  depth_clouds.back(),
+                                  haz_cam_cloud_pos, cloud_time)) {
+        // This need not always succeed
+      }
+    }
+
+    // Append at most two sci cam images between consecutive sparse
+    // map timestamps, close to these sparse map timestamps.
+
+    std::vector<double> local_sci_timestamps;
+    dense_map::pickTimestampsInBounds(all_sci_cam_timestamps, sparse_map_timestamps[map_it],
+                                      sparse_map_timestamps[map_it + 1],
+                                      -navcam_to_scicam_timestamp_offset,
+                                      local_sci_timestamps);
+
+    // Append to the vector of sampled timestamps
+    for (size_t samp_it = 0; samp_it < local_sci_timestamps.size(); samp_it++) {
+      // Use only the specified timestamps, if desired
+      if (!sci_cam_timestamps_to_use.empty() &&
+          sci_cam_timestamps_to_use.find(local_sci_timestamps[samp_it]) ==
+          sci_cam_timestamps_to_use.end())
+        continue;
+
+      sci_cam_timestamps.push_back(local_sci_timestamps[samp_it]);
+
+      double nav_start = sparse_map_timestamps[map_it] + navcam_to_hazcam_timestamp_offset
+        - haz_cam_start_time;
+      double sci_time = local_sci_timestamps[samp_it] + scicam_to_hazcam_timestamp_offset
+        - haz_cam_start_time;
+      double nav_end = sparse_map_timestamps[map_it + 1] + navcam_to_hazcam_timestamp_offset
+        - haz_cam_start_time;
+      std::cout << "nav_start sci nav_end times "
+                << nav_start << ' ' << sci_time << ' ' << nav_end << std::endl;
+      std::cout << "nav_end - nav_start " << nav_end - nav_start << std::endl;
+
+      // Read the sci cam image, and perhaps adjust its size
+      images.push_back(cv::Mat());
+      cv::Mat local_img;
+      if (!dense_map::lookupImage(sci_cam_timestamps.back(), sci_cam_handle.bag_msgs,
+                                  save_grayscale, local_img,
+                                  sci_cam_pos, found_time))
+        LOG(FATAL) << "Cannot look up sci cam image at given time.";
+      adjustImageSize(sci_cam_params, local_img);
+      local_img.copyTo(images.back());
+
+      // Sanity check
+      Eigen::Vector2i sci_cam_size = sci_cam_params.GetDistortedSize();
+      if (images.back().cols != sci_cam_size[0] || images.back().rows != sci_cam_size[1])
+        LOG(FATAL) << "Sci cam images have the wrong size.";
+
+      cid_to_image_type.push_back(dense_map::SCI_CAM);
+    }
+  }  // End iterating over nav cam timestamps
+
+  // Add the last nav cam image from the map, unless we stopped early and this was done
+  if (!stop_early) {
+    images.push_back(cv::Mat());
+    if (!dense_map::lookupImage(sparse_map_timestamps.back(), nav_cam_handle.bag_msgs,
+                                save_grayscale, images.back(),
+                                nav_cam_pos, found_time))
+      LOG(FATAL) << "Cannot look up nav cam image at given time.";
+    cid_to_image_type.push_back(dense_map::NAV_CAM);
+  }
+
+  if (images.size() > sparse_map_timestamps.size() + haz_cam_intensity_timestamps.size() +
+      sci_cam_timestamps.size())
+    LOG(FATAL) << "Book-keeping error in select_images_to_match.";
+
+  return;
+}
+
+void set_up_block_sizes(int num_scicam_focal_lengths, int num_scicam_distortions,
+                        std::vector<int> & depth_to_haz_block_sizes,
+                        std::vector<int> & nav_block_sizes,
+                        std::vector<int> & depth_to_nav_block_sizes,
+                        std::vector<int> & depth_to_sci_block_sizes,
+                        std::vector<int> & sci_block_sizes,
+                        std::vector<int> & mesh_block_sizes) {
+  // Wipe the outputs
+  depth_to_haz_block_sizes.clear();
+  nav_block_sizes.clear();
+  depth_to_nav_block_sizes.clear();
+  depth_to_sci_block_sizes.clear();
+  sci_block_sizes.clear();
+  mesh_block_sizes.clear();
+
+  // Set up the variable blocks to optimize for DepthToHazError
+  depth_to_haz_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  depth_to_haz_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS);
+
+  // Set up the variable blocks to optimize for NavError
+  nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  nav_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS);
+
+  // Set up the variable blocks to optimize for DepthToNavError
+  depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  depth_to_nav_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  depth_to_nav_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS);
+
+  // Set up the variable blocks to optimize for DepthToSciError
+  depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  depth_to_sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  depth_to_sci_block_sizes.push_back(dense_map::NUM_SCALAR_PARAMS);
+  depth_to_sci_block_sizes.push_back(num_scicam_focal_lengths);       // focal length
+  depth_to_sci_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS);  // optical center
+  depth_to_sci_block_sizes.push_back(num_scicam_distortions);         // distortion
+
+  // Set up the variable blocks to optimize for SciError
+  sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  sci_block_sizes.push_back(dense_map::NUM_RIGID_PARAMS);
+  sci_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS);
+  sci_block_sizes.push_back(num_scicam_focal_lengths);       // focal length
+  sci_block_sizes.push_back(dense_map::NUM_OPT_CTR_PARAMS);  // optical center
+  sci_block_sizes.push_back(num_scicam_distortions);         // distortion
+
+  // Set up the variable blocks to optimize for the mesh xyz error
+  mesh_block_sizes.push_back(dense_map::NUM_XYZ_PARAMS);
+}
+
+typedef std::map<std::pair<int, int>, dense_map::MATCH_PAIR> MATCH_MAP;
+
+// Wrapper to find the value of a const map at a given key
+int mapVal(std::map<int, int> const& map, int key) {
+  auto ptr = map.find(key);
+  if (ptr == map.end())
+    LOG(FATAL) << "Cannot find map value for given index.";
+  return ptr->second;
+}
+
+// Find nav images close in time. Match the sci and haz images in between
+// to each other and to these nav images.
+void detect_match_features(// Inputs                                        // NOLINT
+                           std::vector<cv::Mat> const& images,              // NOLINT
+                           std::vector<dense_map::imgType> const& cid_to_image_type,  // NOLINT
+                           // Outputs                                       // NOLINT
+                           std::map<int, int> & image_to_nav_it,            // NOLINT
+                           std::map<int, int> & image_to_haz_it,            // NOLINT
+                           std::map<int, int> & image_to_sci_it,            // NOLINT
+                           std::map<int, int> & haz_cam_to_left_nav_cam_index,   // NOLINT
+                           std::map<int, int> & haz_cam_to_right_nav_cam_index,  // NOLINT
+                           std::map<int, int> & sci_cam_to_left_nav_cam_index,   // NOLINT
+                           std::map<int, int> & sci_cam_to_right_nav_cam_index,  // NOLINT
+                           MATCH_MAP & matches) {                           // NOLINT
+  // Wipe the outputs
+  image_to_nav_it.clear();
+  image_to_haz_it.clear();
+  image_to_sci_it.clear();
+  haz_cam_to_left_nav_cam_index.clear();
+  haz_cam_to_right_nav_cam_index.clear();
+  sci_cam_to_left_nav_cam_index.clear();
+  sci_cam_to_right_nav_cam_index.clear();
+  matches.clear();
+
+  int nav_it = 0, haz_it = 0, sci_it = 0;
+  for (int image_it = 0; image_it < static_cast<int>(images.size()); image_it++) {
+    if (cid_to_image_type[image_it] == dense_map::NAV_CAM) {
+      image_to_nav_it[image_it] = nav_it;
+      nav_it++;
+    } else if (cid_to_image_type[image_it] == dense_map::HAZ_CAM) {
+      image_to_haz_it[image_it] = haz_it;
+      haz_it++;
+    } else if (cid_to_image_type[image_it] == dense_map::SCI_CAM) {
+      image_to_sci_it[image_it] = sci_it;
+      sci_it++;
+    }
+  }
+
+  std::vector<std::pair<int, int>> image_pairs;
+
+  // Look at two neighboring nav images. Find left_img_it and
+  // right_img_it such that cid_to_image_type for both is nav cam.
+  for (int left_img_it = 0; left_img_it < static_cast<int>(images.size()); left_img_it++) {
+    if (cid_to_image_type[left_img_it] != dense_map::NAV_CAM) continue;  // Not nav cam
+
+    // Now find right_img_it
+    int right_img_it = -1;
+    for (int local_it = left_img_it + 1; local_it < static_cast<int>(images.size()); local_it++) {
+      if (cid_to_image_type[local_it] == dense_map::NAV_CAM) {
+        right_img_it = local_it;
+        break;
+      }
+    }
+
+    if (right_img_it < 0) continue;
+
+    // Now look at the sci and haz images in between
+    std::vector<int> nav_cam_indices, haz_cam_indices, sci_cam_indices;
+
+    nav_cam_indices.push_back(left_img_it);
+    nav_cam_indices.push_back(right_img_it);
+
+    for (int local_img_it = left_img_it + 1; local_img_it < right_img_it; local_img_it++) {
+      int left_nav_it = mapVal(image_to_nav_it, left_img_it);
+      int right_nav_it = mapVal(image_to_nav_it, right_img_it);
+
+      if (cid_to_image_type[local_img_it] == dense_map::HAZ_CAM) {
+        int haz_it = mapVal(image_to_haz_it, local_img_it);
+        haz_cam_indices.push_back(local_img_it);
+        haz_cam_to_left_nav_cam_index[haz_it] = left_nav_it;
+        haz_cam_to_right_nav_cam_index[haz_it] = right_nav_it;
+
+      } else if (cid_to_image_type[local_img_it] == dense_map::SCI_CAM) {
+        int sci_it = mapVal(image_to_sci_it, local_img_it);
+        sci_cam_indices.push_back(local_img_it);
+        sci_cam_to_left_nav_cam_index[image_to_sci_it[local_img_it]] = left_nav_it;
+        sci_cam_to_right_nav_cam_index[image_to_sci_it[local_img_it]] = right_nav_it;
+      }
+    }
+
+    // Match haz to nav
+    for (size_t haz_it = 0; haz_it < haz_cam_indices.size(); haz_it++) {
+      for (size_t nav_it = 0; nav_it < nav_cam_indices.size(); nav_it++) {
+        image_pairs.push_back(std::make_pair(haz_cam_indices[haz_it], nav_cam_indices[nav_it]));
+      }
+    }
+
+    // Match haz to sci
+    for (size_t haz_it = 0; haz_it < haz_cam_indices.size(); haz_it++) {
+      for (size_t sci_it = 0; sci_it < sci_cam_indices.size(); sci_it++) {
+        image_pairs.push_back(std::make_pair(haz_cam_indices[haz_it], sci_cam_indices[sci_it]));
+      }
+    }
+
+    // Match sci to nav
+    for (size_t nav_it = 0; nav_it < nav_cam_indices.size(); nav_it++) {
+      for (size_t sci_it = 0; sci_it < sci_cam_indices.size(); sci_it++) {
+        image_pairs.push_back(std::make_pair(sci_cam_indices[sci_it], nav_cam_indices[nav_it]));
+      }
+    }
+  }
+
+  // Detect features using multiple threads
+  std::vector<cv::Mat> cid_to_descriptor_map;
+  std::vector<Eigen::Matrix2Xd> cid_to_keypoint_map;
+  cid_to_descriptor_map.resize(images.size());
+  cid_to_keypoint_map.resize(images.size());
+  ff_common::ThreadPool thread_pool1;
+  for (size_t it = 0; it < images.size(); it++)
+    thread_pool1.AddTask(&dense_map::detectFeatures, images[it], FLAGS_verbose,
+                         &cid_to_descriptor_map[it], &cid_to_keypoint_map[it]);
+  thread_pool1.Join();
+
+  // Create the matches among the nav, haz, and sci images. Note that we
+  // have a starting and an ending nav cam image, and we match all the
+  // haz and sci images in between to these two nav cam images and to each
+  // other.
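+  // (Illustration: with bracketing nav images N0, N1 and one haz image H and
+  // one sci image S between them, the pairs queued above are (H, N0), (H, N1),
+  // (H, S), (S, N0), and (S, N1), so every bracketed image is matched to both
+  // brackets and the haz image is matched to the sci image.)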
+
+  // Find the matches using multiple threads
+  ff_common::ThreadPool thread_pool2;
+  std::mutex match_mutex;
+  for (size_t pair_it = 0; pair_it < image_pairs.size(); pair_it++) {
+    auto pair = image_pairs[pair_it];
+    int left_image_it = pair.first, right_image_it = pair.second;
+    thread_pool2.AddTask(&dense_map::matchFeatures, &match_mutex, left_image_it, right_image_it,
+                         cid_to_descriptor_map[left_image_it],
+                         cid_to_descriptor_map[right_image_it],
+                         cid_to_keypoint_map[left_image_it], cid_to_keypoint_map[right_image_it],
+                         FLAGS_verbose, &matches[pair]);
+  }
+  thread_pool2.Join();
+
+  return;
+}
+}  // namespace dense_map
+
+int main(int argc, char** argv) {
+  ff_common::InitFreeFlyerApplication(&argc, &argv);
+  GOOGLE_PROTOBUF_VERIFY_VERSION;
+  std::cout.precision(17);  // to be able to print timestamps
+
+  if (FLAGS_ros_bag.empty())
+    LOG(FATAL) << "The bag file was not specified.";
+  if (FLAGS_sparse_map.empty())
+    LOG(FATAL) << "The input sparse map was not specified.";
+
+  if (FLAGS_output_map.empty())
+    LOG(FATAL) << "The output sparse map was not specified.";
+
+  if (!FLAGS_skip_registration) {
+    if (FLAGS_xyz_file.empty() || FLAGS_hugin_file.empty())
+      LOG(FATAL) << "Either the hugin or xyz file was not specified.";
+  }
+
+  if (FLAGS_opt_map_only && FLAGS_fix_map)
+    LOG(FATAL) << "Cannot both float the sparse map and keep it fixed.";
+
+  if (FLAGS_robust_threshold <= 0.0)
+    LOG(FATAL) << "The robust threshold must be positive.\n";
+
+  // Set up handles for reading data at a given time stamp without
+  // searching through the whole bag each time.
+  dense_map::RosBagHandle nav_cam_handle(FLAGS_ros_bag, FLAGS_nav_cam_topic);
+  dense_map::RosBagHandle sci_cam_handle(FLAGS_ros_bag, FLAGS_sci_cam_topic);
+  dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic);
+  dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic);
+
+  if (nav_cam_handle.bag_msgs.empty()) LOG(FATAL) << "No nav cam images found.";
+  if (sci_cam_handle.bag_msgs.empty()) LOG(FATAL) << "No sci cam images found.";
+  if (haz_cam_intensity_handle.bag_msgs.empty()) LOG(FATAL) << "No haz cam images found.";
+
+  // Read the config file
+  std::vector<std::string> cam_names = {"nav_cam", "haz_cam", "sci_cam"};
+  std::vector<camera::CameraParameters> cam_params;
+  std::vector<Eigen::Affine3d> ref_to_cam_trans;
+  std::vector<double> ref_to_cam_timestamp_offsets;
+  Eigen::Affine3d navcam_to_body_trans;
+  Eigen::Affine3d hazcam_depth_to_image_transform;
+  dense_map::readConfigFile(  // Inputs
+    cam_names, "nav_cam_transform", "haz_cam_depth_to_image_transform",
+    // Outputs
+    cam_params, ref_to_cam_trans, ref_to_cam_timestamp_offsets, navcam_to_body_trans,
+    hazcam_depth_to_image_transform);
+
+  if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) {
+    for (size_t it = 0; it < cam_names.size(); it++) {
+      if (cam_names[it] == "sci_cam") {
+        double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value;
+        std::cout << "Overriding the value " << ref_to_cam_timestamp_offsets[it]
+                  << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl;
+        ref_to_cam_timestamp_offsets[it] = new_val;
+      }
+    }
+  }
+
+  // Copy to the structures expected by this tool
+  camera::CameraParameters nav_cam_params = cam_params[0];
+  camera::CameraParameters haz_cam_params = cam_params[1];
+  camera::CameraParameters sci_cam_params = cam_params[2];
+  Eigen::Affine3d hazcam_to_navcam_aff_trans = ref_to_cam_trans[1].inverse();
+  Eigen::Affine3d scicam_to_hazcam_aff_trans =
+    ref_to_cam_trans[1] * ref_to_cam_trans[2].inverse();
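+  // (Derivation: ref_to_cam_trans[i] maps the reference (nav) cam to cam i, so
+  // ref_to_cam_trans[2].inverse() maps sci to nav, and ref_to_cam_trans[1]
+  // maps nav to haz; composing the two maps sci to haz.)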
+  double navcam_to_hazcam_timestamp_offset = ref_to_cam_timestamp_offsets[1];
+  double scicam_to_hazcam_timestamp_offset =
+    ref_to_cam_timestamp_offsets[1] - ref_to_cam_timestamp_offsets[2];
+
+  if (FLAGS_mesh_weight <= 0.0 || FLAGS_mesh_robust_threshold <= 0.0)
+    LOG(FATAL) << "The mesh weight and robust threshold must be positive.\n";
+
+  mve::TriangleMesh::Ptr mesh;
+  std::shared_ptr<mve::MeshInfo> mesh_info;
+  std::shared_ptr<tex::Graph> graph;
+  std::shared_ptr<BVHTree> bvh_tree;
+  if (FLAGS_mesh != "") dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree);
+
+#if 0
+  std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl;
+  std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl;
+  std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix()
+            << "\n";
+#endif
+  std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n";
+  std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n";
+
+  // Read the sparse map
+  boost::shared_ptr<sparse_mapping::SparseMap> sparse_map =
+    boost::shared_ptr<sparse_mapping::SparseMap>(new sparse_mapping::SparseMap(FLAGS_sparse_map));
+
+  // TODO(oalexan1): All this timestamp reading logic below should be in a function
+
+  // Find the minimum and maximum timestamps in the sparse map
+  double min_map_timestamp = std::numeric_limits<double>::max();
+  double max_map_timestamp = -min_map_timestamp;
+  std::vector<double> sparse_map_timestamps;
+  const std::vector<std::string>& sparse_map_images = sparse_map->cid_to_filename_;
+  sparse_map_timestamps.resize(sparse_map_images.size());
+  for (size_t cid = 0; cid < sparse_map_images.size(); cid++) {
+    double timestamp = dense_map::fileNameToTimestamp(sparse_map_images[cid]);
+    sparse_map_timestamps[cid] = timestamp;
+    min_map_timestamp = std::min(min_map_timestamp, sparse_map_timestamps[cid]);
+    max_map_timestamp = std::max(max_map_timestamp, sparse_map_timestamps[cid]);
+  }
+  if (sparse_map_timestamps.empty()) LOG(FATAL) << "No sparse map timestamps found.";
+
+  // Read the haz cam timestamps
+  std::vector<rosbag::MessageInstance> const& haz_cam_intensity_msgs
+    = haz_cam_intensity_handle.bag_msgs;
+  if (haz_cam_intensity_msgs.empty()) LOG(FATAL) << "No haz cam messages are present.";
+  double haz_cam_start_time = -1.0;
+  std::vector<double> all_haz_cam_inten_timestamps;
+  for (size_t it = 0; it < haz_cam_intensity_msgs.size(); it++) {
+    sensor_msgs::Image::ConstPtr image_msg
+      = haz_cam_intensity_msgs[it].instantiate<sensor_msgs::Image>();
+    if (image_msg) {
+      double haz_cam_time = image_msg->header.stamp.toSec();
+      all_haz_cam_inten_timestamps.push_back(haz_cam_time);
+      if (haz_cam_start_time < 0) haz_cam_start_time = haz_cam_time;
+    }
+  }
+
+  // Read the sci cam timestamps from the bag
+  std::vector<rosbag::MessageInstance> const& sci_cam_msgs = sci_cam_handle.bag_msgs;
+  std::vector<double> all_sci_cam_timestamps;
+  for (size_t sci_it = 0; sci_it < sci_cam_msgs.size(); sci_it++) {
+    sensor_msgs::Image::ConstPtr sci_image_msg
+      = sci_cam_msgs[sci_it].instantiate<sensor_msgs::Image>();
+    sensor_msgs::CompressedImage::ConstPtr comp_sci_image_msg =
+      sci_cam_msgs[sci_it].instantiate<sensor_msgs::CompressedImage>();
+    if (sci_image_msg)
+      all_sci_cam_timestamps.push_back(sci_image_msg->header.stamp.toSec());
+    else if (comp_sci_image_msg)
+      all_sci_cam_timestamps.push_back(comp_sci_image_msg->header.stamp.toSec());
+  }
+
+  // If desired, process only specific timestamps
+  std::set<double> sci_cam_timestamps_to_use;
+  if (FLAGS_sci_cam_timestamps != "") {
+    std::ifstream ifs(FLAGS_sci_cam_timestamps.c_str());
+    double val;
+    while (ifs >> val) sci_cam_timestamps_to_use.insert(val);
+  }
+
+  // Will optimize the nav cam poses as part of the process
+  std::vector<Eigen::Affine3d>& nav_cam_affines = sparse_map->cid_to_cam_t_global_;  // alias
+
+  if (FLAGS_start >= 0.0 && FLAGS_duration > 0.0) {
+    double latest_start
+      = sparse_map_timestamps.back() + navcam_to_hazcam_timestamp_offset - haz_cam_start_time;
+    if (latest_start < FLAGS_start) {
+      std::cout << "The sparse map ended before the desired start time. "
+                << "Use a start time no later than " << latest_start << "." << std::endl;
+      return 1;
+    }
+
+    double earliest_end = sparse_map_timestamps[0] + navcam_to_hazcam_timestamp_offset
+      - haz_cam_start_time;
+    if (earliest_end > FLAGS_start + FLAGS_duration) {
+      std::cout << "The sparse map did not start yet. The sum of start and duration "
+                << "must be at least " << earliest_end << "." << std::endl;
+      return 1;
+    }
+  }
+
+  int nav_cam_start = -1;  // Record the first nav cam image used
+  std::vector<dense_map::imgType> cid_to_image_type;
+  std::vector<double> haz_cam_intensity_timestamps, sci_cam_timestamps;  // timestamps we will use
+  std::vector<cv::Mat> images;
+  std::vector<cv::Mat> depth_clouds;
+  // Find nav images close in time. Match the sci and haz images in between
+  // to each other and to these nav images.
+  // TODO(oalexan1): This selects haz cam images outside of bracket.
+  // TODO(oalexan1): No check for bracket size either.
+  select_images_to_match(  // Inputs                                        // NOLINT
+    haz_cam_start_time, navcam_to_hazcam_timestamp_offset,                  // NOLINT
+    scicam_to_hazcam_timestamp_offset,                                      // NOLINT
+    sparse_map_timestamps, all_haz_cam_inten_timestamps,                    // NOLINT
+    all_sci_cam_timestamps, sci_cam_timestamps_to_use,                      // NOLINT
+    nav_cam_handle, sci_cam_handle, haz_cam_points_handle,                  // NOLINT
+    haz_cam_intensity_handle,                                               // NOLINT
+    sci_cam_params,                                                         // NOLINT
+    // Outputs                                                              // NOLINT
+    nav_cam_start, cid_to_image_type,                                       // NOLINT
+    haz_cam_intensity_timestamps, sci_cam_timestamps,                       // NOLINT
+    images, depth_clouds);                                                  // NOLINT
+
+  // If an image in the vector of images is of nav_cam type, see where its index is
+  // in the list of nav cam images
+  std::map<int, int> image_to_nav_it, image_to_haz_it, image_to_sci_it;
+
+  // Given the index of a haz cam or sci cam image, find the index of the left and right
+  // nav cam images bounding it in time.
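+  // (For example, if the image sequence is nav0, haz0, sci0, nav1, then for
+  // haz cam index 0 the left nav cam index is 0 and the right one is 1, and
+  // likewise for sci cam index 0.)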
+  std::map<int, int> haz_cam_to_left_nav_cam_index, haz_cam_to_right_nav_cam_index;
+  std::map<int, int> sci_cam_to_left_nav_cam_index, sci_cam_to_right_nav_cam_index;
+
+  // Map each image index pair to its vector of interest point matches
+  dense_map::MATCH_MAP matches;
+
+  // Detect and match features among the bracketed images (see detect_match_features)
+  detect_match_features(// Inputs                                           // NOLINT
+                        images, cid_to_image_type,                          // NOLINT
+                        // Outputs                                          // NOLINT
+                        image_to_nav_it, image_to_haz_it,                   // NOLINT
+                        image_to_sci_it,                                    // NOLINT
+                        haz_cam_to_left_nav_cam_index,                      // NOLINT
+                        haz_cam_to_right_nav_cam_index,                     // NOLINT
+                        sci_cam_to_left_nav_cam_index,                      // NOLINT
+                        sci_cam_to_right_nav_cam_index,                     // NOLINT
+                        matches);                                           // NOLINT
+
+  double hazcam_depth_to_image_scale = pow(hazcam_depth_to_image_transform.matrix().determinant(), 1.0 / 3.0);
+
+  // Since we will keep the scale fixed, vary the part of the transform without
+  // the scale, while adding the scale back each time before the transform is applied
+  Eigen::Affine3d hazcam_depth_to_image_noscale = hazcam_depth_to_image_transform;
+  hazcam_depth_to_image_noscale.linear() /= hazcam_depth_to_image_scale;
+
+  // Form the optimization vector for hazcam_depth_to_image_transform
+  std::vector<double> hazcam_depth_to_image_vec(dense_map::NUM_RIGID_PARAMS);
+  dense_map::rigid_transform_to_array(hazcam_depth_to_image_noscale, &hazcam_depth_to_image_vec[0]);
+
+  // Go back from the array to the original transform. This will make it into a pure
+  // scale*rotation + translation, removing the artifacts of it having been read
+  // from disk where it was stored with just a few digits of precision
+  dense_map::array_to_rigid_transform(hazcam_depth_to_image_transform, &hazcam_depth_to_image_vec[0]);
+  hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale;
+
+  // Form the optimization vector for hazcam_to_navcam_aff_trans
+  std::vector<double> hazcam_to_navcam_vec(dense_map::NUM_RIGID_PARAMS);
+  dense_map::rigid_transform_to_array(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]);
+
+  // Recreate hazcam_to_navcam_aff_trans as above
+  dense_map::array_to_rigid_transform(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]);
+
+  // Form the optimization vector for scicam_to_hazcam_aff_trans
+  std::vector<double> scicam_to_hazcam_vec(dense_map::NUM_RIGID_PARAMS);
+  dense_map::rigid_transform_to_array(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]);
+
+  // Recreate scicam_to_hazcam_aff_trans as above
+  dense_map::array_to_rigid_transform(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]);
+
+  // See how many nav to sci matches we have and allocate space for their xyz
+  // TODO(oalexan1): This must be factored out as a function
+  int num_nav_sci_xyz = 0;
+  for (auto it = matches.begin(); it != matches.end(); it++) {
+    std::pair<int, int> index_pair = it->first;
+    dense_map::MATCH_PAIR const& match_pair = it->second;
+
+    std::map<int, int>::iterator image_haz_it, image_nav_it, image_sci_it;
+
+    image_sci_it = image_to_sci_it.find(index_pair.first);
+    image_nav_it = image_to_nav_it.find(index_pair.second);
+    if (image_sci_it != image_to_sci_it.end() && image_nav_it != image_to_nav_it.end()) {
+      int sci_it = image_sci_it->second;
+      int nav_it = image_nav_it->second;
+
+      // Add to the number of xyz
+      num_nav_sci_xyz += match_pair.first.size();
+
+      if (FLAGS_verbose) saveImagesAndMatches("sci", "nav", index_pair, match_pair, images);
+    }
+
+    image_haz_it = image_to_haz_it.find(index_pair.first);
+    image_sci_it = image_to_sci_it.find(index_pair.second);
+    if (image_haz_it != image_to_haz_it.end() && image_sci_it != image_to_sci_it.end()) {
+      if (FLAGS_verbose) saveImagesAndMatches("haz", "sci", index_pair, match_pair, images);
+    }
+
+    image_haz_it = image_to_haz_it.find(index_pair.first);
+    image_nav_it = image_to_nav_it.find(index_pair.second);
+    if (image_haz_it != image_to_haz_it.end() && image_nav_it != image_to_nav_it.end()) {
+      if (FLAGS_verbose) saveImagesAndMatches("haz", "nav", index_pair, match_pair, images);
+    }
+  }
+
+  // Prepare for floating the sci cam params
+  int num_scicam_focal_lengths = 1;  // Same focal length in x and y
+  std::set<std::string> sci_cam_intrinsics_to_float;
+  dense_map::parse_intrinsics_to_float(FLAGS_sci_cam_intrinsics_to_float, sci_cam_intrinsics_to_float);
+  Eigen::Vector2d sci_cam_focal_vector = sci_cam_params.GetFocalVector();
+  if (num_scicam_focal_lengths == 1) {
+    sci_cam_focal_vector[0] = sci_cam_params.GetFocalLength();  // average the two focal lengths
+    sci_cam_focal_vector[1] = sci_cam_focal_vector[0];
+  }
+  std::cout << std::endl;
+  std::cout << "Initial focal length for sci cam: " << sci_cam_focal_vector.transpose() << std::endl;
+  Eigen::Vector2d sci_cam_optical_center = sci_cam_params.GetOpticalOffset();
+  std::cout << "Initial optical center for sci_cam: " << sci_cam_optical_center.transpose() << std::endl;
+  Eigen::VectorXd sci_cam_distortion = sci_cam_params.GetDistortion();
+  std::cout << "Initial distortion for sci_cam: " << sci_cam_distortion.transpose() << std::endl;
+
+  std::vector<int> depth_to_haz_block_sizes, nav_block_sizes;
+  std::vector<int> depth_to_nav_block_sizes, depth_to_sci_block_sizes;
+  std::vector<int> sci_block_sizes, mesh_block_sizes;
+  dense_map::set_up_block_sizes(// Inputs                                   // NOLINT
+                                num_scicam_focal_lengths, sci_cam_distortion.size(),  // NOLINT
+                                // Outputs                                  // NOLINT
+                                depth_to_haz_block_sizes, nav_block_sizes,  // NOLINT
+                                depth_to_nav_block_sizes, depth_to_sci_block_sizes,   // NOLINT
+                                sci_block_sizes, mesh_block_sizes);         // NOLINT
+
+  // Storage for the residual names and xyz
+  std::vector<std::string> residual_names;
+  std::vector<double> nav_sci_xyz(dense_map::NUM_XYZ_PARAMS * num_nav_sci_xyz, 0);
+  std::vector<Eigen::Vector3d> initial_nav_sci_xyz(num_nav_sci_xyz, Eigen::Vector3d(0, 0, 0));
+
+  // Put the sparse map camera transforms in a vector so we can optimize them further
+  int num_cams = nav_cam_affines.size();
+  std::vector<double> nav_cam_vec(num_cams * dense_map::NUM_RIGID_PARAMS);
+  for (int cid = 0; cid < num_cams; cid++)
+    dense_map::rigid_transform_to_array(nav_cam_affines[cid],
+                                        &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]);
+
+  int nav_sci_xyz_count = 0;
+
+  ceres::Problem problem;
+
+  // Form the problem
+  for (auto it = matches.begin(); it != matches.end(); it++) {
+    std::pair<int, int> const& index_pair = it->first;     // alias
+    dense_map::MATCH_PAIR const& match_pair = it->second;  // alias
+
+    std::map<int, int>::iterator image_haz_it, image_nav_it, image_sci_it;
+
+    // Doing haz cam to nav cam matches
+    image_haz_it = image_to_haz_it.find(index_pair.first);
+    image_nav_it = image_to_nav_it.find(index_pair.second);
+    if (image_haz_it != image_to_haz_it.end() && image_nav_it != image_to_nav_it.end()) {
+      int haz_it = image_haz_it->second;
+      int nav_it = image_nav_it->second;
+      dense_map::add_haz_nav_cost(// Inputs                                 // NOLINT
+                                  haz_it, nav_it, nav_cam_start,            // NOLINT
+                                  navcam_to_hazcam_timestamp_offset,        // NOLINT
+                                  match_pair, haz_cam_intensity_timestamps, // NOLINT
+                                  sparse_map_timestamps, haz_cam_to_left_nav_cam_index,  // NOLINT
+                                  haz_cam_to_right_nav_cam_index, nav_cam_params,        // NOLINT
+                                  haz_cam_params, depth_to_nav_block_sizes, // NOLINT
+                                  depth_to_haz_block_sizes,                 // NOLINT
nav_cam_affines, depth_clouds, // NOLINT + // Outputs // NOLINT + residual_names, hazcam_depth_to_image_scale, // NOLINT + nav_cam_vec, hazcam_to_navcam_vec, // NOLINT + hazcam_depth_to_image_vec, // NOLINT + problem); // NOLINT + } + + // Doing haz cam to sci cam matches + image_haz_it = image_to_haz_it.find(index_pair.first); + image_sci_it = image_to_sci_it.find(index_pair.second); + if (image_haz_it != image_to_haz_it.end() && image_sci_it != image_to_sci_it.end()) { + int haz_it = image_haz_it->second; + int sci_it = image_sci_it->second; + add_haz_sci_cost( // Inputs // NOLINT + haz_it, sci_it, nav_cam_start, navcam_to_hazcam_timestamp_offset, // NOLINT + scicam_to_hazcam_timestamp_offset, match_pair, // NOLINT + haz_cam_intensity_timestamps, sparse_map_timestamps, // NOLINT + sci_cam_timestamps, haz_cam_to_left_nav_cam_index, // NOLINT + haz_cam_to_right_nav_cam_index, sci_cam_to_left_nav_cam_index, // NOLINT + sci_cam_to_right_nav_cam_index, sci_cam_params, // NOLINT + haz_cam_params, depth_to_sci_block_sizes, // NOLINT + depth_to_haz_block_sizes, nav_cam_affines, // NOLINT + depth_clouds, // NOLINT + // Outputs // NOLINT + residual_names, hazcam_depth_to_image_scale, nav_cam_vec, // NOLINT + hazcam_to_navcam_vec, scicam_to_hazcam_vec, // NOLINT + hazcam_depth_to_image_vec, sci_cam_focal_vector, // NOLINT + sci_cam_optical_center, sci_cam_distortion, problem); // NOLINT + } + + // Doing sci cam to nav cam matches + image_nav_it = image_to_nav_it.find(index_pair.second); + image_sci_it = image_to_sci_it.find(index_pair.first); + if (image_nav_it != image_to_nav_it.end() && image_sci_it != image_to_sci_it.end()) { + int nav_it = image_nav_it->second; + int sci_it = image_sci_it->second; + + add_nav_sci_cost(// Inputs // NOLINT + nav_it, sci_it, nav_cam_start, // NOLINT + navcam_to_hazcam_timestamp_offset, // NOLINT + scicam_to_hazcam_timestamp_offset, // NOLINT + match_pair, // NOLINT + sparse_map_timestamps, sci_cam_timestamps, // NOLINT + sci_cam_to_left_nav_cam_index, sci_cam_to_right_nav_cam_index, // NOLINT + hazcam_to_navcam_aff_trans, scicam_to_hazcam_aff_trans, // NOLINT + nav_cam_params, sci_cam_params, // NOLINT + nav_block_sizes, sci_block_sizes, mesh_block_sizes, // NOLINT + nav_cam_affines, depth_clouds, mesh, // NOLINT + bvh_tree, // NOLINT + // Outputs // NOLINT + nav_sci_xyz_count, residual_names, // NOLINT + nav_cam_vec, hazcam_to_navcam_vec, // NOLINT + scicam_to_hazcam_vec, sci_cam_focal_vector, // NOLINT + sci_cam_optical_center, sci_cam_distortion, // NOLINT + initial_nav_sci_xyz, nav_sci_xyz, // NOLINT + problem); // NOLINT + } + } // end iterating over matches + + if (nav_sci_xyz_count != num_nav_sci_xyz) LOG(FATAL) << "nav sci xyz book-keeping error."; + + if (sparse_map->pid_to_cid_fid_.size() != sparse_map->pid_to_xyz_.size()) + LOG(FATAL) << "Book-keeping error 1 in the sparse map."; + + for (size_t pid = 0; pid < sparse_map->pid_to_cid_fid_.size(); pid++) { + // Note that xyz is an alias. This is very important as we optimize these + // xyz in-place. 
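The loop below hands Ceres raw pointers into nav_cam_vec and into the sparse map's xyz storage, which is why the aliasing note above matters. The actual cost functor, dense_map::NavError, is defined elsewhere in this repo; purely as a sketch of the general shape of such a reprojection functor (the 7-double camera block layout of translation followed by a unit quaternion is an assumption, not the repo's documented convention):

    #include <ceres/ceres.h>
    #include <ceres/rotation.h>

    // Hypothetical pinhole reprojection functor, not the repo's NavError.
    // Assumed camera block layout: [tx, ty, tz, qx, qy, qz, qw], world-to-camera.
    struct PinholeReprojError {
      PinholeReprojError(double u, double v, double f, double cx, double cy)
        : u_(u), v_(v), f_(f), cx_(cx), cy_(cy) {}

      template <typename T>
      bool operator()(const T* cam, const T* xyz, T* residuals) const {
        // ceres::QuaternionRotatePoint expects [w, x, y, z], so reorder
        T q[4] = {cam[6], cam[3], cam[4], cam[5]};
        T p[3];
        ceres::QuaternionRotatePoint(q, xyz, p);
        for (int i = 0; i < 3; i++) p[i] += cam[i];  // apply the translation
        // Pinhole projection compared against the measured pixel
        residuals[0] = f_ * p[0] / p[2] + cx_ - u_;
        residuals[1] = f_ * p[1] / p[2] + cy_ - v_;
        return true;
      }

      static ceres::CostFunction* Create(double u, double v, double f, double cx, double cy) {
        return new ceres::AutoDiffCostFunction<PinholeReprojError, 2, 7, 3>(
            new PinholeReprojError(u, v, f, cx, cy));
      }

      double u_, v_, f_, cx_, cy_;
    };

Registering such a functor mirrors the AddResidualBlock calls in this section: problem.AddResidualBlock(PinholeReprojError::Create(u, v, f, cx, cy), loss, &nav_cam_vec[7 * cid], &xyz[0]);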
+ Eigen::Vector3d& xyz = sparse_map->pid_to_xyz_[pid]; + + for (auto cid_fid = sparse_map->pid_to_cid_fid_[pid].begin(); + cid_fid != sparse_map->pid_to_cid_fid_[pid].end(); + cid_fid++) { + int cid = cid_fid->first; + int fid = cid_fid->second; + + Eigen::Matrix2Xd& keypoints = sparse_map->cid_to_keypoint_map_[cid]; // alias + + if (fid > keypoints.cols()) LOG(FATAL) << "Book-keeping error 2 in the sparse map."; + + Eigen::Vector2d undist_nav_ip = keypoints.col(fid); + + ceres::CostFunction* nav_cost_function = + dense_map::NavError::Create(undist_nav_ip, nav_block_sizes, nav_cam_params); + ceres::LossFunction* nav_loss_function + = dense_map::GetLossFunction("cauchy", FLAGS_robust_threshold); + problem.AddResidualBlock(nav_cost_function, nav_loss_function, + &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid], + &xyz[0]); + residual_names.push_back("nav1"); + residual_names.push_back("nav2"); + + if (FLAGS_fix_map) + problem.SetParameterBlockConstant(&nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]); + } + } + + if (FLAGS_opt_map_only) { + problem.SetParameterBlockConstant(&hazcam_depth_to_image_vec[0]); + problem.SetParameterBlockConstant(&hazcam_depth_to_image_scale); + problem.SetParameterBlockConstant(&hazcam_to_navcam_vec[0]); + problem.SetParameterBlockConstant(&scicam_to_hazcam_vec[0]); + } + + // Evaluate the residual before optimization + double total_cost = 0.0; + std::vector residuals; + ceres::Problem::EvaluateOptions eval_options; + eval_options.num_threads = 1; + eval_options.apply_loss_function = false; // want raw residuals + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "Book-keeping error in the number of residuals."; + + if (FLAGS_verbose) { + for (size_t it = 0; it < residuals.size(); it++) + std::cout << "initial res " << residual_names[it] << " " << residuals[it] << std::endl; + } + + // Want the RMSE residual with loss, to understand what all residuals contribute, + // but without getting distracted by the outliers + eval_options.apply_loss_function = false; + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + dense_map::calc_median_residuals(residuals, residual_names, "before opt"); + + // Experimentally it was found that it was better to keep the scale + // constant and optimize everything else. Later registration will + // happen to correct any drift in the nav cam poses. + if (!FLAGS_float_scale) problem.SetParameterBlockConstant(&hazcam_depth_to_image_scale); + + // See which sci cam intrinsics values to float or keep fixed + if (sci_cam_intrinsics_to_float.find("focal_length") == sci_cam_intrinsics_to_float.end()) { + // std::cout << "For " << sci_cam << " not floating focal_length." << std::endl; + problem.SetParameterBlockConstant(&sci_cam_focal_vector[0]); + } else { + // std::cout << "For " << sci_cam << " floating focal_length." << std::endl; + } + if (sci_cam_intrinsics_to_float.find("optical_center") == sci_cam_intrinsics_to_float.end()) { + // std::cout << "For " << sci_cam << " not floating optical_center." << std::endl; + problem.SetParameterBlockConstant(&sci_cam_optical_center[0]); + } else { + // std::cout << "For " << sci_cam << " floating optical_center." << std::endl; + } + if (sci_cam_intrinsics_to_float.find("distortion") == sci_cam_intrinsics_to_float.end()) { + // std::cout << "For " << sci_cam << " not floating distortion." 
<< std::endl; + problem.SetParameterBlockConstant(&sci_cam_distortion[0]); + } else { + // std::cout << "For " << sci_cam << " floating distortion." << std::endl; + } + + // Solve the problem + ceres::Solver::Options options; + ceres::Solver::Summary summary; + options.linear_solver_type = ceres::ITERATIVE_SCHUR; + options.num_threads = FLAGS_num_opt_threads; // The result is more predictable with one thread + options.max_num_iterations = FLAGS_num_iterations; + options.minimizer_progress_to_stdout = true; + options.gradient_tolerance = 1e-16; + options.function_tolerance = 1e-16; + options.parameter_tolerance = FLAGS_parameter_tolerance; + ceres::Solve(options, &problem, &summary); + + // Copy back the optimized intrinsics + sci_cam_params.SetFocalLength(Eigen::Vector2d(sci_cam_focal_vector[0], + sci_cam_focal_vector[0])); + sci_cam_params.SetOpticalOffset(sci_cam_optical_center); + sci_cam_params.SetDistortion(sci_cam_distortion); + + // Update with the optimized results + dense_map::array_to_rigid_transform(hazcam_depth_to_image_transform, &hazcam_depth_to_image_vec[0]); + hazcam_depth_to_image_transform.linear() *= hazcam_depth_to_image_scale; + dense_map::array_to_rigid_transform(hazcam_to_navcam_aff_trans, &hazcam_to_navcam_vec[0]); + dense_map::array_to_rigid_transform(scicam_to_hazcam_aff_trans, &scicam_to_hazcam_vec[0]); + for (int cid = 0; cid < num_cams; cid++) + dense_map::array_to_rigid_transform(nav_cam_affines[cid], &nav_cam_vec[dense_map::NUM_RIGID_PARAMS * cid]); + + // Compute the residuals without loss, want to see the outliers too + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + if (residuals.size() != residual_names.size()) + LOG(FATAL) << "Book-keeping error in the number of residuals."; + + if (FLAGS_verbose) { + for (size_t it = 0; it < residuals.size(); it++) + std::cout << "final res " << residual_names[it] << " " << residuals[it] << std::endl; + } + + // Want the RMSE residual with loss, to understand what all residuals contribute, + // but without getting distracted by the outliers + eval_options.apply_loss_function = false; + problem.Evaluate(eval_options, &total_cost, &residuals, NULL, NULL); + dense_map::calc_median_residuals(residuals, residual_names, "after opt"); + + // Re-register the map, and keep track of what scale was used. + // Note that we do not update the positions of the sci and haz + // images. We don't need those any more. 
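The rigid_transform_to_array / array_to_rigid_transform round trips used above, both before the solve and when copying back the optimized results, work because decoding re-normalizes the rotation. A minimal sketch of such an encode/decode pair, assuming a 7-double layout of translation plus quaternion (dense_map's real helpers may order the parameters differently):

    #include <Eigen/Geometry>

    // Hypothetical encode/decode pair; illustrates why a round trip scrubs
    // the low-precision artifacts of transforms read from a config file.
    void to_array(Eigen::Affine3d const& T, double* arr) {
      Eigen::Quaterniond q(T.linear());  // assumes the linear part is a rotation
      arr[0] = T.translation()[0]; arr[1] = T.translation()[1]; arr[2] = T.translation()[2];
      arr[3] = q.x(); arr[4] = q.y(); arr[5] = q.z(); arr[6] = q.w();
    }

    void from_array(double const* arr, Eigen::Affine3d& T) {
      Eigen::Quaterniond q(arr[6], arr[3], arr[4], arr[5]);  // (w, x, y, z)
      q.normalize();  // the linear part becomes an exact rotation again
      T = Eigen::Affine3d(Eigen::Translation3d(arr[0], arr[1], arr[2])) * q;
    }

Since the quaternion only encodes a rotation, the scale stripped off before encoding has to be multiplied back into the linear part after decoding, exactly as done with hazcam_depth_to_image_scale above.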
+  if (!FLAGS_skip_registration) {
+    std::vector<std::string> data_files;
+    data_files.push_back(FLAGS_hugin_file);
+    data_files.push_back(FLAGS_xyz_file);
+    bool verification = false;
+    double map_scale
+      = sparse_mapping::RegistrationOrVerification(data_files, verification, sparse_map.get());
+
+    // Apply the scale
+    hazcam_depth_to_image_scale *= map_scale;
+    // This transform is affine, so both the linear and translation parts need a scale
+    hazcam_depth_to_image_transform.linear() *= map_scale;
+    hazcam_depth_to_image_transform.translation() *= map_scale;
+    // These two are rotation + translation, so only the translation needs the scale
+    hazcam_to_navcam_aff_trans.translation() *= map_scale;
+    scicam_to_hazcam_aff_trans.translation() *= map_scale;
+  }
+
+  std::cout << "Writing: " << FLAGS_output_map << std::endl;
+  sparse_map->Save(FLAGS_output_map);
+
+  if (!FLAGS_opt_map_only) {
+    // Save back the results
+    // Recall that cam_names = {"nav_cam", "haz_cam", "sci_cam"};
+    // nav_to_haz
+    ref_to_cam_trans[1] = hazcam_to_navcam_aff_trans.inverse();
+    // nav_to_sci
+    ref_to_cam_trans[2] = scicam_to_hazcam_aff_trans.inverse() *
+      hazcam_to_navcam_aff_trans.inverse();
+    ref_to_cam_timestamp_offsets[1] = navcam_to_hazcam_timestamp_offset;
+    ref_to_cam_timestamp_offsets[2] =
+      navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset;
+    dense_map::updateConfigFile(cam_names, "haz_cam_depth_to_image_transform",
+                                cam_params, ref_to_cam_trans,
+                                ref_to_cam_timestamp_offsets,
+                                hazcam_depth_to_image_transform);
+  }
+
+  return 0;
+}  // NOLINT
+
diff --git a/dense_map/geometry_mapper/tools/cameras_to_texrecon.py b/dense_map/geometry_mapper/tools/cameras_to_texrecon.py
index 54006b5d..387483e1 100644
--- a/dense_map/geometry_mapper/tools/cameras_to_texrecon.py
+++ b/dense_map/geometry_mapper/tools/cameras_to_texrecon.py
@@ -40,7 +40,9 @@
     help="The directory containing the undistorted images.",
 )
 parser.add_argument(
-    "--camera_type", default="", help="The camera type (nav_cam, haz_cam, or sci_cam)."
+ "--camera_type", + default="", + help="The camera type (nav_cam, haz_cam, or sci_cam, etc.).", ) args = parser.parse_args() @@ -51,14 +53,6 @@ ) sys.exit(1) -if ( - args.camera_type != "nav_cam" - and args.camera_type != "haz_cam" - and args.camera_type != "sci_cam" -): - print("The camera type must be nav_cam, haz_cam, or sci_cam") - sys.exit(1) - # Read the intrinsics intr_file = args.undistorted_image_dir + "/undistorted_intrinsics.txt" if not os.path.exists(intr_file): @@ -99,6 +93,8 @@ # run into old files index_file = os.path.join(args.camera_dir, args.camera_type + "_index.txt") +camera_files = [] + with open(index_file, "r") as f: for image_file in f: image_file = image_file.rstrip() @@ -109,6 +105,7 @@ print("Expecting a .jpg file, but got: " + image_file) in_cam = os.path.join(args.camera_dir, m.group(1) + suffix) + camera_files.append(in_cam) out_cam = args.undistorted_image_dir + "/" + os.path.basename(in_cam) @@ -126,13 +123,13 @@ M = np.linalg.inv(M) # world to camera print("Writing: " + out_cam) - with open(out_cam, "w") as f: + with open(out_cam, "w") as g: # translation - f.write("%0.17g %0.17g %0.17g " % (M[0][3], M[1][3], M[2][3])) + g.write("%0.17g %0.17g %0.17g " % (M[0][3], M[1][3], M[2][3])) # rotation - f.write( + g.write( "%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" % ( M[0][0], @@ -148,7 +145,15 @@ ) # normaized inrinsics - f.write( + g.write( "%0.17g %0.17g %0.17g %0.17g %0.17g %0.17g\n" % (nf, d0, d1, paspect, ncx, ncy) ) + +# Save the name of the camera transforms. This will be used later +# for individual texturing of each image and camera. +camera_list = os.path.join(args.camera_dir, args.camera_type + "_transforms.txt") +with open(camera_list, "w") as f: + print("Writing: " + camera_list) + for camera in camera_files: + f.write(camera + "\n") diff --git a/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc b/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc index ab4bc5ca..deb954bf 100644 --- a/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc +++ b/dense_map/geometry_mapper/tools/extract_pc_from_bag.cc @@ -20,22 +20,17 @@ #include #include +#include + #include #include -#include - -// ROS -#include #include #include -#include -#include - #include + #include #include -#include #include #include @@ -46,9 +41,9 @@ #include #include -// Extract images from a ROS bag. +// Extract clouds a ROS bag. 
-DEFINE_string(topic, "/hw/cam_nav", "Image topic that you want to write to disk."); +DEFINE_string(topic, "/hw/depth_haz/points", "The topic having the clouds."); DEFINE_string(output_directory, "", "Directory for writing the output imagery."); DEFINE_string(output_format, "%06i", "Format string for writing the output data."); DEFINE_double(start, 0, "Start extracting this many seconds into the bag."); @@ -85,7 +80,7 @@ int main(int argc, char** argv) { if (output_directory.empty()) { char* temp = strdup(argv[1]); std::string base = std::string(basename(temp)); - output_directory = base.substr(0, base.length() - 4) + "_images"; + output_directory = base.substr(0, base.length() - 4) + "_clouds"; } // Create the output directory, if missing @@ -99,70 +94,6 @@ int main(int argc, char** argv) { rosbag::View view(bag, rosbag::TopicQuery(topics)); std::cout << "Copying at most " << view.size() << " frames from the bag file.\n"; for (rosbag::MessageInstance const m : view) { - // Extract a regular image - sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); - if (image_msg) { - ros::Time stamp = image_msg->header.stamp; - curr_time = stamp.toSec(); - - // Set up the output filename - form_filename(image_msg->header.seq, curr_time, filename_buffer, sizeof(filename_buffer)); - std::string name(filename_buffer); - name = output_directory + "/" + name + ".jpg"; - - if (beg_time < 0) beg_time = curr_time; - if (curr_time - beg_time < FLAGS_start || curr_time - beg_time > FLAGS_start + FLAGS_duration) { - continue; - } - - // Convert to an opencv image - cv::Mat image; - try { - // Do a copy as the message is temporary - (cv_bridge::toCvShare(image_msg, "bgr8")->image).copyTo(image); - } catch (cv_bridge::Exception const& e) { - try { - (cv_bridge::toCvShare(image_msg, "32FC1")->image).copyTo(image); - } catch (cv_bridge::Exception const& e) { - LOG(ERROR) << "Unable to convert " << image_msg->encoding.c_str() << " image to bgr8 or 32FC1"; - continue; - } - } - - std::cout << "Writing: " << name << "\n"; - cv::imwrite(name, image); - } - - // Extract a compressed image - sensor_msgs::CompressedImage::ConstPtr comp_image_msg - = m.instantiate(); - if (comp_image_msg) { - ros::Time stamp = comp_image_msg->header.stamp; - curr_time = stamp.toSec(); - - // Set up the output filename - form_filename(comp_image_msg->header.seq, curr_time, filename_buffer, - sizeof(filename_buffer)); - std::string name(filename_buffer); - name = output_directory + "/" + name + ".jpg"; - - if (beg_time < 0) beg_time = curr_time; - if (curr_time - beg_time < FLAGS_start || curr_time - beg_time > FLAGS_start + FLAGS_duration) { - continue; - } - - cv::Mat image; - try { - // convert compressed image data to cv::Mat - image = cv::imdecode(cv::Mat(comp_image_msg->data), cv::IMREAD_COLOR); - } catch (cv_bridge::Exception const& e) { - LOG(ERROR) << "Unable to convert compressed image to bgr8."; - continue; - } - std::cout << "Writing: " << name << "\n"; - cv::imwrite(name, image); - } - sensor_msgs::PointCloud2::ConstPtr pc_msg = m.instantiate(); if (pc_msg) { ros::Time stamp = pc_msg->header.stamp; @@ -174,12 +105,13 @@ int main(int argc, char** argv) { name = output_directory + "/" + name + ".ply"; if (beg_time < 0) beg_time = curr_time; - if (curr_time - beg_time < FLAGS_start || curr_time - beg_time > FLAGS_start + FLAGS_duration) { + if (curr_time - beg_time < FLAGS_start || + curr_time - beg_time > FLAGS_start + FLAGS_duration) { continue; } pcl::PointCloud pc; - pcl::fromROSMsg(*pc_msg, pc); + 
 dense_map::msgToPcl(pc_msg, pc);
 if (static_cast<int>(pc.points.size()) != static_cast<int>(pc_msg->width * pc_msg->height))
   LOG(FATAL) << "Extracted point cloud size does not agree with original size.";
diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.cc b/dense_map/geometry_mapper/tools/geometry_mapper.cc
index 0836a161..33bba1cd 100644
--- a/dense_map/geometry_mapper/tools/geometry_mapper.cc
+++ b/dense_map/geometry_mapper/tools/geometry_mapper.cc
@@ -28,7 +28,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -61,14 +60,15 @@
 #include
 #include
-// Read from the bag in intrinsics information for the simulated sci cam and save it to disk
-void saveSciCamIntrinsics(std::string const& bag_file, std::string const& output_dir) {
+// Read from the bag the intrinsics information for the given simulated camera and save it to disk
+void saveSimCamIntrinsics(std::string const& bag_file, std::string cam_type,
+                          std::string const& output_dir) {
   int image_width, image_height;
   double focal_length, optical_center_x, optical_center_y;
   std::vector<std::string> topics;
-  std::string topic_sci_cam_sim_info = std::string("/") + TOPIC_SCI_CAM_SIM_INFO;
-  topics.push_back(topic_sci_cam_sim_info);
+  std::string info_topic = "/sim/" + cam_type + "/info";
+  topics.push_back(info_topic);
   bool success = false;
   rosbag::Bag bag;
@@ -90,7 +90,8 @@ void saveSciCamIntrinsics(std::string const& bag_file, std::string const& output
   }
   if (!success) {
-    LOG(FATAL) << "Could not read sci cam intrinsics from: " << bag_file << " on topic: " << topic_sci_cam_sim_info;
+    LOG(FATAL) << "Could not read intrinsics for simulated camera " << cam_type
+               << " from bag: " << bag_file << " on topic: " << info_topic << "\n";
     return;
   }
@@ -103,8 +104,8 @@ void saveSciCamIntrinsics(std::string const& bag_file, std::string const& output
   }
   ofs.precision(17);
   ofs << "# Unidistored width and height, focal length, undistorted optical center\n";
-  ofs << image_width << ' ' << image_height << ' ' << focal_length << ' ' << optical_center_x << ' ' << optical_center_y
-      << "\n";
+  ofs << image_width << ' ' << image_height << ' ' << focal_length << ' '
+      << optical_center_x << ' ' << optical_center_y << "\n";
   ofs.close();
 }
@@ -112,9 +113,10 @@ void saveSciCamIntrinsics(std::string const& bag_file, std::string const& output
 // around that timestamp. Note that we use the counter sparse_id to
 // travel forward in time through the sparse map upon repeated
 // invocations of this function to narrow down the search.
-bool findNearbySparseMapImages(double desired_nav_cam_time, std::vector<double> const& sparse_map_timestamps,
-                               int& sparse_map_id,  // gets modified
-                               std::vector<int>& nearby_cid) {
+bool findNearbySparseMapImages(double desired_nav_cam_time,
+                               std::vector<double> const& sparse_map_timestamps,
+                               int & sparse_map_id,  // gets modified
+                               std::vector<int> & nearby_cid) {
   int num_cid = sparse_map_timestamps.size();
   if (num_cid < 2) LOG(FATAL) << "Must have a map with at least two images.";
@@ -125,7 +127,8 @@ bool findNearbySparseMapImages(double desired_nav_cam_time, std::vector<double>
 // using the SURF map with the map made up of the same images that
 // we want to texture) localization may become unreliable.
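Both findNearbySparseMapImages and findInterpolatedPose below ultimately bracket a query time between two sorted timestamps. A self-contained sketch of that bracketing step (the repo's versions also keep a persistent index so repeated queries only move forward in time):

    #include <algorithm>
    #include <vector>

    // Find indices left, right = left + 1 with
    // times[left] <= t <= times[right]. Illustrative, not the repo's code.
    bool bracket(std::vector<double> const& times, double t, int& left, int& right) {
      if (times.size() < 2 || t < times.front() || t > times.back()) return false;
      auto it = std::upper_bound(times.begin(), times.end(), t);
      if (it == times.end()) --it;  // t equals the last timestamp
      right = static_cast<int>(it - times.begin());
      left  = right - 1;
      return true;
    }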
bool within_bounds = - (sparse_map_timestamps[0] <= desired_nav_cam_time && desired_nav_cam_time <= sparse_map_timestamps[num_cid - 1]); + (sparse_map_timestamps[0] <= desired_nav_cam_time && + desired_nav_cam_time <= sparse_map_timestamps[num_cid - 1]); if (!within_bounds) return false; // Try to find this many images in the map around the time in desired_nav_cam_time. @@ -172,14 +175,16 @@ bool findNearbySparseMapImages(double desired_nav_cam_time, std::vector } // Given a timestamp of a desired camera (haz_cam or sci_cam) and the -// list of navcam image timestamps, find the nearest navcam images in +// list of nav_cam image timestamps, find the nearest nav_cam images in // time to the desired timestamp, localize from the sparse map their // poses, then interpolate and transform to find the pose of the // desired camera. -bool findInterpolatedPose(double desired_nav_cam_time, std::vector const& nav_cam_bag_timestamps, - Eigen::MatrixXd const& desired_cam_to_nav_cam_transform, +bool findInterpolatedPose(double desired_nav_cam_time, + std::vector const& nav_cam_bag_timestamps, + Eigen::Affine3d const& nav_cam_to_desired_cam_trans, std::vector const& nav_cam_msgs, - boost::shared_ptr sparse_map, bool use_brisk_map, + boost::shared_ptr sparse_map, + bool use_brisk_map, std::vector const& sparse_map_timestamps, Eigen::MatrixXd& desired_cam_to_world_trans, int& sparse_map_id, int& nav_cam_bag_id, int& nav_cam_pos) { @@ -219,7 +224,8 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const camera::CameraModel beg_localized_cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), sparse_map->GetCameraParameters()); - if (!sparse_map->Localize(beg_image, &beg_localized_cam, NULL, NULL, nearby_cid_ptr)) return false; + if (!sparse_map->Localize(beg_image, &beg_localized_cam, NULL, NULL, nearby_cid_ptr)) + return false; Eigen::Affine3d beg_trans = beg_localized_cam.GetTransform(); // Note that below we use a throw-away value for nav_cam_pos. @@ -234,20 +240,20 @@ bool findInterpolatedPose(double desired_nav_cam_time, std::vector const return false; camera::CameraModel end_localized_cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), sparse_map->GetCameraParameters()); - if (!sparse_map->Localize(end_image, &end_localized_cam, NULL, NULL, nearby_cid_ptr)) return false; + if (!sparse_map->Localize(end_image, &end_localized_cam, NULL, NULL, nearby_cid_ptr)) + return false; Eigen::Affine3d end_trans = end_localized_cam.GetTransform(); - for (size_t it = 0; it < nearby_cid.size(); it++) { - // std::cout << "nearby image: " << sparse_map->cid_to_filename_[nearby_cid[it]] - // << std::endl; - } // std::cout << "localizing cloud at time " << desired_nav_cam_time << std::endl; - double alpha = (desired_nav_cam_time - beg_nav_cam_time) / (end_nav_cam_time - beg_nav_cam_time); + double alpha = (desired_nav_cam_time - beg_nav_cam_time) + / (end_nav_cam_time - beg_nav_cam_time); + if (end_nav_cam_time == beg_nav_cam_time) alpha = 0.0; // handle division by zero Eigen::Affine3d interp_trans = dense_map::linearInterp(alpha, beg_trans, end_trans); - desired_cam_to_world_trans = interp_trans.inverse().matrix() * desired_cam_to_nav_cam_transform; + desired_cam_to_world_trans + = interp_trans.inverse().matrix() * nav_cam_to_desired_cam_trans.matrix().inverse(); return true; } @@ -349,13 +355,17 @@ double median_mesh_edge(cv::Mat const& depthMat) { // Add points on edges and inside triangles having distances longer // than the median edge length times a factor. 
Store the combined set // in depthMat by adding further rows to it. -void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, Vec5d const& Zero) { +void add_extra_pts(cv::Mat& depthMat, cv::Mat& workMat, double voxel_size, Vec5d const& Zero) { double median_len = median_mesh_edge(depthMat); if (median_len <= 0) return; // should not happen // Add points if lengths are more than this - double min_len = 1.3 * median_len; + double min_len = std::min(1.3 * median_len, 0.9 * voxel_size); + + // There is no point in this being way smaller than the voxels these + // points will end up being binned into. + min_len = std::max(min_len, 0.4 * voxel_size); std::vector points; @@ -582,8 +592,8 @@ void median_filter(cv::Mat& depthMat, cv::Mat& workMat, Vec5d const& Zero, int w // ray from the existing point to the new point being almost parallel // to existing nearby rays (emanating from camera center) which is not // good. -void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec5d const& Zero, int radius1, int radius2, - double foreshortening_delta) { +void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec5d const& Zero, int radius1, + int radius2, double foreshortening_delta) { // Copy depthMat to workMat, with the latter unchanged below depthMat.copyTo(workMat); @@ -696,8 +706,8 @@ void hole_fill(cv::Mat& depthMat, cv::Mat& workMat, double sigma, Vec5d const& Z // Also handle the case when this angle may be invalid double cos_angle = new_dir.dot(curr_dir) / (curr_dir.norm() * new_dir.norm()); double angle = (180.0 / M_PI) * acos(cos_angle); - if (angle < foreshortening_delta || 180.0 - angle < foreshortening_delta || std::isnan(angle) || - std::isinf(angle)) { + if (angle < foreshortening_delta || 180.0 - angle < foreshortening_delta || + std::isnan(angle) || std::isinf(angle)) { will_accept = false; break; } @@ -782,8 +792,20 @@ void smooth_additions(cv::Mat const& origMat, cv::Mat& depthMat, cv::Mat& workMa } // Add weights which are higher at image center and decrease towards -// boundary. This makes voxblox blend better. -void calc_weights(cv::Mat& depthMat, double exponent) { +// boundary. This makes voxblox blend better. Need a lot of care, as +// voxblox does not like weights under 1e-6 or so, neither ones above +// 1e+6, since it treats them as floats. +void calc_weights(cv::Mat& depthMat, double reliability_weight_exponent, + double max_ray_length, bool simulated_data) { + // The weights defined below decay too fast, causing problems for voxblox. + // Hence multiply them all by something to normalize them somewhat. This + // factor should not be too big either, per the earlier note. 
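To make the normalization concrete, a worked example with assumed numbers (not taken from the source):

    // For a 640 x 480 depth image and reliability_weight_exponent = 2:
    //   factor = ||(480/2, 640/2)|| = 400
    // A center pixel (dist_sq = 0) gets weight 400 / (1 + 0) = 400, while a
    // corner pixel (dist_sq = 240^2 + 320^2 = 160000) gets 400 / 160001,
    // about 2.5e-3. Both sit well inside the roughly (1e-6, 1e+6) range that
    // voxblox tolerates, even after the max_ray_length / z^2 factor below.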
+ double factor = 1.0; + if (!simulated_data) { + factor = Eigen::Vector2d(depthMat.rows / 2.0, depthMat.cols / 2.0).norm(); + factor = pow(factor, reliability_weight_exponent/2.0); + } + for (int row = 0; row < depthMat.rows; row++) { #pragma omp parallel for for (int col = 0; col < depthMat.cols; col++) { @@ -793,28 +815,39 @@ void calc_weights(cv::Mat& depthMat, double exponent) { double dcol = col - depthMat.cols / 2.0; double dist_sq = drow * drow + dcol * dcol; - // Some kind of function decaying from center - float weight = 1.0 / pow(1.0 + dist_sq, exponent/2.0); + // For sim data use a weight of 1.0 for each point + float weight = 1.0; + if (!simulated_data) { + // Some kind of function decaying from center + weight = factor / pow(1.0 + dist_sq, reliability_weight_exponent/2.0); + + // Also, points that are further in z are given less weight + double z = depthMat.at(row, col)[2]; - // Also, points that are further in z are given less weight - double z = depthMat.at(row, col)[2]; - weight *= 1.0 / (0.001 + z * z); + // Likely max_ray_length is between 1 and 10, and then this factor + // is between 0.1 and 10. + weight *= max_ray_length / (0.001 + z * z); - // Make sure the weight does not get too small as voxblox - // will read it as a float. Here making the weights - // just a little bigger than what voxblox will accept. - weight = std::max(weight, static_cast(1.1e-6)); + // Make sure the weight does not get too small as voxblox + // will read it as a float. Here making the weights + // just a little bigger than what voxblox will accept. + weight = std::max(weight, static_cast(1.1e-6)); + } depthMat.at(row, col)[4] = weight; } } } -void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat const& haz_cam_intensity, - Eigen::Affine3d const& hazcam_depth_to_image_transform, int depth_exclude_columns, - int depth_exclude_rows, double foreshortening_delta, double depth_hole_fill_diameter, - double reliability_weight_exponent, std::vector const& median_filter_params, - bool save_debug_data, const char* filename_buffer, +void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, + cv::Mat const& haz_cam_intensity, + Eigen::Affine3d const& haz_cam_depth_to_image_transform, + bool simulated_data, int depth_exclude_columns, int depth_exclude_rows, + double foreshortening_delta, double depth_hole_fill_diameter, + double reliability_weight_exponent, + double max_ray_length, double voxel_size, + std::vector const& median_filter_params, bool save_debug_data, + const char* filename_buffer, Eigen::MatrixXd const& desired_cam_to_world_trans) { // Sanity checks if (static_cast(haz_cam_intensity.cols) != static_cast(pc_msg->width) || @@ -825,7 +858,7 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Extract the depth point cloud from the message pcl::PointCloud pc; - pcl::fromROSMsg(*pc_msg, pc); + dense_map::msgToPcl(pc_msg, pc); if (static_cast(pc.points.size()) != static_cast(pc_msg->width * pc_msg->height)) LOG(FATAL) << "Extracted point cloud size does not agree with original size."; @@ -848,12 +881,16 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co double y = pc.points[count].y; double z = pc.points[count].z; - // Pick first channel. The precise color of the depth cloud is not important. - // It will be wiped later during smoothing and hole-filling anyway. + // Pick the first image channel. The precise color of the depth + // cloud is not important. 
It will be wiped later during + // smoothing and hole-filling anyway. double i = haz_cam_intensity.at(row, col)[0]; - bool skip = (col < depth_exclude_columns || depthMat.cols - col < depth_exclude_columns || - row < depth_exclude_rows || depthMat.rows - row < depth_exclude_rows); + double ray_len = Eigen::Vector3d(x, y, z).norm(); + // Exclude points at the boundary or points further than max_ray_length. + bool skip = (col < depth_exclude_columns || depthMat.cols - 1 - col < depth_exclude_columns || + row < depth_exclude_rows || depthMat.rows - 1 - row < depth_exclude_rows || + ray_len > max_ray_length || std::isnan(ray_len) || std::isinf(ray_len)); if ((x == 0 && y == 0 && z == 0) || skip) depthMat.at(row, col) = Zero; @@ -865,9 +902,12 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Throw out as outliers points whose x, y, or z values differs // from the median of such values by more than given distance. // This can be a slow operation for big windows. - int num_filter_passes = median_filter_params.size() / 2; - for (int pass = 0; pass < num_filter_passes; pass++) - median_filter(depthMat, workMat, Zero, median_filter_params[2 * pass + 0], median_filter_params[2 * pass + 1]); + if (!simulated_data) { + int num_filter_passes = median_filter_params.size() / 2; + for (int pass = 0; pass < num_filter_passes; pass++) + median_filter(depthMat, workMat, Zero, median_filter_params[2 * pass + 0], + median_filter_params[2 * pass + 1]); + } // Make a copy of the current depth image before filling holes cv::Mat origMat; @@ -878,20 +918,23 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Use a smaller radius for the foreshortening constraint int radius2 = depth_hole_fill_diameter / 4.0; double sigma = 0.5; // gaussian sigma used for hole-filling and smoothing - if (depth_hole_fill_diameter > 0) hole_fill(depthMat, workMat, sigma, Zero, radius1, radius2, foreshortening_delta); + if (depth_hole_fill_diameter > 0 && !simulated_data) + hole_fill(depthMat, workMat, sigma, Zero, radius1, radius2, foreshortening_delta); // Smooth newly added points and also their immediate neighbors. // Use origMat for reference. int radius = depth_hole_fill_diameter / 4.0; - if (depth_hole_fill_diameter > 0) smooth_additions(origMat, depthMat, workMat, sigma, Zero, radius); + if (depth_hole_fill_diameter > 0 && !simulated_data) + smooth_additions(origMat, depthMat, workMat, sigma, Zero, radius); // This is the right place at which to add weights, before adding // extra points messes up with the number of rows in in depthMat. 
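For reference, the median filtering above rejects a depth pixel when it strays too far, in the Manhattan norm, from the median of its window. A simplified single-channel version of that idea (the repo's median_filter works on the x, y, z channels of the Vec5d image):

    #include <opencv2/core.hpp>
    #include <algorithm>
    #include <cmath>
    #include <vector>

    // Zero out depth(r, c) if it differs from the median of the valid values
    // in a window x window neighborhood by more than dist. Illustrative only.
    void median_reject(cv::Mat& depth, int window, double dist) {
      cv::Mat out = depth.clone();
      int h = window / 2;
      for (int r = 0; r < depth.rows; r++) {
        for (int c = 0; c < depth.cols; c++) {
          std::vector<float> vals;
          for (int dr = -h; dr <= h; dr++) {
            for (int dc = -h; dc <= h; dc++) {
              int rr = r + dr, cc = c + dc;
              if (rr < 0 || cc < 0 || rr >= depth.rows || cc >= depth.cols) continue;
              float v = depth.at<float>(rr, cc);
              if (v > 0) vals.push_back(v);  // zero means "no data"
            }
          }
          if (vals.empty()) continue;
          std::nth_element(vals.begin(), vals.begin() + vals.size() / 2, vals.end());
          if (std::abs(depth.at<float>(r, c) - vals[vals.size() / 2]) > dist)
            out.at<float>(r, c) = 0.0f;  // reject the outlier
        }
      }
      out.copyTo(depth);
    }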
- calc_weights(depthMat, reliability_weight_exponent); + calc_weights(depthMat, reliability_weight_exponent, max_ray_length, simulated_data); // Add extra points, but only if we are committed to manipulating the // depth cloud to start with - if (depth_hole_fill_diameter > 0) add_extra_pts(depthMat, workMat, Zero); + if (depth_hole_fill_diameter > 0 && !simulated_data) + add_extra_pts(depthMat, workMat, voxel_size, Zero); if (save_debug_data) { // Save the updated depthMat as a mesh (for debugging) @@ -926,7 +969,7 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co } Eigen::Vector3d X(pt[0], pt[1], pt[2]); - X = hazcam_depth_to_image_transform * X; + X = haz_cam_depth_to_image_transform * X; pci.points[count].x = X[0]; pci.points[count].y = X[1]; pci.points[count].z = X[2]; @@ -1002,7 +1045,7 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co // Apply an affine transform Eigen::Vector3d X(pt[0], pt[1], pt[2]); - X = hazcam_depth_to_image_transform * X; + X = haz_cam_depth_to_image_transform * X; // Apply a 4x4 rotation + translation transform Eigen::VectorXd V(4); @@ -1019,117 +1062,6 @@ void save_proc_depth_cloud(sensor_msgs::PointCloud2::ConstPtr pc_msg, cv::Mat co } } -// This is an instructive code to add to saveCameraPoses() -// to compare the sci cam pose as found via localizing -// the nav cam images vs localizing the sci cam image directly. -// The conclusion is that the sci cam calibration needs -// improvement, as the results are all over the place. -#if 0 -void localize_sci_cam_directly() { - std::cout.precision(8); - while (cam_type == "sci_cam" && !use_brisk_map) { - // Find nav cam images around the nav cam pose to compute - std::vector nearby_cid; - double desired_nav_cam_time = curr_time + desired_cam_to_nav_cam_offset; - int local_sparse_map_id = 0; - if (!findNearbySparseMapImages(desired_nav_cam_time, - sparse_map_timestamps, - local_sparse_map_id, // gets modified - nearby_cid)) { - std::cout << "Fail to find nearby cid!" << std::endl; - break; - } - std::cout << "Pose from using nav cam\n" << desired_cam_to_world_trans << std::endl; - - cv::Mat curr_sci_cam_image; - bool curr_save_grayscale = true; - double found_time = -1.0; - if (!dense_map::lookupImage(curr_time, *desired_cam_msgs, curr_save_grayscale, - curr_sci_cam_image, desired_cam_pos, found_time)) { - std::cout << "Failed to look up image!" << std::endl; - break; - } else { - std::cout << "Success in finding sci cam image!" << std::endl; - } - - snprintf(filename_buffer, sizeof(filename_buffer), - "%s/%10.7f_grayscale.jpg", desired_cam_dir.c_str(), curr_time); - std::cout << "Writing: " << filename_buffer << std::endl; - cv::imwrite(filename_buffer, curr_sci_cam_image); - - double scale = 0.25; - std::cout << "resizing with scale " << scale << std::endl; - cv::Mat resized_image; - cv::resize(curr_sci_cam_image, resized_image, cv::Size(), scale, scale, cv::INTER_AREA); - - std::cout << "Resized image dims: " << resized_image.cols << ' ' << resized_image.rows << "\n"; - - camera::CameraModel curr_sci_cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), - g_sci_cam_params); - if (!sparse_map->Localize(resized_image, &curr_sci_cam, NULL, NULL, &nearby_cid)) { - std::cout << "Failed to localize!" << std::endl; - break; - } else { - std::cout << "Success localizing!" 
<< std::endl; - } - - Eigen::Affine3d curr_trans = curr_sci_cam.GetTransform(); - Eigen::MatrixXd T = (curr_trans.inverse()).matrix(); - std::cout << "sci cam mat\n" << T << std::endl; - - Eigen::Vector3d P(T(0, 3), T(1, 3), T(2, 3)); - std::cout << "Position " << P.transpose() << std::endl; - - double best_dist = 1e+100; - double best_delta = -1.0; - Eigen::MatrixXd best_s2n; - - for (double delta = -0.5; delta <= 0.5; delta += 0.05) { - std::cout << "delta is " << delta << std::endl; - - int local_sparse_map_id = 0; - int local_nav_cam_bag_id = 0; - int local_nav_cam_pos = 0; - Eigen::MatrixXd S; - - if (!findInterpolatedPose(curr_time + desired_cam_to_nav_cam_offset + delta, - nav_cam_bag_timestamps, desired_cam_to_nav_cam_transform, - nav_cam_msgs, sparse_map, use_brisk_map, - sparse_map_timestamps, - S, - local_sparse_map_id, - local_nav_cam_bag_id, - local_nav_cam_pos)) { - std::cout << "Failed at local interp" << std::endl; - continue; - } - std::cout << "Local trans for delta:\n" << S << std::endl; - Eigen::Vector3d Q(S(0, 3), S(1, 3), S(2, 3)); - double local_dist = (Q-P).norm(); - std::cout << "delta position and norm " - << delta << ' ' << Q.transpose() << ' ' << local_dist << std::endl; - - Eigen::MatrixXd s2n = desired_cam_to_nav_cam_transform * (S.inverse()) * T; - - if (local_dist < best_dist) { - best_dist = local_dist; - best_delta = delta; - best_s2n = s2n; - } - - if (std::abs(delta) < 1e-10) { - std::cout << "best s2n1\n" << s2n << std::endl; - std::cout << "actual s2n\n" << desired_cam_to_nav_cam_transform << std::endl; - } - } - - std::cout << "best delta and dist " << best_delta << ' ' << best_dist << std::endl; - std::cout << "best s2n2\n" << best_s2n << std::endl; - break; - } -} -#endif - // Given a desired camera and a set of acquisition timestamps for it, // find the nav cam images nearest in time bracketing each timestamp, // find the poses of those images using localization, interpolate the @@ -1140,29 +1072,25 @@ void localize_sci_cam_directly() { // Also write the extracted point clouds and the desired cam (nav or sci) images. 
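dense_map::linearInterp, used by findInterpolatedPose above, is defined elsewhere in the repo. The standard way to interpolate two rigid poses, and a plausible reading of that call, is to lerp the translation and slerp the rotation (a sketch, assuming pure rotations in the linear parts):

    #include <Eigen/Geometry>

    // alpha = 0 returns beg, alpha = 1 returns end. Illustrative only.
    Eigen::Affine3d interp_rigid(double alpha, Eigen::Affine3d const& beg,
                                 Eigen::Affine3d const& end) {
      Eigen::Quaterniond qb(beg.linear()), qe(end.linear());
      Eigen::Affine3d out = Eigen::Affine3d::Identity();
      out.linear() = qb.slerp(alpha, qe).toRotationMatrix();
      out.translation() = (1.0 - alpha) * beg.translation() + alpha * end.translation();
      return out;
    }

The caller then inverts the interpolated nav_cam pose and composes it with the nav-to-desired-cam extrinsics, as in findInterpolatedPose above.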
-void saveCameraPoses(bool simulated_data, std::string const& cam_type, - double desired_cam_to_nav_cam_offset, - Eigen::MatrixXd const& desired_cam_to_nav_cam_transform, - Eigen::Affine3d& hazcam_depth_to_image_transform, - int depth_exclude_columns, int depth_exclude_rows, - double foreshortening_delta, double depth_hole_fill_diameter, - double reliability_weight_exponent, - std::vector const& median_filter_params, - bool save_debug_data, - std::vector const& nav_cam_msgs, - std::vector const& sci_cam_msgs, - std::vector const& haz_cam_points_msgs, - std::vector const& haz_cam_intensity_msgs, - std::vector const& nav_cam_bag_timestamps, - std::map> const& sci_cam_exif, - std::string const& output_dir, std::string const& desired_cam_dir, - double start, double duration, - double sampling_spacing_seconds, - double dist_between_processed_cams, std::set const& sci_cam_timestamps, - double max_iso_times_exposure, - boost::shared_ptr sparse_map, bool use_brisk_map, - dense_map::StampedPoseStorage const& sim_desired_cam_poses, - std::string const& external_mesh) { +void saveCameraPoses( + bool simulated_data, std::string const& cam_type, double nav_to_desired_cam_offset, + Eigen::Affine3d const& nav_cam_to_desired_cam_trans, + Eigen::Affine3d& haz_cam_depth_to_image_transform, int depth_exclude_columns, + int depth_exclude_rows, double foreshortening_delta, double depth_hole_fill_diameter, + double reliability_weight_exponent, double max_ray_length, double voxel_size, + std::vector const& median_filter_params, + std::vector const& desired_cam_msgs, + std::vector const& nav_cam_msgs, // always needed when not doing sim + std::vector const& haz_cam_points_msgs, + std::vector const& nav_cam_bag_timestamps, + std::map> const& sci_cam_exif, std::string const& output_dir, + std::string const& desired_cam_dir, double start, double duration, + double sampling_spacing_seconds, double dist_between_processed_cams, + double angle_between_processed_cams, + std::set const& sci_cam_timestamps, double max_iso_times_exposure, + boost::shared_ptr sparse_map, bool use_brisk_map, + bool do_haz_cam_image, + dense_map::StampedPoseStorage const& sim_desired_cam_poses, bool save_debug_data) { double min_map_timestamp = std::numeric_limits::max(); double max_map_timestamp = -min_map_timestamp; std::vector sparse_map_timestamps; @@ -1178,7 +1106,9 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, } } - // Open a handle for writing the index of files being written to disk + // Open a handle for writing the index of files being written to + // disk. This is not done if we only want to process the haz cam + // cloud without the haz_cam image. 
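Further below, a frame is skipped unless the camera center moved by at least --dist_between_processed_cams or the orientation changed by at least --angle_between_processed_cams; the angle comes from dense_map::maxRotationAngle. A plausible minimal form of such a helper, assuming Eigen (the repo's definition may differ, for example by measuring the worst-case rotation of a unit vector):

    #include <Eigen/Geometry>
    #include <cmath>

    // Rotation angle, in degrees, of the rotational part of a rigid transform.
    double rotation_angle_deg(Eigen::Affine3d const& T) {
      return Eigen::AngleAxisd(T.linear()).angle() * 180.0 / M_PI;
    }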
std::string index_file = output_dir + "/" + cam_type + "_index.txt"; std::ofstream ofs(index_file.c_str()); @@ -1201,48 +1131,37 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, // This is used to ensure cameras are not too close Eigen::Vector3d prev_cam_ctr(std::numeric_limits::quiet_NaN(), 0, 0); + Eigen::Affine3d prev_trans = Eigen::Affine3d::Identity(); - // Decide on which messages to process - std::vector const* desired_cam_msgs = NULL; - if (cam_type == "haz_cam") { - desired_cam_msgs = &haz_cam_points_msgs; // points topic - } else if (cam_type == "sci_cam") { - desired_cam_msgs = &sci_cam_msgs; // image topic - } else if (cam_type == "nav_cam") { - desired_cam_msgs = &nav_cam_msgs; // image topic - } else { - LOG(FATAL) << "Unknown camera type: " << cam_type; - } - - if (external_mesh == "") { - // Compute the starting bag time as the timestamp of the first cloud - for (auto& m : haz_cam_points_msgs) { - if (beg_time < 0) { - sensor_msgs::PointCloud2::ConstPtr pc_msg; - pc_msg = m.instantiate(); - if (pc_msg) { - beg_time = pc_msg->header.stamp.toSec(); - break; - } + // Compute the starting bag time as the timestamp of the first cloud + for (auto& m : haz_cam_points_msgs) { + if (beg_time < 0) { + sensor_msgs::PointCloud2::ConstPtr pc_msg; + pc_msg = m.instantiate(); + if (pc_msg) { + beg_time = pc_msg->header.stamp.toSec(); + break; } } - } else { - // Use an external mesh, so ignore the depth clouds (may even be absent) - beg_time = min_map_timestamp; } - for (auto& m : *desired_cam_msgs) { + std::vector const* iter_msgs = NULL; + if (cam_type == "haz_cam") + iter_msgs = &haz_cam_points_msgs; // iterate over the point clouds + else + iter_msgs = &desired_cam_msgs; // iterate over desired images + + for (auto& m : *iter_msgs) { sensor_msgs::PointCloud2::ConstPtr pc_msg; double curr_time = -1.0; if (cam_type == "haz_cam") { - // haz cam + // Find the depth cloud and its time pc_msg = m.instantiate(); if (!pc_msg) continue; curr_time = pc_msg->header.stamp.toSec(); } else { - // sci cam or nav cam - // Check for image + // Find the image and its time sensor_msgs::Image::ConstPtr image_msg = m.instantiate(); if (image_msg) { curr_time = image_msg->header.stamp.toSec(); @@ -1259,10 +1178,12 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, } } - bool custom_timestamps = (cam_type == "sci_cam" && !sci_cam_timestamps.empty()); - if (!simulated_data && !use_brisk_map && !custom_timestamps) { - if (curr_time < min_map_timestamp) continue; // Do not process data before the sparse map starts - if (curr_time > max_map_timestamp) break; // Do not process data after the sparse map ends + bool custom_sci_timestamps = (cam_type == "sci_cam" && !sci_cam_timestamps.empty()); + if (!simulated_data && !use_brisk_map && !custom_sci_timestamps) { + // Do not process data before the sparse map starts + if (curr_time < min_map_timestamp) continue; + // Do not process data after the sparse map ends + if (curr_time > max_map_timestamp) break; } if (curr_time - beg_time < start) continue; // did not yet reach the desired starting time @@ -1275,26 +1196,28 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, Eigen::MatrixXd desired_cam_to_world_trans; // Let some time elapse between fusing clouds - if (std::abs(curr_time - prev_time) < sampling_spacing_seconds && !custom_timestamps) { + if (std::abs(curr_time - prev_time) < sampling_spacing_seconds && !custom_sci_timestamps) { continue; } // The case of custom timestamps - if 
(custom_timestamps && sci_cam_timestamps.find(curr_time) == sci_cam_timestamps.end()) continue; + if (custom_sci_timestamps && sci_cam_timestamps.find(curr_time) == sci_cam_timestamps.end()) + continue; if (!simulated_data) { // Do localization based on nav cam, transform the pose to desired cam coordinates, etc - // Note that we convert from the desired time to the nav cam time for this operation - if (!findInterpolatedPose(curr_time + desired_cam_to_nav_cam_offset, nav_cam_bag_timestamps, - desired_cam_to_nav_cam_transform, nav_cam_msgs, sparse_map, + // Note that we convert from the desired cam time to nav cam time for this operation + if (!findInterpolatedPose(curr_time - nav_to_desired_cam_offset, + nav_cam_bag_timestamps, + nav_cam_to_desired_cam_trans, nav_cam_msgs, sparse_map, use_brisk_map, sparse_map_timestamps, desired_cam_to_world_trans, sparse_map_id, nav_cam_bag_id, nav_cam_pos)) { - std::cout.precision(17); - std::cout << "Localization failed at time " << curr_time << std::endl; - std::cout << "if too many such failures, consider modifying the value of " - << "--localization_options. See the documentation for more info." << std::endl; + std::cout << std::setprecision(17) + << "Localization failed at time " << curr_time << ". " + << "If too many such failures, consider modifying the value of " + << "--localization_options. See the documentation for more info.\n"; continue; } } else { @@ -1311,121 +1234,109 @@ void saveCameraPoses(bool simulated_data, std::string const& cam_type, // Success localizing. Update the time at which it took place for next time. prev_time = curr_time; - std::cout << "Time elapsed since the bag started: " << curr_time - beg_time << "\n"; + std::cout << "Time elapsed since the bag started: " << curr_time - beg_time << " seconds.\n"; // See if to skip some images if the robot did not move much Eigen::Affine3d curr_trans; curr_trans.matrix() = desired_cam_to_world_trans; Eigen::Vector3d curr_cam_ctr = curr_trans.translation(); double curr_dist_bw_cams = (prev_cam_ctr - curr_cam_ctr).norm(); - if (!std::isnan(prev_cam_ctr[0]) && !custom_timestamps && - curr_dist_bw_cams < dist_between_processed_cams) { - std::cout << "Distance to previously processed camera is " - << curr_dist_bw_cams << ", skipping it." - << std::endl; + Eigen::Affine3d world_pose_change = curr_trans * (prev_trans.inverse()); + double angle_diff = dense_map::maxRotationAngle(world_pose_change); + if (!std::isnan(prev_cam_ctr[0])) + std::cout << std::setprecision(4) + << "Position and orientation changes relative to prev. processed cam are " + << curr_dist_bw_cams << " m and " << angle_diff <<" deg.\n"; + + bool skip_processing = (!std::isnan(prev_cam_ctr[0]) && + !custom_sci_timestamps && + curr_dist_bw_cams < dist_between_processed_cams && + angle_diff < angle_between_processed_cams); + if (skip_processing) { + std::cout << "Skipping this camera." 
<< std::endl; continue; } char filename_buffer[1000]; std::string suffix = cam_type + "_to_world"; - if (cam_type == "haz_cam") { - cv::Mat haz_cam_intensity; - bool save_grayscale = false; // Get a color image, if possible - - // Normally try to look up the image time right after the current cloud time - double desired_image_time = curr_time; - double found_time = -1.0; - if (!dense_map::lookupImage(desired_image_time, haz_cam_intensity_msgs, - save_grayscale, haz_cam_intensity, + // Lookup and save the image at the current time or soon after it (the latter + // case happens when looking up the image based on the cloud time) + cv::Mat desired_image; + bool save_grayscale = false; // Get a color image, if possible + double found_time = -1.0; + if (cam_type != "haz_cam" || do_haz_cam_image) { + if (!dense_map::lookupImage(curr_time, desired_cam_msgs, save_grayscale, desired_image, desired_cam_pos, found_time)) - continue; + continue; // the expected image could not be found + } else { + // We have cam_type == haz_cam but no haz_cam image texturing + // desired. Create a fake image, only for the purpose of saving + // the point cloud, for which an image is needed. + desired_image = cv::Mat(pc_msg->height, pc_msg->width, CV_8UC3, cv::Scalar(255, 255, 255)); + } + + // Apply an optional scale. Use a pointer to not copy the data more than + // one has to. + cv::Mat* img_ptr = &desired_image; + cv::Mat scaled_image; + if (!simulated_data && cam_type == "sci_cam") { + auto exif_it = sci_cam_exif.find(curr_time); + if (exif_it != sci_cam_exif.end()) { + std::vector const& exif = exif_it->second; + double iso = exif[dense_map::ISO]; + double exposure = exif[dense_map::EXPOSURE_TIME]; + dense_map::exposureCorrection(max_iso_times_exposure, iso, exposure, + *img_ptr, scaled_image); + img_ptr = &scaled_image; + } + } + + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", + desired_cam_dir.c_str(), curr_time); + std::cout << "Writing: " << filename_buffer << std::endl; + cv::imwrite(filename_buffer, *img_ptr); - // Save the transform (only if the above lookup was successful) - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", output_dir.c_str(), - curr_time, suffix.c_str()); - dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); + // Save the name of the image in the index + ofs << filename_buffer << "\n"; - // Save the transform name in the index file - depth_ofs << filename_buffer << "\n"; // for the depth + // Save the transform + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", + output_dir.c_str(), curr_time, suffix.c_str()); + dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); + + if (cam_type == "haz_cam") { + // Save the transform name in the index file, for voxblox + depth_ofs << filename_buffer << "\n"; // Append the intensity to the point cloud and save it snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.pcd", output_dir.c_str(), curr_time); - save_proc_depth_cloud(pc_msg, haz_cam_intensity, hazcam_depth_to_image_transform, - depth_exclude_columns, - depth_exclude_rows, foreshortening_delta, depth_hole_fill_diameter, - reliability_weight_exponent, median_filter_params, + save_proc_depth_cloud(pc_msg, desired_image, haz_cam_depth_to_image_transform, + simulated_data, + depth_exclude_columns, depth_exclude_rows, + foreshortening_delta, depth_hole_fill_diameter, + reliability_weight_exponent, max_ray_length, + voxel_size, median_filter_params, save_debug_data, filename_buffer, 
desired_cam_to_world_trans); // Save the name of the point cloud to the index depth_ofs << filename_buffer << "\n"; - - // Save the image - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", - desired_cam_dir.c_str(), curr_time); - std::cout << "Writing: " << filename_buffer << std::endl; - cv::imwrite(filename_buffer, haz_cam_intensity); - - // Save the name of the image in the index - ofs << filename_buffer << "\n"; - - } else { - // Save the nav or sci cam image at the given timestamp. - cv::Mat desired_cam_image; - bool save_grayscale = false; - double found_time = -1.0; - if (!dense_map::lookupImage(curr_time, *desired_cam_msgs, save_grayscale, - desired_cam_image, desired_cam_pos, found_time)) - continue; - - // Save the transform (only if the above lookup was successful) - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f_%s.txt", - output_dir.c_str(), curr_time, suffix.c_str()); - dense_map::writeMatrix(desired_cam_to_world_trans, filename_buffer); - - // This is very useful for debugging things with ASP. - - // A debug utility for saving a camera in format ASP understands - // if (cam_type == "sci_cam") - // dense_map::save_tsai_camera(desired_cam_to_world_trans, output_dir, curr_time, suffix); - - // Apply an optional scale. Use a pointer to not copy the data more than - // one has to. - cv::Mat* img_ptr = &desired_cam_image; - cv::Mat scaled_image; - if (!simulated_data && cam_type == "sci_cam") { - auto exif_it = sci_cam_exif.find(curr_time); - if (exif_it != sci_cam_exif.end()) { - std::vector const& exif = exif_it->second; - double iso = exif[dense_map::ISO]; - double exposure = exif[dense_map::EXPOSURE_TIME]; - dense_map::exposureCorrection(max_iso_times_exposure, iso, exposure, - *img_ptr, scaled_image); - img_ptr = &scaled_image; - } - } - - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", - desired_cam_dir.c_str(), curr_time); - std::cout << "Writing: " << filename_buffer << std::endl; - cv::imwrite(filename_buffer, *img_ptr); - - // Save the name of the image in the index - ofs << filename_buffer << "\n"; } // Update the previous camera center prev_cam_ctr = curr_cam_ctr; + prev_trans = curr_trans; } std::cout << "Wrote: " << index_file << std::endl; } -void save_navcam_poses_and_images(boost::shared_ptr sparse_map, +void save_nav_cam_poses_and_images(boost::shared_ptr sparse_map, std::vector const& nav_cam_msgs, - std::string const& output_dir, std::string const& nav_cam_dir) { + std::string const& output_dir, + std::string const& nav_cam_dir) { // Keep track of where in the bag we are int nav_cam_pos = 0; @@ -1443,7 +1354,8 @@ void save_navcam_poses_and_images(boost::shared_ptr s std::string ext = ff_common::file_extension(img_file); std::string cam_file = ff_common::ReplaceInStr(img_file, "." 
+ ext, "_nav_cam_to_world.txt"); cam_file = output_dir + "/" + ff_common::basename(cam_file); - if (cam_file == img_file) LOG(FATAL) << "Failed to replace the image extension in: " << img_file; + if (cam_file == img_file) LOG(FATAL) << "Failed to replace the image extension in: " + << img_file; dense_map::writeMatrix(sparse_map->cid_to_cam_t_global_[cid].inverse().matrix(), cam_file); @@ -1454,7 +1366,8 @@ void save_navcam_poses_and_images(boost::shared_ptr s nav_cam_pos, found_time)) continue; - snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", nav_cam_dir.c_str(), timestamp); + snprintf(filename_buffer, sizeof(filename_buffer), "%s/%10.7f.jpg", + nav_cam_dir.c_str(), timestamp); std::cout << "Writing: " << filename_buffer << std::endl; cv::imwrite(filename_buffer, nav_cam_image); @@ -1465,27 +1378,30 @@ void save_navcam_poses_and_images(boost::shared_ptr s DEFINE_string(ros_bag, "", "A ROS bag with recorded nav_cam, haz_cam, and sci_cam data."); DEFINE_string(sparse_map, "", "A registered sparse map made with some of the ROS bag data."); DEFINE_string(output_dir, "", "The full path to a directory where to write the processed data."); -DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); -DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); -DEFINE_string(haz_cam_intensity_topic, "/hw/depth_haz/extended/amplitude_int", - "The depth camera intensity topic in the bag file."); -DEFINE_string(sci_cam_topic, "/hw/cam_sci", "The sci cam topic in the bag file."); DEFINE_string(sci_cam_exif_topic, "/hw/sci_cam_exif", "The sci cam exif metadata topic the output bag."); -DEFINE_string(camera_type, "all", - "Specify which cameras to process. By default, process all. Options: " - "nav_cam, sci_cam."); +DEFINE_string(camera_types, "sci_cam nav_cam haz_cam", + "Specify the cameras to use for the textures, as a list in quotes."); +DEFINE_string(camera_topics, "/hw/cam_sci/compressed /mgt/img_sampler/nav_cam/image_record " + "/hw/depth_haz/extended/amplitude_int", + "Specify the bag topics for the cameras to texture (in the same order as in " + "--camera_types). Use a list in quotes."); +DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", + "The depth point cloud topic in the bag file."); DEFINE_double(start, 0.0, "How many seconds into the bag to start processing the data."); DEFINE_double(duration, -1.0, "For how many seconds to do the processing."); DEFINE_double(sampling_spacing_seconds, 0.5, "Spacing to use, in seconds, between consecutive depth images in " "the bag that are processed."); DEFINE_double(dist_between_processed_cams, 0.1, - "Once an image or depth image is processed, how far the camera " - "should move (in meters) before it should process more data."); + "Once an image or depth cloud is processed, " + "process a new one whenever either the camera moves by more than this " + "distance, in meters, or the angle changes by more than " + "--angle_between_processed_cams, in degrees."); +DEFINE_double(angle_between_processed_cams, 5.0, "See --dist_between_processed_cams."); DEFINE_string(sci_cam_timestamps, "", - "Process only these sci cam timestamps (rather than " - "any in the bag using --dist_between_processed_cams, etc.). Must be " - "a file with one timestamp per line."); + "Process only these sci cam timestamps (rather than any in the bag using " + "--dist_between_processed_cams, etc.). 
Must be a file with one timestamp " + "per line."); DEFINE_double(max_iso_times_exposure, 5.1, "Apply the inverse gamma transform to " "images, multiply them by max_iso_times_exposure/ISO/exposure_time " @@ -1500,19 +1416,27 @@ DEFINE_int32(depth_exclude_rows, 0, "at margins to avoid some distortion of that data."); DEFINE_double(foreshortening_delta, 5.0, "A smaller value here will result in holes in depth images being filled more " - "aggressively but potentially with more artifacts in foreshortened regions."); + "aggressively but potentially with more artifacts in foreshortened regions. " + "Works only with positive --depth_hole_fill_diameter."); DEFINE_string(median_filters, "7 0.1 25 0.1", "Given a list 'w1 d1 w2 d2 ... ', remove a depth image point " "if it differs, in the Manhattan norm, from the median of cloud points " "in the pixel window of size wi centered at it by more than di. This " "removes points sticking out for each such i."); -DEFINE_double(depth_hole_fill_diameter, 30.0, - "Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds " - "are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh " - "later (--max_hole_diameter)."); +DEFINE_double(depth_hole_fill_diameter, 0.0, + "Fill holes in the depth point clouds with this diameter, in pixels. This happens " + "before the clouds are fused. If set to a positive value it can fill really big " + "holes but may introduce artifacts. It is better to leave the hole-filling for " + "later, once the mesh is fused (see --max_hole_diameter)."); DEFINE_double(reliability_weight_exponent, 2.0, "A larger value will give more weight to depth points corresponding to " "pixels closer to depth image center, which are considered more reliable."); +DEFINE_double(max_ray_length, 2.0, + "Process haz cam depth image points no further than " + "this distance from the camera."); +DEFINE_double(voxel_size, 0.01, + "The grid size used for binning depth cloud points and creating the mesh. " + "Measured in meters."); DEFINE_bool(simulated_data, false, "If specified, use data recorded in simulation. " "Then haz and sci camera poses and intrinsics should be recorded in the bag file."); @@ -1520,9 +1444,8 @@ DEFINE_bool(use_brisk_map, false, "If specified, instead of a SURF sparse map made from the same bag that needs " "texturing, use a pre-existing and unrelated BRISK map. This map may be more " "convenient but less reliable."); -DEFINE_string(external_mesh, "", "Use this mesh to texture the images, rather than creating one from depth data."); -DEFINE_string(scicam_to_hazcam_timestamp_offset_override_value, "", - "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " +DEFINE_string(nav_cam_to_sci_cam_offset_override_value, "", + "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config " "file with this value."); DEFINE_bool(save_debug_data, false, "Save many intermediate datasets for debugging.");
@@ -1532,9 +1455,11 @@ int main(int argc, char** argv) { if (FLAGS_ros_bag.empty()) LOG(FATAL) << "The bag file was not specified."; if (FLAGS_output_dir.empty()) LOG(FATAL) << "The output directory was not specified."; - if (FLAGS_sparse_map.empty() && !FLAGS_simulated_data) LOG(FATAL) << "The sparse map was not specified."; + if (FLAGS_sparse_map.empty() && !FLAGS_simulated_data) + LOG(FATAL) << "The sparse map was not specified."; - if (!boost::filesystem::exists(FLAGS_ros_bag)) LOG(FATAL) << "Bag does not exist: " << FLAGS_ros_bag; + if (!boost::filesystem::exists(FLAGS_ros_bag)) LOG(FATAL) << "Bag does not exist: " + << FLAGS_ros_bag; if (!boost::filesystem::exists(FLAGS_sparse_map) && !FLAGS_simulated_data) LOG(FATAL) << "Sparse map does not exist: " << FLAGS_sparse_map;
@@ -1542,38 +1467,82 @@ int main(int argc, char** argv) { // Parse the median filter params double val; std::vector<double> median_filter_params; - std::istringstream ifs(FLAGS_median_filters); - while (ifs >> val) median_filter_params.push_back(val); - + std::istringstream ifsm(FLAGS_median_filters); + while (ifsm >> val) median_filter_params.push_back(val); if (median_filter_params.size() % 2 != 0) LOG(FATAL) << "There must exist an even number of median filter parameters, " << "being pairs of window sizes and distances."; - // Need high precision to print timestamps - std::cout.precision(17); + // Parse the camera types + std::vector<std::string> cam_types; + std::string str; + std::istringstream ifsc(FLAGS_camera_types); + bool do_haz_cam_image = false, do_nav_cam = false; + while (ifsc >> str) { + cam_types.push_back(str); + if (str == "haz_cam") do_haz_cam_image = true; + if (str == "nav_cam") do_nav_cam = true; + } + if (!FLAGS_simulated_data && !do_nav_cam) + LOG(FATAL) << "Nav cam must be in --camera_types (unless using simulated data) as it is " + << "needed for camera localization.\n"; + if (FLAGS_simulated_data && do_nav_cam) + LOG(FATAL) << "The geometry mapper does not support nav_cam with simulated data as " + << "its distortion is not modeled.\n"; + if (FLAGS_simulated_data && !do_haz_cam_image) + LOG(FATAL) << "The haz_cam must be one of the camera types in simulation mode " + << "as it is needed to read the simulated camera pose in order to " + << "process the depth clouds."; + + // Parse the camera topics + std::map<std::string, std::string> cam_topics; + std::istringstream ifst(FLAGS_camera_topics); + while (ifst >> str) { + size_t count = cam_topics.size(); + if (count >= cam_types.size()) + LOG(FATAL) << "There must be a topic for each camera type.\n"; + cam_topics[cam_types[count]] = str; + std::cout << "Camera topic for " << cam_types[count] << ": " << cam_topics[cam_types[count]] + << "\n"; + } + if (cam_types.size() != cam_topics.size()) + LOG(FATAL) << "There must be a topic for each camera type.\n"; + + if (!FLAGS_simulated_data && !do_haz_cam_image) { + // Even if the haz cam image is not meant to be processed, the haz cam + // topic is still needed to be able to process the cloud. + cam_types.push_back("haz_cam"); + // The topic below won't be used, but has to be in for the + // bookkeeping to be correct. + cam_topics["haz_cam"] = "/hw/depth_haz/extended/amplitude_int"; + }
// Read simulated poses - dense_map::StampedPoseStorage sim_sci_cam_poses, sim_haz_cam_poses; + std::vector<dense_map::StampedPoseStorage> sim_cam_poses(cam_types.size()); if (FLAGS_simulated_data) { - std::string haz_cam_pose_topic = std::string("/") + TOPIC_HAZ_CAM_SIM_POSE; - std::string sci_cam_pose_topic = std::string("/") + TOPIC_SCI_CAM_SIM_POSE; - std::cout << "haz cam pose topic " << haz_cam_pose_topic << std::endl; - std::cout << "sci cam pose topic " << sci_cam_pose_topic << std::endl; - dense_map::readBagPoses(FLAGS_ros_bag, haz_cam_pose_topic, sim_haz_cam_poses); - dense_map::readBagPoses(FLAGS_ros_bag, sci_cam_pose_topic, sim_sci_cam_poses); + for (size_t it = 0; it < cam_types.size(); it++) { + std::string pose_topic = "/sim/" + cam_types[it] + "/pose"; + std::cout << "Pose topic for simulated camera: " << cam_types[it] << ": " + << pose_topic << "\n"; + dense_map::readBagPoses(FLAGS_ros_bag, pose_topic, sim_cam_poses[it]); + } }
// Set up handles for reading data at given time stamp without - // searching through the whole bag each time. - dense_map::RosBagHandle nav_cam_handle(FLAGS_ros_bag, FLAGS_nav_cam_topic); - dense_map::RosBagHandle sci_cam_handle(FLAGS_ros_bag, FLAGS_sci_cam_topic); + // searching through the whole bag each time. Must use pointers + // due to the rosbag API. + std::map<std::string, boost::shared_ptr<dense_map::RosBagHandle>> bag_handles; + for (size_t it = 0; it < cam_types.size(); it++) { + bag_handles[cam_types[it]] = boost::shared_ptr<dense_map::RosBagHandle> + (new dense_map::RosBagHandle(FLAGS_ros_bag, cam_topics[cam_types[it]])); + } + dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic); - dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic); dense_map::RosBagHandle exif_handle(FLAGS_ros_bag, FLAGS_sci_cam_exif_topic); std::vector<double> nav_cam_bag_timestamps; if (!FLAGS_simulated_data) - dense_map::readBagImageTimestamps(FLAGS_ros_bag, FLAGS_nav_cam_topic, nav_cam_bag_timestamps); + dense_map::readBagImageTimestamps(FLAGS_ros_bag, cam_topics["nav_cam"], nav_cam_bag_timestamps); std::map<double, std::vector<double>> sci_cam_exif; if (!FLAGS_simulated_data) dense_map::readExifFromBag(exif_handle.bag_msgs, sci_cam_exif);
@@ -1584,53 +1553,45 @@ int main(int argc, char** argv) { std::cout << "Cutting off " << FLAGS_depth_exclude_columns << " columns and " << FLAGS_depth_exclude_rows << " rows at the margins of the depth point clouds.\n"; - double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; - Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::Affine3d hazcam_depth_to_image_transform; - hazcam_depth_to_image_transform.setIdentity(); // default value - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); + std::vector<camera::CameraParameters> cam_params; + std::vector<Eigen::Affine3d> nav_to_cam_trans; + std::vector<double> nav_to_cam_timestamp_offset; + Eigen::Affine3d nav_cam_to_body_trans; // Will not be used + Eigen::Affine3d haz_cam_depth_to_image_transform; if (!FLAGS_simulated_data) { - // Read a bunch of transforms from the robot calibration file. - // The empirically determined offset between the measured timestamps - // of the cameras, in seconds. There are two reasons for this time offset: - // (1) The nav cam and sci cam are acquired on different processors. - // (2) The actual moment the camera image is recorded is not the same - // moment that image finished processing and the timestamp is set. - // This delay is worse for the sci cam which takes longer to acquire. - // TODO(oalexan1): camera_calibrator must actually solve for them. - dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", "scicam_to_hazcam_timestamp_offset", - "hazcam_to_navcam_transform", "scicam_to_hazcam_transform", "nav_cam_transform", - "hazcam_depth_to_image_transform", navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, nav_cam_params, haz_cam_params, - sci_cam_params); - - if (FLAGS_scicam_to_hazcam_timestamp_offset_override_value != "") { - double new_val = atof(FLAGS_scicam_to_hazcam_timestamp_offset_override_value.c_str()); - std::cout << "Overriding the value " << scicam_to_hazcam_timestamp_offset - << " of scicam_to_hazcam_timestamp_offset with: " << new_val << std::endl; - scicam_to_hazcam_timestamp_offset = new_val; + dense_map::readConfigFile( // Inputs + cam_types, + "nav_cam_transform", // this is the nav cam to body transform + "haz_cam_depth_to_image_transform", + // Outputs + cam_params, nav_to_cam_trans, nav_to_cam_timestamp_offset, nav_cam_to_body_trans, + haz_cam_depth_to_image_transform); + + if (FLAGS_nav_cam_to_sci_cam_offset_override_value != "") { + for (size_t it = 0; it < cam_types.size(); it++) { + if (cam_types[it] == "sci_cam") { + double new_val = atof(FLAGS_nav_cam_to_sci_cam_offset_override_value.c_str()); + std::cout << "Overriding the value " << nav_to_cam_timestamp_offset[it] + << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; + nav_to_cam_timestamp_offset[it] = new_val; + } + } } - - scicam_to_navcam_trans = hazcam_to_navcam_trans * scicam_to_hazcam_trans; - std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << std::endl; - std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << std::endl; - std::cout << "scicam_to_navcam_trans\n" << scicam_to_navcam_trans << std::endl; - std::cout << "navcam_to_hazcam_timestamp_offset: " << navcam_to_hazcam_timestamp_offset << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: " << scicam_to_hazcam_timestamp_offset << "\n"; - std::cout << "hazcam_depth_to_image_transform\n" << hazcam_depth_to_image_transform.matrix() << "\n"; + } else { + // No modeling of timestamp offset is used with simulated data. The + // transforms between the cameras are not needed as we record each + // camera pose.
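The nav_cam_to_sci_cam offset override a few lines above is just a lookup over two parallel lists: find the camera name, replace its offset. A standalone Python sketch of the same pattern (camera names as in this diff; the offset values are invented for illustration):

def override_offset(cam_types, offsets, cam_name, new_val):
    # Mirrors the C++ loop: locate cam_name in cam_types and overwrite
    # the matching entry of the parallel offsets list.
    for i, cam in enumerate(cam_types):
        if cam == cam_name:
            print("Overriding the value", offsets[i], "with:", new_val)
            offsets[i] = new_val

cam_types = ["sci_cam", "nav_cam", "haz_cam"]
offsets = [0.3, 0.0, -0.02]  # invented timestamp offsets, in seconds
override_offset(cam_types, offsets, "sci_cam", 0.25)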
+ nav_to_cam_timestamp_offset = std::vector(cam_types.size(), 0); + for (size_t it = 0; it < cam_types.size(); it++) + nav_to_cam_trans.push_back(Eigen::Affine3d::Identity()); + haz_cam_depth_to_image_transform = Eigen::Affine3d::Identity(); // no adjustment needed } boost::shared_ptr sparse_map; if (!FLAGS_simulated_data) { std::cout << "Loading sparse map " << FLAGS_sparse_map << "\n"; - sparse_map = boost::shared_ptr(new sparse_mapping::SparseMap(FLAGS_sparse_map)); + sparse_map = boost::shared_ptr + (new sparse_mapping::SparseMap(FLAGS_sparse_map)); if (!FLAGS_use_brisk_map && (sparse_map->GetDetectorName() != "SURF" || sparse_map->vocab_db_.binary_db != NULL)) { LOG(FATAL) << "A SURF map with no vocab db is expected, unless --use_brisk_map is specified."; @@ -1646,7 +1607,8 @@ int main(int argc, char** argv) { // ("true" or "false"). Here we ensure that the same flag for histogram // equalization is used in the map and for localization. std::string histogram_equalization; - if (gflags::GetCommandLineOption("histogram_equalization", &histogram_equalization)) { + if (!FLAGS_simulated_data && + gflags::GetCommandLineOption("histogram_equalization", &histogram_equalization)) { if ((histogram_equalization == "true" && !sparse_map->GetHistogramEqualization()) || (histogram_equalization == "false" && sparse_map->GetHistogramEqualization())) LOG(FATAL) << "The histogram equalization option in the sparse map and then one desired " @@ -1661,80 +1623,60 @@ int main(int argc, char** argv) { while (ifs >> val) sci_cam_timestamps.insert(val); } + // Create output directories dense_map::createDir(FLAGS_output_dir); - - // Store here the sci cam and nav cam images whose poses we save - std::string nav_cam_dir = FLAGS_output_dir + "/distorted_nav_cam"; - std::string haz_cam_dir = FLAGS_output_dir + "/distorted_haz_cam"; - std::string sci_cam_dir = FLAGS_output_dir + "/distorted_sci_cam"; - dense_map::createDir(nav_cam_dir); - dense_map::createDir(haz_cam_dir); - dense_map::createDir(sci_cam_dir); - - // Save haz cam poses and haz cam clouds - if (FLAGS_external_mesh == "") { - std::string cam_type = "haz_cam"; - // haz to nav offset - double desired_cam_to_nav_cam_offset = -navcam_to_hazcam_timestamp_offset; - saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, hazcam_to_navcam_trans, - hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, - FLAGS_reliability_weight_exponent, - median_filter_params, - FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, - haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, FLAGS_output_dir, haz_cam_dir, - FLAGS_start, FLAGS_duration, FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, - sci_cam_timestamps, FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, - sim_haz_cam_poses, FLAGS_external_mesh); + std::map cam_dirs; + for (size_t it = 0; it < cam_types.size(); it++) { + cam_dirs[cam_types[it]] = FLAGS_output_dir + "/distorted_" + cam_types[it]; + dense_map::createDir(cam_dirs[cam_types[it]]); } - // Save sci cam poses and sci cam images - if (FLAGS_camera_type == "sci_cam" || FLAGS_camera_type == "all") { - std::string cam_type = "sci_cam"; - // sci to nav offset - double desired_cam_to_nav_cam_offset = scicam_to_hazcam_timestamp_offset - navcam_to_hazcam_timestamp_offset; - saveCameraPoses(FLAGS_simulated_data, cam_type, 
desired_cam_to_nav_cam_offset, scicam_to_navcam_trans, - hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, - FLAGS_reliability_weight_exponent, - median_filter_params, - FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, - haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, FLAGS_output_dir, sci_cam_dir, FLAGS_start, FLAGS_duration, - FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, sci_cam_timestamps, - FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_sci_cam_poses, - FLAGS_external_mesh); - - // With simulated data we won't perform an additional undistortion - // step which would save the undistorted intrinsics. Hence need to - // do that here. - if (FLAGS_simulated_data) saveSciCamIntrinsics(FLAGS_ros_bag, sci_cam_dir); - } - // Save nav cam poses and nav cam images. This does not work for simulated data. - if ((FLAGS_camera_type == "nav_cam" || FLAGS_camera_type == "all") && !FLAGS_simulated_data) { - if (FLAGS_use_brisk_map) { - // Do the same logic as for the sci cam, localize each nav cam image against the map, etc. - std::string cam_type = "nav_cam"; - double desired_cam_to_nav_cam_offset = 0.0; // no offset from the nav cam to itself - dense_map::StampedPoseStorage sim_nav_cam_poses; // won't be used - saveCameraPoses(FLAGS_simulated_data, cam_type, desired_cam_to_nav_cam_offset, navcam_to_navcam_trans, - hazcam_depth_to_image_transform, FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, - FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, - FLAGS_reliability_weight_exponent, - median_filter_params, - FLAGS_save_debug_data, nav_cam_handle.bag_msgs, sci_cam_handle.bag_msgs, - haz_cam_points_handle.bag_msgs, haz_cam_intensity_handle.bag_msgs, nav_cam_bag_timestamps, - sci_cam_exif, FLAGS_output_dir, nav_cam_dir, FLAGS_start, FLAGS_duration, - FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, sci_cam_timestamps, - FLAGS_max_iso_times_exposure, sparse_map, FLAGS_use_brisk_map, sim_nav_cam_poses, - FLAGS_external_mesh); - } else { - // Take a shortcut, since the sparse map is made of precisely of the images we - // want to texture, just save those images and their poses from the map, avoiding - // localization - save_navcam_poses_and_images(sparse_map, nav_cam_handle.bag_msgs, FLAGS_output_dir, nav_cam_dir); + // The nav cam msgs may not exist for simulated data + std::vector<rosbag::MessageInstance> empty_nav_cam_msgs; + std::vector<rosbag::MessageInstance> * nav_cam_msgs_ptr; + if (FLAGS_simulated_data) + nav_cam_msgs_ptr = &empty_nav_cam_msgs; + else + nav_cam_msgs_ptr = &bag_handles["nav_cam"]->bag_msgs; + + // Save camera poses and other data for all desired cameras + for (size_t it = 0; it < cam_types.size(); it++) { + std::string cam_type = cam_types[it]; + + if (cam_type == "nav_cam" && FLAGS_simulated_data) { + std::cout << "The nav_cam is not supported with simulated data.\n"; + continue; } + + if (cam_type == "nav_cam" && !FLAGS_use_brisk_map && !FLAGS_simulated_data) { + // Take a shortcut, since the sparse map is made precisely of + // the images we want to texture. Just save those images and + // their poses from the map, avoiding localization.
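In outline, the per-camera loop above routes each entry of --camera_types down one of three paths. A minimal Python sketch of that dispatch (the path labels are descriptive only, not function names from the code):

def processing_path(cam_type, simulated_data, use_brisk_map):
    # Mirrors the branch structure of the loop above.
    if cam_type == "nav_cam" and simulated_data:
        return "skip: nav_cam distortion is not modeled in simulation"
    if cam_type == "nav_cam" and not use_brisk_map and not simulated_data:
        return "reuse poses straight from the SURF sparse map"
    return "localize against nav_cam and save via saveCameraPoses"

for cam in ["sci_cam", "nav_cam", "haz_cam"]:
    print(cam, "->", processing_path(cam, simulated_data=False, use_brisk_map=False))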
+ save_nav_cam_poses_and_images(sparse_map, + *nav_cam_msgs_ptr, + FLAGS_output_dir, cam_dirs["nav_cam"]); + continue; + } + + saveCameraPoses(FLAGS_simulated_data, cam_type, nav_to_cam_timestamp_offset[it], // NOLINT + nav_to_cam_trans[it], haz_cam_depth_to_image_transform, // NOLINT + FLAGS_depth_exclude_columns, FLAGS_depth_exclude_rows, // NOLINT + FLAGS_foreshortening_delta, FLAGS_depth_hole_fill_diameter, // NOLINT + FLAGS_reliability_weight_exponent, FLAGS_max_ray_length, // NOLINT + FLAGS_voxel_size, median_filter_params, // NOLINT + bag_handles[cam_type]->bag_msgs, // desired cam msgs // NOLINT + *nav_cam_msgs_ptr, // nav msgs // NOLINT + haz_cam_points_handle.bag_msgs, nav_cam_bag_timestamps, // NOLINT + sci_cam_exif, FLAGS_output_dir, // NOLINT + cam_dirs[cam_type], FLAGS_start, FLAGS_duration, // NOLINT + FLAGS_sampling_spacing_seconds, FLAGS_dist_between_processed_cams, // NOLINT + FLAGS_angle_between_processed_cams, // NOLINT + sci_cam_timestamps, FLAGS_max_iso_times_exposure, // NOLINT + sparse_map, FLAGS_use_brisk_map, // NOLINT + do_haz_cam_image, sim_cam_poses[it], FLAGS_save_debug_data); // NOLINT + + if (FLAGS_simulated_data) + saveSimCamIntrinsics(FLAGS_ros_bag, cam_type, cam_dirs[cam_type]); } return 0; diff --git a/dense_map/geometry_mapper/tools/geometry_mapper.py b/dense_map/geometry_mapper/tools/geometry_mapper.py index 018ed92c..95314d77 100644 --- a/dense_map/geometry_mapper/tools/geometry_mapper.py +++ b/dense_map/geometry_mapper/tools/geometry_mapper.py @@ -21,7 +21,6 @@ """ A wrapper around the tools that when run together produce a textured mesh. """ - import argparse import os import re @@ -37,7 +36,7 @@ def process_args(args): Set up the parser and parse the args. """ - # Extract some paths before the args are parsed + # Extract some paths before the arguments are parsed src_path = os.path.dirname(args[0]) exec_path = os.path.dirname( os.path.dirname(os.path.dirname(os.path.dirname(src_path))) @@ -51,7 +50,7 @@ def process_args(args): "--ros_bag", dest="ros_bag", default="", - help="A ROS bag with recorded nav_cam, haz_cam, and sci_cam data.", + help="A ROS bag with recorded image and point cloud data.", ) parser.add_argument( "--sparse_map", @@ -66,35 +65,32 @@ def process_args(args): help="The directory where to write the processed data.", ) parser.add_argument( - "--nav_cam_topic", - dest="nav_cam_topic", - default="/hw/cam_nav", - help="The nav cam topic in the bag file.", - ) - parser.add_argument( - "--haz_cam_points_topic", - dest="haz_cam_points_topic", - default="/hw/depth_haz/points", - help="The haz cam point cloud topic in the bag file.", + "--camera_types", + dest="camera_types", + default="sci_cam nav_cam haz_cam", + help="Specify the cameras to use for the textures, as a list in quotes.", ) parser.add_argument( - "--haz_cam_intensity_topic", - dest="haz_cam_intensity_topic", - default="/hw/depth_haz/extended/amplitude_int", - help="The haz cam intensity topic in the bag file.", + "--camera_topics", + dest="camera_topics", + default="/hw/cam_sci/compressed /mgt/img_sampler/nav_cam/image_record /hw/depth_haz/extended/amplitude_int", + help="Specify the bag topics for the cameras to texture (in the same order as in " + + "--camera_types). 
Use a list in quotes.", ) parser.add_argument( - "--sci_cam_topic", - dest="sci_cam_topic", - default="/hw/cam_sci", - help="The sci cam topic in the bag file.", + "--undistorted_crop_wins", + dest="undistorted_crop_wins", + default="sci_cam,1250,1000 nav_cam,1100,776 haz_cam,250,200", + help="The central region to keep after undistorting an image and " + + "before texturing. For sci_cam the numbers are at 1/4th of the full " + + "resolution (resolution of calibration) and will be adjusted for the " + + "actual input image dimensions. Use a list in quotes.", ) parser.add_argument( - "--camera_type", - dest="camera_type", - default="all", - help="Specify which cameras to process. By default, process all. " - + "Options: nav_cam, haz_cam, sci_cam.", + "--haz_cam_points_topic", + dest="haz_cam_points_topic", + default="/hw/depth_haz/points", + help="The depth point cloud topic in the bag file.", ) parser.add_argument( "--start", @@ -119,8 +115,15 @@ def process_args(args): "--dist_between_processed_cams", dest="dist_between_processed_cams", default="0.1", - help="Once an image or depth image is processed, how far the camera " - + "should move (in meters) before it should process more data.", + help="Once an image or depth cloud is processed, process a new one whenever either " + + "the camera moves by more than this distance, in meters, or the angle changes by more " + + "than --angle_between_processed_cams, in degrees.", + ) + parser.add_argument( + "--angle_between_processed_cams", + dest="angle_between_processed_cams", + default="5.0", + help="See --dist_between_processed_cams.", ) parser.add_argument( "--sci_cam_timestamps", @@ -150,7 +153,7 @@ def process_args(args): default="5.0", help="A smaller value here will result in holes in depth images " + "being filled more aggressively but potentially with more artifacts " - + "in foreshortened regions.", + + "in foreshortened regions. Works only with positive --depth_hole_fill_diameter.", ) parser.add_argument( "--median_filters", @@ -164,8 +167,12 @@ def process_args(args): parser.add_argument( "--depth_hole_fill_diameter", dest="depth_hole_fill_diameter", - default="30", - help="Fill holes in the depth point clouds with this diameter, in pixels. This happens before the clouds are fused. It is suggested to not make this too big, as more hole-filling happens on the fused mesh later (--max_hole_diameter).", + default="0", + help="Fill holes in the depth point clouds with this diameter, in pixels. " + + "This happens before the clouds are fused. If set to a positive value it " + + "can fill really big holes but may introduce artifacts. It is better to " + + "leave the hole-filling for later, once the mesh is fused (see " + + "--max_hole_diameter).", ) parser.add_argument( "--reliability_weight_exponent", @@ -184,8 +191,8 @@ def process_args(args): "--voxel_size", dest="voxel_size", default="0.01", - help="When fusing the depth point clouds use a voxel of this size, " - + "in meters.", + help="The grid size used for binning depth cloud points and creating the mesh. " + + "Measured in meters.", ) parser.add_argument( "--voxblox_integrator", @@ -210,6 +217,16 @@ def process_args(args): default="0.00005", help="A larger value will result in a smoother mesh.", ) + + parser.add_argument( + "--no_boundary_erosion", + dest="no_boundary_erosion", + action="store_true", + help="Do not erode the boundary when smoothing the mesh. 
Erosion may help with " + + "making the mesh more regular and easier to hole-fill, but may be undesirable " + + "in regions which don't get to be hole-filled.", + ) + parser.add_argument( "--max_num_hole_edges", dest="max_num_hole_edges", @@ -298,10 +315,10 @@ def process_args(args): + "from depth data in the current bag.", ) parser.add_argument( - "--scicam_to_hazcam_timestamp_offset_override_value", - dest="scicam_to_hazcam_timestamp_offset_override_value", + "--nav_cam_to_sci_cam_offset_override_value", + dest="nav_cam_to_sci_cam_offset_override_value", default="", - help="Override the value of scicam_to_hazcam_timestamp_offset " + help="Override the value of nav_cam_to_sci_cam_timestamp_offset " + "from the robot config file with this value.", ) parser.add_argument( @@ -316,12 +333,33 @@ def process_args(args): action="store_true", help="Save many intermediate datasets for debugging.", ) + + parser.add_argument( + "--texture_individual_images", + dest="texture_individual_images", + action="store_true", + help="If specified, in addition to a joint texture of all images create individual " + + "textures for each image and camera. Does not work with simulated cameras. For debugging.", + ) + args = parser.parse_args() - return (src_path, exec_path, args) + # Parse the crop windows + crop_wins = args.undistorted_crop_wins.split() + crop_win_map = {} + for crop_win in crop_wins: + vals = crop_win.split(",") + if len(vals) != 3: + raise Exception( + "Invalid value for --undistorted_crop_wins: " + + args.undistorted_crop_wins + ) + crop_win_map[vals[0]] = vals[1:] + + return (src_path, exec_path, crop_win_map, args) -def sanity_checks(geometry_mapper_path, batch_tsdf_path, args): +def sanity_checks(geometry_mapper_path, batch_tsdf_path, crop_win_map, args): # Check if the environment was set for var in [ @@ -345,20 +383,43 @@ def sanity_checks(geometry_mapper_path, batch_tsdf_path, args): + "Specify it via --astrobee_build_dir." ) - if args.camera_type == "nav_cam" and args.simulated_data: - print( - "Cannot apply nav cam texture with simulated cameras " - + "as the gazebo distorted images are not accurate enough." - ) - sys.exit(1) - - if args.camera_type == "haz_cam" and args.simulated_data: - print("Texturing haz cam with simulated data was not tested.") - sys.exit(1) + camera_types = args.camera_types.split() if args.output_dir == "": raise Exception("The path to the output directory was not specified.") + if len(camera_types) != len(args.camera_topics.split()): + raise Exception("There must be as many camera types as camera topics.") + + if (not args.simulated_data) and len(camera_types) != len( + args.undistorted_crop_wins.split() + ): + raise Exception( + "There must be as many camera types as listed undistorted " + + "crop windows." + ) + + if args.simulated_data and "nav_cam" in camera_types: + raise Exception( + "The geometry mapper does not support nav_cam with simulated data as " + + "its distortion is not modeled." + ) + + if args.simulated_data and "haz_cam" not in camera_types: + raise Exception( + "The haz_cam must be one of the camera types in simulation mode " + + "as it is needed to read the simulated camera pose in order to " + + "process the depth clouds." 
+ ) + + if not args.simulated_data: + for cam in camera_types: + if not (cam in crop_win_map): + raise Exception( + "No crop win specified in --undistorted_crop_wins for camera: " + + cam + ) + def mkdir_p(path): if path == "": @@ -378,13 +439,26 @@ def setup_outputs(args): mkdir_p(args.output_dir) +def format_cmd(cmd): + """If some command arguments have spaces, quote them. Then concatenate the results.""" + ans = "" + for val in cmd: + if " " in val or "\t" in val: + val = '"' + val + '"' + ans += val + " " + return ans + + def run_cmd(cmd, log_file, verbose=False): """ Run a command and write the output to a file. In verbose mode also print to screen. """ - print(" ".join(cmd) + "\n") + + cmd_str = format_cmd(cmd) + print(cmd_str + "\n") + with open(log_file, "w", buffering=0) as f: # replace 'w' with 'wb' for Python 3 - f.write(" ".join(cmd) + "\n") + f.write(cmd_str + "\n") process = subprocess.Popen(cmd, stdout=subprocess.PIPE) for line in iter( process.stdout.readline, "" ) @@ -409,20 +483,14 @@ def compute_poses_and_clouds(geometry_mapper_path, args): geometry_mapper_path, "--ros_bag", args.ros_bag, - "--sparse_map", - args.sparse_map, "--output_dir", args.output_dir, - "--nav_cam_topic", - args.nav_cam_topic, + "--camera_topics", + args.camera_topics, "--haz_cam_points_topic", args.haz_cam_points_topic, - "--haz_cam_intensity_topic", - args.haz_cam_intensity_topic, - "--sci_cam_topic", - args.sci_cam_topic, - "--camera_type", - args.camera_type, + "--camera_types", + args.camera_types, "--start", args.start, "--duration", @@ -431,6 +499,8 @@ args.duration, "--sampling_spacing_seconds", args.sampling_spacing_seconds, "--dist_between_processed_cams", args.dist_between_processed_cams, + "--angle_between_processed_cams", + args.angle_between_processed_cams, "--max_iso_times_exposure", args.max_iso_times_exposure, "--depth_exclude_columns", args.depth_exclude_columns, @@ -443,6 +513,10 @@ args.depth_hole_fill_diameter, "--reliability_weight_exponent", args.reliability_weight_exponent, + "--max_ray_length", + args.max_ray_length, + "--voxel_size", + args.voxel_size, "--median_filters", args.median_filters, ] + args.localization_options.split(" ") @@ -455,17 +529,16 @@ if args.simulated_data: cmd += ["--simulated_data"] + else: + cmd += ["--sparse_map", args.sparse_map] if args.save_debug_data: cmd += ["--save_debug_data"] - if args.external_mesh != "": - cmd += ["--external_mesh", args.external_mesh] - - if args.scicam_to_hazcam_timestamp_offset_override_value != "": + if args.nav_cam_to_sci_cam_offset_override_value != "": cmd += [ - "--scicam_to_hazcam_timestamp_offset_override_value", - args.scicam_to_hazcam_timestamp_offset_override_value, + "--nav_cam_to_sci_cam_offset_override_value", + args.nav_cam_to_sci_cam_offset_override_value, ] log_file = os.path.join(args.output_dir, "geometry_mapper_log.txt") @@ -508,7 +581,11 @@ def smoothe_mesh(input_mesh, output_mesh, args, attempt): raise Exception("Cannot find the smoothing tool:" + smoothe_mesh_path) num_iterations = "1" - smoothe_boundary = "1" + if args.no_boundary_erosion: + smoothe_boundary = "0" + else: + smoothe_boundary = "1" + cmd = [ smoothe_mesh_path, num_iterations, @@ -598,7 +675,7 @@ def undistort_images( cmd = [ undistort_image_path, "--undistorted_crop_win", - undist_crop_win, + str(undist_crop_win[0]) + " " + str(undist_crop_win[1]), "--save_bgr", "--robot_camera", cam_type, @@ -644,6 +721,30 @@ def
create_texrecon_cameras(args, src_path, undist_dir, cam_type): run_cmd(cmd, log_file, verbose=args.verbose) +def texture_individual_images(args, exec_path, mesh, dist_image_list, cam_type): + + camera_list = os.path.join(args.output_dir, cam_type + "_transforms.txt") + orthoproject_path = os.path.join(exec_path, "orthoproject") + + cmd = [ + orthoproject_path, + "-mesh", + mesh, + "-image_list", + dist_image_list, + "-camera_list", + camera_list, + "--camera_name", + cam_type, + "-output_prefix", + args.output_dir + "/" + cam_type, + ] + + log_file = os.path.join(args.output_dir, "orthoproject_log.txt") + print("Projecting individual images. Writing the output log to: " + log_file) + run_cmd(cmd, log_file, verbose=args.verbose) + + def run_texrecon(args, src_path, mesh, undist_dir, cam_type): # That is one long path @@ -656,7 +757,7 @@ def run_texrecon(args, src_path, mesh, undist_dir, cam_type): if not os.path.exists(texrecon_path): raise Exception("Cannot find: " + texrecon_path) - texrecon_dir = os.path.join(args.output_dir, cam_type + "_texture/run") + texrecon_dir = os.path.join(args.output_dir, cam_type + "_texture/" + cam_type) parent_dir = os.path.dirname(texrecon_dir) if os.path.isdir(parent_dir): # Wipe the existing directory @@ -706,7 +807,11 @@ def find_sci_cam_scale(image_file): return float(image_width) / float(config_width) -def texture_mesh(src_path, cam_type, mesh, args): +def texture_mesh(src_path, exec_path, cam_type, crop_win_map, mesh, args): + if args.simulated_data and cam_type == "nav_cam": + print("Texturing nav_cam is not supported with simulated data.") + return "None" + dist_image_list = os.path.join(args.output_dir, cam_type + "_index.txt") with open(dist_image_list) as f: image_files = f.readlines() @@ -721,28 +826,26 @@ def texture_mesh(src_path, cam_type, mesh, args): if not args.simulated_data: - scale = 1.0 - undist_crop_win = "" - - if cam_type == "nav_cam": - scale = 1.0 - undist_crop_win = "1100 776" - elif cam_type == "haz_cam": + if cam_type == "sci_cam": + # The sci cam needs special treatment + scale = find_sci_cam_scale(image_files[0].rstrip()) + else: scale = 1.0 - undist_crop_win = "210 160" - elif cam_type == "sci_cam": - # Deal with the fact that the robot config file may assume - # a different resolution than what is in the bag - scale = find_sci_cam_scale(image_files[0].rstrip()) - crop_wd = 2 * int(round(625.0 * scale)) # an even number - crop_ht = 2 * int(round(500.0 * scale)) # an even number - undist_crop_win = str(crop_wd) + " " + str(crop_ht) + # Make the crop win even, it is just easier that way + undist_crop_win = [ + 2 * int(round(float(crop_win_map[cam_type][0]) * scale / 2.0)), + 2 * int(round(float(crop_win_map[cam_type][1]) * scale / 2.0)), + ] undistort_images( args, cam_type, dist_image_list, undist_dir, undist_crop_win, scale ) create_texrecon_cameras(args, src_path, undist_dir, cam_type) + + if args.texture_individual_images: + texture_individual_images(args, exec_path, mesh, dist_image_list, cam_type) + textured_mesh = run_texrecon(args, src_path, mesh, undist_dir, cam_type) else: # Simulated images don't have distortion @@ -826,14 +929,14 @@ def merge_poses_and_clouds(args): if __name__ == "__main__": - (src_path, exec_path, args) = process_args(sys.argv) + (src_path, exec_path, crop_win_map, args) = process_args(sys.argv) geometry_mapper_path = os.path.join(exec_path, "geometry_mapper") batch_tsdf_path = os.path.join( os.environ["HOME"], "catkin_ws/devel/lib/voxblox_ros/batch_tsdf" ) - sanity_checks(geometry_mapper_path, 
batch_tsdf_path, args) + sanity_checks(geometry_mapper_path, batch_tsdf_path, crop_win_map, args) setup_outputs(args) @@ -852,70 +955,62 @@ def merge_poses_and_clouds(args): # Smothing must happen before hole-filling, to remove weird # artifacts smooth_mesh = os.path.join(args.output_dir, "smooth_mesh.ply") - if start_step <= 2 and args.external_mesh == "": + if start_step <= 2 and args.external_mesh == "" and (not args.simulated_data): attempt = 1 smoothe_mesh(fused_mesh, smooth_mesh, args, attempt) # Fill holes hole_filled_mesh = os.path.join(args.output_dir, "hole_filled_mesh.ply") - if start_step <= 3 and args.external_mesh == "": + if start_step <= 3 and args.external_mesh == "" and (not args.simulated_data): attempt = 1 fill_holes_in_mesh(smooth_mesh, hole_filled_mesh, args, attempt) # Rm small connected components clean_mesh = os.path.join(args.output_dir, "clean_mesh.ply") - if start_step <= 4 and args.external_mesh == "": + if start_step <= 4 and args.external_mesh == "" and (not args.simulated_data): rm_connected_components(hole_filled_mesh, clean_mesh, args) # Smoothe again smooth_mesh2 = os.path.join(args.output_dir, "smooth_mesh2.ply") - if start_step <= 5 and args.external_mesh == "": + if start_step <= 5 and args.external_mesh == "" and (not args.simulated_data): attempt = 2 smoothe_mesh(clean_mesh, smooth_mesh2, args, attempt) - # Fill holes again. That is necessary, and should happen after smoothing. - # Mesh cleaning creates small holes and they can't be cleaned well - # without some more smoothing like above. + # Fill holes again. That is necessary, and should happen after + # smoothing. Mesh cleaning creates small holes and they can't be + # cleaned well without some more smoothing like above. hole_filled_mesh2 = os.path.join(args.output_dir, "hole_filled_mesh2.ply") - if start_step <= 6 and args.external_mesh == "": + if start_step <= 6 and args.external_mesh == "" and (not args.simulated_data): attempt = 2 fill_holes_in_mesh(smooth_mesh2, hole_filled_mesh2, args, attempt) # Smoothe again as filling holes can make the mesh a little rough smooth_mesh3 = os.path.join(args.output_dir, "smooth_mesh3.ply") - if start_step <= 7 and args.external_mesh == "": + if start_step <= 7 and args.external_mesh == "" and (not args.simulated_data): attempt = 3 smoothe_mesh(hole_filled_mesh2, smooth_mesh3, args, attempt) # Simplify the mesh simplified_mesh = os.path.join(args.output_dir, "simplified_mesh.ply") - if start_step <= 8 and args.external_mesh == "": + if start_step <= 8 and args.external_mesh == "" and (not args.simulated_data): simplify_mesh(smooth_mesh3, simplified_mesh, args) - if args.external_mesh != "": - # So that can texture on top of it + if args.simulated_data: + simplified_mesh = fused_mesh + elif args.external_mesh != "": simplified_mesh = args.external_mesh - nav_textured_mesh = "" - haz_textured_mesh = "" - sci_textured_mesh = "" + textured_meshes = [] if start_step <= 9: - # For simulated data texturing nav cam images is not supported - if (not args.simulated_data) and ( - args.camera_type == "all" or args.camera_type == "nav_cam" - ): - nav_textured_mesh = texture_mesh(src_path, "nav_cam", simplified_mesh, args) - - # For simulated data texturing haz cam images is not supported - if (not args.simulated_data) and ( - args.camera_type == "all" or args.camera_type == "haz_cam" - ): - haz_textured_mesh = texture_mesh(src_path, "haz_cam", simplified_mesh, args) - - if args.camera_type == "all" or args.camera_type == "sci_cam": - sci_textured_mesh = 
texture_mesh(src_path, "sci_cam", simplified_mesh, args) - - if args.external_mesh == "": + for camera_type in args.camera_types.split(): + textured_mesh = texture_mesh( + src_path, exec_path, camera_type, crop_win_map, simplified_mesh, args + ) + textured_meshes += [textured_mesh] + + if args.simulated_data: + print("Fused mesh: " + fused_mesh) + elif args.external_mesh == "": print("Fused mesh: " + fused_mesh) print("Smoothed mesh: " + smooth_mesh) print("Hole-filled mesh: " + hole_filled_mesh) @@ -927,11 +1022,7 @@ def merge_poses_and_clouds(args): else: print("External mesh: " + args.external_mesh) - if nav_textured_mesh != "": - print("Nav cam textured mesh: " + nav_textured_mesh) - - if haz_textured_mesh != "": - print("Haz cam textured mesh: " + haz_textured_mesh) - - if sci_textured_mesh != "": - print("Sci cam textured mesh: " + sci_textured_mesh) + count = 0 + for camera_type in args.camera_types.split(): + print(camera_type + " textured mesh: " + textured_meshes[count]) + count += 1 diff --git a/dense_map/geometry_mapper/tools/image_picker.cc b/dense_map/geometry_mapper/tools/image_picker.cc index d8e14135..82ddfe1e 100644 --- a/dense_map/geometry_mapper/tools/image_picker.cc +++ b/dense_map/geometry_mapper/tools/image_picker.cc @@ -60,7 +60,8 @@ DEFINE_string(ros_bag, "", DEFINE_string(output_nav_cam_dir, "", "The path to a file saving the nav cam image list."); -DEFINE_string(nav_cam_topic, "/hw/cam_nav", "The nav cam topic in the bag file."); +DEFINE_string(nav_cam_topic, "/mgt/img_sampler/nav_cam/image_record", + "The nav cam topic in the bag file."); DEFINE_string(haz_cam_points_topic, "/hw/depth_haz/points", "The depth point cloud topic in the bag file."); @@ -72,19 +73,18 @@ DEFINE_double(max_time_between_images, 1.0e+10, "Select additional nav cam images to make the timestamps between any two " "consecutive such images be no more than this."); -DEFINE_double(bracket_len, 2.0, +DEFINE_double(bracket_len, 0.6, "Lookup sci and haz cam images only between consecutive nav cam images " "whose distance in time is no more than this (in seconds). 
It is assumed " "the robot moves slowly and uniformly during this time."); -DEFINE_double(scicam_to_hazcam_timestamp_offset_override_value, +DEFINE_double(nav_cam_to_sci_cam_offset_override_value, std::numeric_limits::quiet_NaN(), - "Override the value of scicam_to_hazcam_timestamp_offset from the robot config " + "Override the value of nav_cam_to_sci_cam_timestamp_offset from the robot config " "file with this value."); int main(int argc, char** argv) { ff_common::InitFreeFlyerApplication(&argc, &argv); - if (FLAGS_ros_bag.empty()) LOG(FATAL) << "The bag file was not specified."; if (FLAGS_output_nav_cam_dir.empty()) LOG(FATAL) << "The output nav cam dir was not specified."; @@ -105,49 +105,52 @@ int main(int argc, char** argv) { dense_map::RosBagHandle haz_cam_points_handle(FLAGS_ros_bag, FLAGS_haz_cam_points_topic); dense_map::RosBagHandle haz_cam_intensity_handle(FLAGS_ros_bag, FLAGS_haz_cam_intensity_topic); - // Read the config file - double navcam_to_hazcam_timestamp_offset = 0.0, scicam_to_hazcam_timestamp_offset = 0.0; - Eigen::MatrixXd hazcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_hazcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd scicam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::MatrixXd navcam_to_body_trans = Eigen::MatrixXd::Identity(4, 4); - Eigen::Affine3d hazcam_depth_to_image_transform; - hazcam_depth_to_image_transform.setIdentity(); // default value - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", "scicam_to_hazcam_timestamp_offset", - "hazcam_to_navcam_transform", "scicam_to_hazcam_transform", "nav_cam_transform", - "hazcam_depth_to_image_transform", navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, nav_cam_params, haz_cam_params, - sci_cam_params); + std::vector cam_types = {"nav_cam", "haz_cam", "sci_cam"}; + std::vector cam_params; + std::vector nav_to_cam_trans; + std::vector nav_to_cam_timestamp_offset; + Eigen::Affine3d nav_cam_to_body_trans; + Eigen::Affine3d haz_cam_depth_to_image_transform; + dense_map::readConfigFile( // Inputs + cam_types, "nav_cam_transform", "haz_cam_depth_to_image_transform", + // Outputs + cam_params, nav_to_cam_trans, nav_to_cam_timestamp_offset, nav_cam_to_body_trans, + haz_cam_depth_to_image_transform); + + if (!std::isnan(FLAGS_nav_cam_to_sci_cam_offset_override_value)) { + for (size_t it = 0; it < cam_types.size(); it++) { + if (cam_types[it] == "sci_cam") { + double new_val = FLAGS_nav_cam_to_sci_cam_offset_override_value; + std::cout << "Overriding the value " << nav_to_cam_timestamp_offset[it] + << " of nav_cam_to_sci_cam_timestamp_offset with: " << new_val << std::endl; + nav_to_cam_timestamp_offset[it] = new_val; + } + } + } // This will be used for output std::map haz_depth_to_image_timestamps, haz_image_to_depth_timestamps; std::vector all_nav_cam_timestamps, all_haz_cam_timestamps, all_sci_cam_timestamps; - double navcam_to_scicam_timestamp_offset - = navcam_to_hazcam_timestamp_offset - 
scicam_to_hazcam_timestamp_offset; - - if (!std::isnan(FLAGS_scicam_to_hazcam_timestamp_offset_override_value)) { - double new_val = FLAGS_scicam_to_hazcam_timestamp_offset_override_value; - std::cout << "Overriding scicam_to_hazcam_timestamp_offset.\n"; - scicam_to_hazcam_timestamp_offset = new_val; - } + // Here we count on the order of cam_types above + double nav_cam_to_haz_cam_timestamp_offset = nav_to_cam_timestamp_offset[1]; + double nav_cam_to_sci_cam_timestamp_offset = nav_to_cam_timestamp_offset[2]; - std::cout << "navcam_to_hazcam_timestamp_offset = " << navcam_to_hazcam_timestamp_offset << std::endl; - std::cout << "scicam_to_hazcam_timestamp_offset = " << scicam_to_hazcam_timestamp_offset << std::endl; + std::cout << "nav_cam_to_haz_cam_timestamp_offset = " << nav_cam_to_haz_cam_timestamp_offset + << "\n"; + std::cout << "nav_cam_to_sci_cam_timestamp_offset = " << nav_cam_to_sci_cam_timestamp_offset + << "\n"; - // TODO(oalexan1): Make these into functions + // TODO(oalexan1): Make the logic below into functions // Read the haz cam data we will need double haz_cam_start_time = -1.0; - std::vector const& haz_cam_intensity_msgs = haz_cam_intensity_handle.bag_msgs; + std::vector const& haz_cam_intensity_msgs + = haz_cam_intensity_handle.bag_msgs; for (size_t it = 0; it < haz_cam_intensity_msgs.size(); it++) { - sensor_msgs::Image::ConstPtr image_msg = haz_cam_intensity_msgs[it].instantiate(); + sensor_msgs::Image::ConstPtr image_msg + = haz_cam_intensity_msgs[it].instantiate(); if (image_msg) { double haz_cam_time = image_msg->header.stamp.toSec(); all_haz_cam_timestamps.push_back(haz_cam_time); @@ -166,6 +169,7 @@ int main(int argc, char** argv) { // Read the sci cam data we will need (images and clouds) std::vector const& sci_cam_msgs = sci_cam_handle.bag_msgs; std::cout << "Number of sci cam images " << sci_cam_msgs.size() << std::endl; + std::set sci_cam_set; for (size_t it = 0; it < sci_cam_msgs.size(); it++) { sensor_msgs::Image::ConstPtr image_msg = sci_cam_msgs[it].instantiate(); if (image_msg) { @@ -178,6 +182,7 @@ int main(int argc, char** argv) { if (comp_image_msg) { double sci_cam_time = comp_image_msg->header.stamp.toSec(); all_sci_cam_timestamps.push_back(sci_cam_time); + sci_cam_set.insert(sci_cam_time); } } if (all_sci_cam_timestamps.empty()) LOG(FATAL) << "No sci cam timestamps."; @@ -196,10 +201,12 @@ int main(int argc, char** argv) { } } } + + std::vector sci_cam_timestamps_plus_extra = all_sci_cam_timestamps; for (size_t extra_it = 0; extra_it < extra_sci_cam_timestamps.size(); extra_it++) { - all_sci_cam_timestamps.push_back(extra_sci_cam_timestamps[extra_it]); + sci_cam_timestamps_plus_extra.push_back(extra_sci_cam_timestamps[extra_it]); } - std::sort(all_sci_cam_timestamps.begin(), all_sci_cam_timestamps.end()); + std::sort(sci_cam_timestamps_plus_extra.begin(), sci_cam_timestamps_plus_extra.end()); // Read the nav cam data we will need std::vector const& nav_cam_msgs = nav_cam_handle.bag_msgs; @@ -227,13 +234,15 @@ int main(int argc, char** argv) { // time in the list of nav and sci messages. // Keep the nav image whose time plus FLAGS_bracket_len is <= the given sci cam image. 
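Concretely, each sci cam timestamp is first mapped to the nav cam clock (s = sci_time - offset) and then bracketed by a nav cam image at most d/2 seconds before it and one at most d/2 seconds after it. A self-contained Python sketch with synthetic timestamps (simplified: no caching of the search position, and, like the forward/backward scans below, it keeps the widest qualifying pair):

def bracket(nav_times, sci_times, offset, d):
    # Keep the earliest nav time in [s - d/2, s] and the latest in (s, s + d/2].
    picked = []
    for sci in sci_times:
        s = sci - offset
        left = [t for t in nav_times if s - d / 2.0 <= t <= s]
        right = [t for t in nav_times if s < t <= s + d / 2.0]
        if left and right:
            picked += [min(left), max(right)]
    return sorted(set(picked))

nav = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]  # synthetic nav cam times, in seconds
sci = [0.35, 0.95]                    # synthetic sci cam times
print(bracket(nav, sci, offset=0.0, d=0.6))  # [0.2, 0.6, 0.8, 1.0]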
int num_nav = all_nav_cam_timestamps.size(); - int num_sci = all_sci_cam_timestamps.size(); + int num_sci_plus_extra = sci_cam_timestamps_plus_extra.size(); + int num_sci = sci_cam_set.size(); int nav_cam_pos = 0; // used to narrow down the lookup - for (int sci_it = 0; sci_it < num_sci; sci_it++) { + int num_bracketed_sci_cams = 0; + for (int sci_it = 0; sci_it < num_sci_plus_extra; sci_it++) { for (int nav_it1 = nav_cam_pos; nav_it1 < num_nav; nav_it1++) { // Adjust for timestamp offsets - double t1 = all_nav_cam_timestamps[nav_it1] + navcam_to_hazcam_timestamp_offset; - double s = all_sci_cam_timestamps[sci_it] + scicam_to_hazcam_timestamp_offset; + double t1 = all_nav_cam_timestamps[nav_it1]; + double s = sci_cam_timestamps_plus_extra[sci_it] - nav_cam_to_sci_cam_timestamp_offset; bool is_good1 = (t1 >= s - d/2.0 && t1 <= s); @@ -242,15 +251,27 @@ int main(int argc, char** argv) { nav_cam_pos = nav_it1; // save this for the future // Now get the right bracket - for (size_t nav_it2 = num_nav - 1; nav_it2 >= 0; nav_it2--) { - // Adjust for timestamp offsets - double t2 = all_nav_cam_timestamps[nav_it2] + navcam_to_hazcam_timestamp_offset; + // Use an int counter, as with unsigned int values subtracting is dangerous + for (int nav_it2 = num_nav - 1; nav_it2 >= 0; nav_it2--) { + double t2 = all_nav_cam_timestamps[nav_it2]; bool is_good2 = (s < t2 && t2 <= s + d/2.0); if (is_good2) { nav_cam_timestamps.push_back(all_nav_cam_timestamps[nav_it1]); nav_cam_timestamps.push_back(all_nav_cam_timestamps[nav_it2]); - std::cout << "sci - nav1 and nav2 - sci: " << s - t1 << ' ' << t2 - s << std::endl; + std::cout << std::setprecision(17) << std::fixed << "For "; + if (sci_cam_set.find(sci_cam_timestamps_plus_extra[sci_it]) != sci_cam_set.end()) + std::cout << "sci_cam "; + else + std::cout << "extra "; + std::cout << "timestamp " + << sci_cam_timestamps_plus_extra[sci_it] << std::setprecision(4) + << ", adj_stamp - nav1 and nav2 - adj_stamp: " << s - t1 << ' ' << t2 - s + << std::endl; + + if (sci_cam_set.find(sci_cam_timestamps_plus_extra[sci_it]) != sci_cam_set.end()) + num_bracketed_sci_cams++; + break; // Found what we needed, stop this loop } } @@ -260,17 +281,27 @@ int main(int argc, char** argv) { } } + if (num_bracketed_sci_cams == num_sci) + std::cout << "Bracketed all " << num_sci << " sci_cam timestamps." << std::endl; + else + std::cout << "Bracketed only " << num_bracketed_sci_cams << " sci cam timestamps " + << "out of " << num_sci << ", not counting the extra timestamps. " + << "Perhaps a bigger bracket may be desired but that may result in inaccurate " + << "timestamp interpolation later. 
Ensure existing images are removed before new " + << "ones are added.\n"; + // Ensure the timestamps are sorted std::sort(nav_cam_timestamps.begin(), nav_cam_timestamps.end()); // Write the images to disk + std::cout << "Writing the images to: " << FLAGS_output_nav_cam_dir << std::endl; + int bag_pos = 0; bool save_grayscale = true; // For feature matching need grayscale for (size_t nav_it = 0; nav_it < nav_cam_timestamps.size(); nav_it++) { - double found_time = -1.0; // won't be used, but expected by the api - int bag_pos = 0; // reset this each time + double found_time = -1.0; cv::Mat image; - if (!dense_map::lookupImage(nav_cam_timestamps[nav_it], nav_cam_handle.bag_msgs, - save_grayscale, image, bag_pos, + if (!dense_map::lookupImage(nav_cam_timestamps[nav_it], nav_cam_handle.bag_msgs, save_grayscale, image, + bag_pos, // will change each time found_time)) LOG(FATAL) << "Could not find image at the desired time."; @@ -281,6 +312,5 @@ int main(int argc, char** argv) { std::cout << "Writing: " << filename_buffer << std::endl; cv::imwrite(filename_buffer, image); } - return 0; } diff --git a/dense_map/geometry_mapper/tools/old_orthoproject.cc b/dense_map/geometry_mapper/tools/old_orthoproject.cc new file mode 100644 index 00000000..121278b3 --- /dev/null +++ b/dense_map/geometry_mapper/tools/old_orthoproject.cc @@ -0,0 +1,383 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. + * + * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking + * platform" software is licensed under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include + +// This code used to project a camera image onto a plane most aligned +// to the mesh portion seen in the current camera view. It is no +// longer needed, it is broken, but may be of use one day. + +#if 0 + +DEFINE_string(image, "", + "The image to orthoproject. For each image named image.jpg, " + "an input file image_world2cam.txt is expected, and the output " + "orthoimage will be written to image_ortho.png"); + +DEFINE_string(image_list, "", + "Instead of a single image, specify the list of images to orthoproject, " + "with the same convention as above. "); + +DEFINE_double(min_ray_length, 0.0, + "The shortest distance from the camera to the mesh."); + +DEFINE_double(max_ray_length, 10.0, + "The longest distance from the camera to the mesh."); + +DEFINE_double(pixel_size, 0.01, + "The output orthoimage pixel size, in meters."); + +DEFINE_bool(snap_plane, false, + "Adjust the plane to project onto to make " + "angles multiple of 45 degrees with the coordinate axes."); + +DEFINE_string(plane_normal, "", + "Override the auto-computed the normal of the plane to " + "project onto with this. 
Specify as three values in quotes."); + +// Shoot a set of rays from the camera onto the mesh and see +// where they intersect +void sample_mesh(cv::Mat const& image, + camera::CameraModel const& cam, + std::shared_ptr const& bvh_tree, + std::vector & intersections) { + // Wipe the output + intersections.clear(); + + Eigen::Vector3d cam_ctr = -cam.GetTransform().rotation().transpose() * + cam.GetTransform().translation(); + + // Sample the image uniformly + int num_samples = 100; + Eigen::Matrix3d cam_to_world_rot = cam.GetTransform().rotation().transpose(); +#pragma omp parallel for + for (int itx = 0; itx < num_samples; itx++) { + for (int ity = 0; ity < num_samples; ity++) { + double x = (image.cols - 1.0) * static_cast(itx) / (num_samples - 1.0); + double y = (image.rows - 1.0) * static_cast(ity) / (num_samples - 1.0); + + Eigen::Vector2d dist_pix; + dist_pix << x, y; + + Eigen::Vector2d undist_centered_pix; + cam.GetParameters().Convert + (dist_pix, &undist_centered_pix); + + // Ray from camera going through the pixel + Eigen::Vector3d cam_ray(undist_centered_pix.x() / cam.GetParameters().GetFocalVector()[0], + undist_centered_pix.y() / cam.GetParameters().GetFocalVector()[1], + 1.0); + cam_ray.normalize(); + + Eigen::Vector3d world_ray = cam_to_world_rot * cam_ray; + + // Set up the ray structure for the mesh + BVHTree::Ray bvh_ray; + bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); + bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); + bvh_ray.dir.normalize(); + bvh_ray.tmin = FLAGS_min_ray_length; + bvh_ray.tmax = FLAGS_max_ray_length; + + // Intersect the ray with the mesh + BVHTree::Hit hit; + if (!bvh_tree->intersect(bvh_ray, &hit)) + continue; + double cam_to_mesh_dist = hit.t; + + Eigen::Vector3d intersection = cam_ctr + cam_to_mesh_dist * world_ray; + +#pragma omp critical + { + intersections.push_back(intersection); + } + } + } + + if (intersections.empty()) + LOG(FATAL) << "Could not project the image onto the mesh.\n"; +} + +// Create a new coordinate system having the desired plane as its x-y +// plane, and the normal to the plane as its z axis. +void setup_plane_coord_system(camera::CameraModel const& cam, + Eigen::Vector3d & plane_normal, + Eigen::Matrix3d & world2planecoord) { + // The camera view (the ray starting at the camera center and going through + // the image center). + Eigen::Matrix3d cam_to_world_rot = cam.GetTransform().rotation().transpose(); + Eigen::Vector3d cam_dir = cam_to_world_rot * Eigen::Vector3d(0, 0, 1); + + // Ensure that the normal to the plane agrees with the camera direction + double dot_prod = cam_dir.dot(plane_normal); + if (dot_prod < 0) + plane_normal *= -1.0; + + // The "up" direction + Eigen::Vector3d z(0, 0, 1); + + Eigen::Vector3d lz = plane_normal.normalized(); + // The new y points "down", as suggested by the z axis, while still + // being in the plane + Eigen::Vector3d ly = -1.0 * (z - z.dot(plane_normal) * plane_normal); + // Treat the special case when the plane is perfectly horizontal, + // then the normal can't point down. Normalize. + if (ly.norm() == 0) + ly = Eigen::Vector3d(0, 1, 0); + ly.normalize(); + // The new x is then the remaining direction so that we have a + // right-handed coordinate system. + Eigen::Vector3d lx = ly.cross(lz); + if (lx.norm() == 0) { + lx = Eigen::Vector3d(1, 0, 0); // should never happen + ly = lz.cross(lx); + } + lx.normalize(); + + // The matrix to transform to the coordinate system having the + // desired plane to project onto as its x-y plane. 
+ Eigen::Matrix3d planecoord2world(3, 3); + planecoord2world.col(0) = lx; + planecoord2world.col(1) = ly; + planecoord2world.col(2) = lz; + + world2planecoord = planecoord2world.transpose(); +} + +void orthoproject(cv::Mat const& image, + Eigen::Matrix3d const& world2planecoord, + Eigen::Vector3d const& plane_normal, + camera::CameraModel const& cam, + std::shared_ptr const& bvh_tree, + std::vector const& intersections, + double pixel_size, + cv::Mat & orthoimage) { + // Find the bounding box of the mesh points seen by the camera + // in the coordinate system of the desired plane + + double min_x = std::numeric_limits::max(), max_x = -min_x; + double min_y = min_x, max_y = max_x; + double min_z = min_x, max_z = max_x; + for (size_t it = 0; it < intersections.size(); it++) { + Eigen::Vector3d local_pt = world2planecoord * intersections[it]; + if (local_pt.x() < min_x) min_x = local_pt.x(); + if (local_pt.x() > max_x) max_x = local_pt.x(); + if (local_pt.y() < min_y) min_y = local_pt.y(); + if (local_pt.y() > max_y) max_y = local_pt.y(); + if (local_pt.z() < min_z) min_z = local_pt.z(); + if (local_pt.z() > max_z) max_z = local_pt.z(); + } + + if (min_x > max_x || min_y > max_y || min_z > max_z) + LOG(FATAL) << "The rays don't intersect the mesh."; + + // The output image dimensions + int num_cols = ceil((max_x - min_x)/pixel_size); + int num_rows = ceil((max_y - min_y)/pixel_size); + + orthoimage = cv::Mat(num_rows, num_cols, CV_8UC1, cv::Scalar(0)); + +#pragma omp parallel for + for (int col = 0; col < num_cols; col++) { + for (int row = 0; row < num_rows; row++) { + // Create a point in the orthoimage plane corresponding to a future pixel + double z = min_z; + double y = min_y + row * pixel_size + pixel_size/2.0; + double x = min_x + col * pixel_size + pixel_size/2.0; + Eigen::Vector3d world_pt = world2planecoord.transpose() * Eigen::Vector3d(x, y, z); + + // Shoot a ray from that point to the mesh + BVHTree::Ray bvh_ray; + bvh_ray.dir = dense_map::eigen_to_vec3f(plane_normal); + bvh_ray.origin = dense_map::eigen_to_vec3f(world_pt); + bvh_ray.tmin = 0; + bvh_ray.tmax = max_z - min_z; + + // Intersect the ray with the mesh + BVHTree::Hit hit; + if (!bvh_tree->intersect(bvh_ray, &hit)) + continue; + + double plane_to_mesh_dist = hit.t; + Eigen::Vector3d intersection = world_pt + plane_to_mesh_dist * plane_normal; + Eigen::Vector3d cam_pt = cam.GetTransform() * intersection; + + // Skip points that project behind the camera + if (cam_pt.z() <= 0) + continue; + + // Get the distorted pixel value + Eigen::Vector2d undist_centered_pix = + cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); + Eigen::Vector2d dist_pix; + cam.GetParameters().Convert + (undist_centered_pix, &dist_pix); + + // Skip pixels that don't project in the image + if (dist_pix.x() < 0 || dist_pix.x() > image.cols - 1 || + dist_pix.y() < 0 || dist_pix.y() > image.rows - 1) + continue; + + // Find the pixel value + cv::Size s(1, 1); + cv::Mat interp_pix_val; + cv::Point2f P; + P.x = dist_pix[0]; + P.y = dist_pix[1]; + + // Do bilinear interpolation into the image + cv::getRectSubPix(image, s, P, interp_pix_val); + uchar color = interp_pix_val.at(0, 0); + + orthoimage.at(row, col) = color; + } + } +} + +void run_orthoproject(bool snap_plane, + std::string const& image_file, + double pixel_size, + std::string const& plane_normal_str, + std::shared_ptr const& bvh_tree, + camera::CameraModel & cam) { + // Manufacture the world_to_cam_file and output orthoimage_file + std::string world_to_cam_file, 
orthoimage_file; + + world_to_cam_file = ff_common::ReplaceInStr(image_file, + ".jpg", "_world2cam.txt"); + orthoimage_file = ff_common::ReplaceInStr(image_file, ".jpg", "_ortho.png"); + + if (world_to_cam_file == image_file || orthoimage_file == image_file) + LOG(FATAL) << "Expecting a .jpg image for file: " << image_file; + + Eigen::Affine3d world_to_cam; + if (!dense_map::readAffine(world_to_cam, world_to_cam_file)) + LOG(FATAL) << "Could not read a 4x4 matrix from: " << world_to_cam_file << "\n"; + cam.SetTransform(world_to_cam); + + cv::Mat image = cv::imread(image_file, CV_LOAD_IMAGE_GRAYSCALE); + if (image.rows == 0 || image.cols == 0) + LOG(FATAL) << "Could not read image from: " << image_file; + + // Shoot a set of rays from the camera onto the mesh and see + // where they intersect + std::vector intersections; + sample_mesh(image, cam, bvh_tree, intersections); + + Eigen::Vector3d plane_normal; + if (plane_normal_str != "") { + // The user wants to override the plane normal + std::istringstream is(plane_normal_str); + if (!(is >> plane_normal[0] >> plane_normal[1] >> plane_normal[2])) + LOG(FATAL) << "Could not parse three values (the plane normal) from: " << plane_normal_str; + std::cout << "Using user-defined normal to the plane: " + << plane_normal.transpose() << std::endl; + } else { + // Find the best fit plane for the projections of the pixels onto the walls + Eigen::Vector3d centroid; + dense_map::bestFitPlane(intersections, centroid, plane_normal); + + std::cout << "Normal to the plane: " << plane_normal.transpose() << std::endl; + + // Make the plane have angles of 45 degrees with the coordinate axes + if (snap_plane) { + dense_map::snapPlaneNormal(plane_normal); + std::cout << "Snapped normal to the plane: " << plane_normal.transpose() << std::endl; + } + } + + plane_normal.normalize(); + + // Create a new coordinate system having the desired plane as its x-y + // plane, and the normal to the plane as its z axis. + Eigen::Matrix3d world2planecoord; + setup_plane_coord_system(cam, plane_normal, world2planecoord); + + cv::Mat orthoimage; + orthoproject(image, world2planecoord, plane_normal, cam, + bvh_tree, intersections, pixel_size, + orthoimage); + + std::cout << "Writing: " << orthoimage_file << std::endl; + cv::imwrite(orthoimage_file, orthoimage); +} + +int main(int argc, char** argv) { + ff_common::InitFreeFlyerApplication(&argc, &argv); + + if (FLAGS_mesh.empty()) + LOG(FATAL) << "The mesh was not specified."; + + // Load the mesh + mve::TriangleMesh::Ptr mesh; + std::shared_ptr mesh_info; + std::shared_ptr graph; // TODO(oalexan1): Is this necessary? 
+ std::shared_ptr bvh_tree; + dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree); + + // Load the camera intrinsics + config_reader::ConfigReader config; + config.AddFile("cameras.config"); + if (!config.ReadFiles()) + LOG(FATAL) << "Could not read the configuration file."; + camera::CameraParameters cam_params(&config, "nav_cam"); + camera::CameraModel cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(), + cam_params); + + // Run either a single image or a list of images + std::vector image_list; + if (!FLAGS_image.empty()) { + image_list.push_back(FLAGS_image); + } else if (!FLAGS_image_list.empty()) { + std::ifstream ifs(FLAGS_image_list.c_str()); + std::string image_file; + while (ifs >> image_file) + image_list.push_back(image_file); + } else { + LOG(FATAL) << "Must specify either an image to orthoproject or an image list."; + } + + for (size_t it = 0; it < image_list.size(); it++) + run_orthoproject(FLAGS_snap_plane, + image_list[it], + FLAGS_pixel_size, + FLAGS_plane_normal, + bvh_tree, cam); + + return 0; +} +#endif + +int main(int argc, char** argv) { + std::cout << "This code is broken and commented out." << std::endl; +} diff --git a/dense_map/geometry_mapper/tools/orthoproject.cc b/dense_map/geometry_mapper/tools/orthoproject.cc index d622d924..3bc19528 100644 --- a/dense_map/geometry_mapper/tools/orthoproject.cc +++ b/dense_map/geometry_mapper/tools/orthoproject.cc @@ -28,360 +28,17 @@ #include +#include #include -#include #include -#if 0 - -// Old broken code, project onto a plane. See current working code further down. - -DEFINE_string(image, "", - "The image to orthoproject. For each image named image.jpg, " - "an input file image_world2cam.txt is expected, and the output " - "orthoimage will be written to image_ortho.png"); - -DEFINE_string(image_list, "", - "Instead of a single image, specify the list of images to orthoproject, " - "with the same convention as above. "); - -DEFINE_double(min_ray_length, 0.0, - "The shortest distance from the camera to the mesh."); - -DEFINE_double(max_ray_length, 10.0, - "The longest distance from the camera to the mesh."); - -DEFINE_double(pixel_size, 0.01, - "The output orthoimage pixel size, in meters."); - -DEFINE_bool(snap_plane, false, - "Adjust the plane to project onto to make " - "angles multiple of 45 degrees with the coordinate axes."); - -DEFINE_string(plane_normal, "", - "Override the auto-computed the normal of the plane to " - "project onto with this. 
Specify as three values in quotes."); - -// Shoot a set of rays from the camera onto the mesh and see -// where they intersect -void sample_mesh(cv::Mat const& image, - camera::CameraModel const& cam, - std::shared_ptr const& bvh_tree, - std::vector & intersections) { - // Wipe the output - intersections.clear(); - - Eigen::Vector3d cam_ctr = -cam.GetTransform().rotation().transpose() * - cam.GetTransform().translation(); - - // Sample the image uniformly - int num_samples = 100; - Eigen::Matrix3d cam_to_world_rot = cam.GetTransform().rotation().transpose(); -#pragma omp parallel for - for (int itx = 0; itx < num_samples; itx++) { - for (int ity = 0; ity < num_samples; ity++) { - double x = (image.cols - 1.0) * static_cast(itx) / (num_samples - 1.0); - double y = (image.rows - 1.0) * static_cast(ity) / (num_samples - 1.0); - - Eigen::Vector2d dist_pix; - dist_pix << x, y; - - Eigen::Vector2d undist_centered_pix; - cam.GetParameters().Convert - (dist_pix, &undist_centered_pix); - - // Ray from camera going through the pixel - Eigen::Vector3d cam_ray(undist_centered_pix.x() / cam.GetParameters().GetFocalVector()[0], - undist_centered_pix.y() / cam.GetParameters().GetFocalVector()[1], - 1.0); - cam_ray.normalize(); - - Eigen::Vector3d world_ray = cam_to_world_rot * cam_ray; - - // Set up the ray structure for the mesh - BVHTree::Ray bvh_ray; - bvh_ray.origin = dense_map::eigen_to_vec3f(cam_ctr); - bvh_ray.dir = dense_map::eigen_to_vec3f(world_ray); - bvh_ray.dir.normalize(); - bvh_ray.tmin = FLAGS_min_ray_length; - bvh_ray.tmax = FLAGS_max_ray_length; - - // Intersect the ray with the mesh - BVHTree::Hit hit; - if (!bvh_tree->intersect(bvh_ray, &hit)) - continue; - double cam_to_mesh_dist = hit.t; - - Eigen::Vector3d intersection = cam_ctr + cam_to_mesh_dist * world_ray; - -#pragma omp critical - { - intersections.push_back(intersection); - } - } - } - - if (intersections.empty()) - LOG(FATAL) << "Could not project the image onto the mesh.\n"; -} - -// Create a new coordinate system having the desired plane as its x-y -// plane, and the normal to the plane as its z axis. -void setup_plane_coord_system(camera::CameraModel const& cam, - Eigen::Vector3d & plane_normal, - Eigen::Matrix3d & world2planecoord) { - // The camera view (the ray starting at the camera center and going through - // the image center). - Eigen::Matrix3d cam_to_world_rot = cam.GetTransform().rotation().transpose(); - Eigen::Vector3d cam_dir = cam_to_world_rot * Eigen::Vector3d(0, 0, 1); - - // Ensure that the normal to the plane agrees with the camera direction - double dot_prod = cam_dir.dot(plane_normal); - if (dot_prod < 0) - plane_normal *= -1.0; - - // The "up" direction - Eigen::Vector3d z(0, 0, 1); - - Eigen::Vector3d lz = plane_normal.normalized(); - // The new y points "down", as suggested by the z axis, while still - // being in the plane - Eigen::Vector3d ly = -1.0 * (z - z.dot(plane_normal) * plane_normal); - // Treat the special case when the plane is perfectly horizontal, - // then the normal can't point down. Normalize. - if (ly.norm() == 0) - ly = Eigen::Vector3d(0, 1, 0); - ly.normalize(); - // The new x is then the remaining direction so that we have a - // right-handed coordinate system. - Eigen::Vector3d lx = ly.cross(lz); - if (lx.norm() == 0) { - lx = Eigen::Vector3d(1, 0, 0); // should never happen - ly = lz.cross(lx); - } - lx.normalize(); - - // The matrix to transform to the coordinate system having the - // desired plane to project onto as its x-y plane. 
- Eigen::Matrix3d planecoord2world(3, 3); - planecoord2world.col(0) = lx; - planecoord2world.col(1) = ly; - planecoord2world.col(2) = lz; - - world2planecoord = planecoord2world.transpose(); -} - -void orthoproject(cv::Mat const& image, - Eigen::Matrix3d const& world2planecoord, - Eigen::Vector3d const& plane_normal, - camera::CameraModel const& cam, - std::shared_ptr const& bvh_tree, - std::vector const& intersections, - double pixel_size, - cv::Mat & orthoimage) { - // Find the bounding box of the mesh points seen by the camera - // in the coordinate system of the desired plane - - double min_x = std::numeric_limits::max(), max_x = -min_x; - double min_y = min_x, max_y = max_x; - double min_z = min_x, max_z = max_x; - for (size_t it = 0; it < intersections.size(); it++) { - Eigen::Vector3d local_pt = world2planecoord * intersections[it]; - if (local_pt.x() < min_x) min_x = local_pt.x(); - if (local_pt.x() > max_x) max_x = local_pt.x(); - if (local_pt.y() < min_y) min_y = local_pt.y(); - if (local_pt.y() > max_y) max_y = local_pt.y(); - if (local_pt.z() < min_z) min_z = local_pt.z(); - if (local_pt.z() > max_z) max_z = local_pt.z(); - } - - if (min_x > max_x || min_y > max_y || min_z > max_z) - LOG(FATAL) << "The rays don't intersect the mesh."; - - // The output image dimensions - int num_cols = ceil((max_x - min_x)/pixel_size); - int num_rows = ceil((max_y - min_y)/pixel_size); - - orthoimage = cv::Mat(num_rows, num_cols, CV_8UC1, cv::Scalar(0)); - -#pragma omp parallel for - for (int col = 0; col < num_cols; col++) { - for (int row = 0; row < num_rows; row++) { - // Create a point in the orthoimage plane corresponding to a future pixel - double z = min_z; - double y = min_y + row * pixel_size + pixel_size/2.0; - double x = min_x + col * pixel_size + pixel_size/2.0; - Eigen::Vector3d world_pt = world2planecoord.transpose() * Eigen::Vector3d(x, y, z); - - // Shoot a ray from that point to the mesh - BVHTree::Ray bvh_ray; - bvh_ray.dir = dense_map::eigen_to_vec3f(plane_normal); - bvh_ray.origin = dense_map::eigen_to_vec3f(world_pt); - bvh_ray.tmin = 0; - bvh_ray.tmax = max_z - min_z; - - // Intersect the ray with the mesh - BVHTree::Hit hit; - if (!bvh_tree->intersect(bvh_ray, &hit)) - continue; - - double plane_to_mesh_dist = hit.t; - Eigen::Vector3d intersection = world_pt + plane_to_mesh_dist * plane_normal; - Eigen::Vector3d cam_pt = cam.GetTransform() * intersection; - - // Skip points that project behind the camera - if (cam_pt.z() <= 0) - continue; - - // Get the distorted pixel value - Eigen::Vector2d undist_centered_pix = - cam.GetParameters().GetFocalVector().cwiseProduct(cam_pt.hnormalized()); - Eigen::Vector2d dist_pix; - cam.GetParameters().Convert - (undist_centered_pix, &dist_pix); - - // Skip pixels that don't project in the image - if (dist_pix.x() < 0 || dist_pix.x() > image.cols - 1 || - dist_pix.y() < 0 || dist_pix.y() > image.rows - 1) - continue; - - // Find the pixel value - cv::Size s(1, 1); - cv::Mat interp_pix_val; - cv::Point2f P; - P.x = dist_pix[0]; - P.y = dist_pix[1]; - - // Do bilinear interpolation into the image - cv::getRectSubPix(image, s, P, interp_pix_val); - uchar color = interp_pix_val.at(0, 0); - - orthoimage.at(row, col) = color; - } - } -} - -void run_orthoproject(bool snap_plane, - std::string const& image_file, - double pixel_size, - std::string const& plane_normal_str, - std::shared_ptr const& bvh_tree, - camera::CameraModel & cam) { - // Manufacture the world_to_cam_file and output orthoimage_file - std::string world_to_cam_file, 
orthoimage_file; - - world_to_cam_file = ff_common::ReplaceInStr(image_file, - ".jpg", "_world2cam.txt"); - orthoimage_file = ff_common::ReplaceInStr(image_file, ".jpg", "_ortho.png"); - - if (world_to_cam_file == image_file || orthoimage_file == image_file) - LOG(FATAL) << "Expecting a .jpg image for file: " << image_file; - - Eigen::Affine3d world_to_cam; - if (!dense_map::readAffine(world_to_cam, world_to_cam_file)) - LOG(FATAL) << "Could not read a 4x4 matrix from: " << world_to_cam_file << "\n"; - cam.SetTransform(world_to_cam); - - cv::Mat image = cv::imread(image_file, CV_LOAD_IMAGE_GRAYSCALE); - if (image.rows == 0 || image.cols == 0) - LOG(FATAL) << "Could not read image from: " << image_file; - - // Shoot a set of rays from the camera onto the mesh and see - // where they intersect - std::vector intersections; - sample_mesh(image, cam, bvh_tree, intersections); - - Eigen::Vector3d plane_normal; - if (plane_normal_str != "") { - // The user wants to override the plane normal - std::istringstream is(plane_normal_str); - if (!(is >> plane_normal[0] >> plane_normal[1] >> plane_normal[2])) - LOG(FATAL) << "Could not parse three values (the plane normal) from: " << plane_normal_str; - std::cout << "Using user-defined normal to the plane: " - << plane_normal.transpose() << std::endl; - } else { - // Find the best fit plane for the projections of the pixels onto the walls - Eigen::Vector3d centroid; - dense_map::bestFitPlane(intersections, centroid, plane_normal); - - std::cout << "Normal to the plane: " << plane_normal.transpose() << std::endl; - - // Make the plane have angles of 45 degrees with the coordinate axes - if (snap_plane) { - dense_map::snapPlaneNormal(plane_normal); - std::cout << "Snapped normal to the plane: " << plane_normal.transpose() << std::endl; - } - } - - plane_normal.normalize(); - - // Create a new coordinate system having the desired plane as its x-y - // plane, and the normal to the plane as its z axis. - Eigen::Matrix3d world2planecoord; - setup_plane_coord_system(cam, plane_normal, world2planecoord); - - cv::Mat orthoimage; - orthoproject(image, world2planecoord, plane_normal, cam, - bvh_tree, intersections, pixel_size, - orthoimage); - - std::cout << "Writing: " << orthoimage_file << std::endl; - cv::imwrite(orthoimage_file, orthoimage); -} - -int main(int argc, char** argv) { - ff_common::InitFreeFlyerApplication(&argc, &argv); - - if (FLAGS_mesh.empty()) - LOG(FATAL) << "The mesh was not specified."; - - // Load the mesh - mve::TriangleMesh::Ptr mesh; - std::shared_ptr mesh_info; - std::shared_ptr graph; // TODO(oalexan1): Is this necessary? 
-  std::shared_ptr<BVHTree> bvh_tree;
-  dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree);
-
-  // Load the camera intrinsics
-  config_reader::ConfigReader config;
-  config.AddFile("cameras.config");
-  if (!config.ReadFiles())
-    LOG(FATAL) << "Could not read the configuration file.";
-  camera::CameraParameters cam_params(&config, "nav_cam");
-  camera::CameraModel cam(Eigen::Vector3d(), Eigen::Matrix3d::Identity(),
-                          cam_params);
-
-  // Run either a single image or a list of images
-  std::vector<std::string> image_list;
-  if (!FLAGS_image.empty()) {
-    image_list.push_back(FLAGS_image);
-  } else if (!FLAGS_image_list.empty()) {
-    std::ifstream ifs(FLAGS_image_list.c_str());
-    std::string image_file;
-    while (ifs >> image_file)
-      image_list.push_back(image_file);
-  } else {
-    LOG(FATAL) << "Must specify either an image to orthoproject or an image list.";
-  }
-
-  for (size_t it = 0; it < image_list.size(); it++)
-    run_orthoproject(FLAGS_snap_plane,
-                     image_list[it],
-                     FLAGS_pixel_size,
-                     FLAGS_plane_normal,
-                     bvh_tree, cam);
-
-  return 0;
-}
-
-#endif
-
 // Project a given image and camera onto a given mesh, and save an .obj file
 DEFINE_string(mesh, "", "The mesh to project onto.");
 
-DEFINE_string(image, "", "The image to orthoproject. It should be distorted, as extracted from a bag.");
+DEFINE_string(image, "", "The image to orthoproject. It should be distorted, as extracted "
+              "from a bag.");
 
 DEFINE_string(camera_to_world, "",
               "The camera to world file, as written by the geometry mapper. For example: "
@@ -389,18 +46,27 @@ DEFINE_string(camera_to_world, "",
 
 DEFINE_string(camera_name, "", "The camera name. For example: 'sci_cam'.");
 
-DEFINE_string(output_dir, "", "The output texture directory. The texture name will be <output_dir>/run.obj.");
+DEFINE_string(image_list, "", "The list of images to orthoproject, one per line, "
+              "unless specified individually as above.");
+
+DEFINE_string(camera_list, "", "The list of camera to world transform files to use, one per "
+              "line, unless specified individually as above.");
+
+DEFINE_int32(num_exclude_boundary_pixels, 0,
+             "Exclude pixels closer to the boundary than this when texturing.");
+
+DEFINE_string(output_prefix, "", "The output prefix. The texture name will be"
+              " <output_prefix>-<image name>.obj.");
 
 int main(int argc, char** argv) {
   ff_common::InitFreeFlyerApplication(&argc, &argv);
 
-  if (FLAGS_mesh.empty() || FLAGS_image.empty() || FLAGS_camera_to_world.empty() || FLAGS_camera_name.empty() ||
-      FLAGS_output_dir.empty())
-    LOG(FATAL) << "Not all inputs were specified.";
+  if ((FLAGS_image.empty() || FLAGS_camera_to_world.empty()) &&
+      (FLAGS_image_list.empty() || FLAGS_camera_list.empty()))
+    LOG(FATAL) << "Must specify either an image and camera, or an image list and camera list.";
 
-  if (!boost::filesystem::exists(FLAGS_output_dir))
-    if (!boost::filesystem::create_directories(FLAGS_output_dir) || !boost::filesystem::is_directory(FLAGS_output_dir))
-      LOG(FATAL) << "Failed to create directory: " << FLAGS_output_dir;
+  if (FLAGS_mesh.empty() || FLAGS_camera_name.empty() || FLAGS_output_prefix.empty())
+    LOG(FATAL) << "Not all inputs were specified.";
 
   // Load the mesh
   mve::TriangleMesh::Ptr mesh;
@@ -409,9 +75,6 @@ int main(int argc, char** argv) {
   std::shared_ptr<BVHTree> bvh_tree;
   dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree);
 
-  std::cout << "Reading image: " << FLAGS_image << std::endl;
-  cv::Mat image = cv::imread(FLAGS_image);
-
   // Load the camera intrinsics
   config_reader::ConfigReader config;
   config.AddFile("cameras.config");
@@ -419,47 +82,37 @@ int main(int argc, char** argv) {
   std::cout << "Using camera: " << FLAGS_camera_name << std::endl;
   camera::CameraParameters cam_params(&config, FLAGS_camera_name.c_str());
 
-  std::cout << "Reading pose: " << FLAGS_camera_to_world << std::endl;
-  Eigen::Affine3d cam_to_world;
-  dense_map::readAffine(cam_to_world, FLAGS_camera_to_world);
-  std::cout << "Read pose\n" << cam_to_world.matrix() << std::endl;
-
-  std::vector face_vec;
-  std::map uv_map;
-  int num_exclude_boundary_pixels = 0;
-
-  std::vector<unsigned int> const& faces = mesh->get_faces();
-  int num_faces = faces.size();
-  std::vector<double> smallest_cost_per_face(num_faces, 1.0e+100);
-
-  camera::CameraModel cam(cam_to_world.inverse(), cam_params);
-
-  // Find the UV coordinates and the faces having them
-  std::string out_prefix = "run";
-  dense_map::projectTexture(mesh, bvh_tree, image, cam, num_exclude_boundary_pixels, smallest_cost_per_face, face_vec,
-                            uv_map);
+  std::vector<std::string> images, cameras;
+  if (!FLAGS_image.empty() && !FLAGS_camera_to_world.empty()) {
+    images.push_back(FLAGS_image);
+    cameras.push_back(FLAGS_camera_to_world);
+  } else {
+    std::string val;
+    std::ifstream ifs(FLAGS_image_list);
+    while (ifs >> val)
+      images.push_back(val);
+    ifs.close();
+    ifs = std::ifstream(FLAGS_camera_list);
+    while (ifs >> val)
+      cameras.push_back(val);
+    ifs.close();
+  }
 
-  std::string obj_str;
-  dense_map::formObjCustomUV(mesh, face_vec, uv_map, out_prefix, obj_str);
+  if (images.size() != cameras.size())
+    LOG(FATAL) << "As many images as cameras must be specified.\n";
 
-  std::string mtl_str;
-  dense_map::formMtl(out_prefix, mtl_str);
+  for (size_t it = 0; it < images.size(); it++) {
+    cv::Mat image = cv::imread(images[it]);
 
-  std::string obj_file = FLAGS_output_dir + "/" + out_prefix + ".obj";
-  std::cout << "Writing: " << obj_file << std::endl;
-  std::ofstream obj_handle(obj_file);
-  obj_handle << obj_str;
-  obj_handle.close();
+    Eigen::Affine3d cam_to_world;
+    dense_map::readAffine(cam_to_world, cameras[it]);
 
-  std::string mtl_file = FLAGS_output_dir + "/" + out_prefix + ".mtl";
-  std::cout << "Writing: " << mtl_file << std::endl;
-  std::ofstream mtl_handle(mtl_file);
-  mtl_handle << mtl_str;
-  mtl_handle.close();
+    std::string prefix =
FLAGS_output_prefix + "-" + + boost::filesystem::path(images[it]).stem().string(); - std::string texture_file = FLAGS_output_dir + "/" + out_prefix + ".png"; - std::cout << "Writing: " << texture_file << std::endl; - cv::imwrite(texture_file, image); + dense_map::meshProject(mesh, bvh_tree, image, cam_to_world.inverse(), cam_params, + FLAGS_num_exclude_boundary_pixels, prefix); + } return 0; } diff --git a/dense_map/geometry_mapper/tools/process_bag.cc b/dense_map/geometry_mapper/tools/scale_bag.cc similarity index 98% rename from dense_map/geometry_mapper/tools/process_bag.cc rename to dense_map/geometry_mapper/tools/scale_bag.cc index 9b3c9bc2..75f9d9e8 100644 --- a/dense_map/geometry_mapper/tools/process_bag.cc +++ b/dense_map/geometry_mapper/tools/scale_bag.cc @@ -120,7 +120,8 @@ int main(int argc, char** argv) { // Maybe resize cv::Mat resized_image; if (FLAGS_scale < 1.0) { - cv::resize(*image_ptr, resized_image, cv::Size(), FLAGS_scale, FLAGS_scale, cv::INTER_AREA); + cv::resize(*image_ptr, resized_image, cv::Size(), FLAGS_scale, FLAGS_scale, + cv::INTER_AREA); image_ptr = &resized_image; } diff --git a/dense_map/geometry_mapper/tools/streaming_mapper.cc b/dense_map/geometry_mapper/tools/streaming_mapper.cc index fe88c933..59db877c 100644 --- a/dense_map/geometry_mapper/tools/streaming_mapper.cc +++ b/dense_map/geometry_mapper/tools/streaming_mapper.cc @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -77,9 +76,12 @@ class StreamingMapper { void ReadParams(ros::NodeHandle const& nh); void publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, - double max_iso_times_exposure, double iso, double exposure, int processed_camera_count, - cv::Mat const& image, double image_timestamp, camera::CameraModel const& cam, - std::vector& smallest_cost_per_face, std::string const& out_prefix); + double max_iso_times_exposure, double iso, double exposure, + int processed_camera_count, + cv::Mat const& image, double image_timestamp, + camera::CameraModel const& cam, + std::vector& smallest_cost_per_face, + std::string const& out_prefix); void TextureCamSimPoseCallback(const geometry_msgs::PoseStamped::ConstPtr& pose); void TextureCamSimInfoCallback(const sensor_msgs::CameraInfo::ConstPtr& info); @@ -89,7 +91,7 @@ class StreamingMapper { void SciCamExifCallback(std_msgs::Float64MultiArray::ConstPtr const& exif); void CompressedTextureCallback(const sensor_msgs::CompressedImageConstPtr& msg); void UncompressedTextureCallback(const sensor_msgs::ImageConstPtr& msg); - void AddTextureCamPose(double nav_cam_timestamp, Eigen::Affine3d const& nav_cam_pose); + void AddTextureCamPose(double nav_cam_timestamp, Eigen::Affine3d const& nav_cam_to_world); void WipeOldImages(); std::shared_ptr image_transport; @@ -109,44 +111,46 @@ class StreamingMapper { config_reader::ConfigReader config_params; camera::CameraParameters m_texture_cam_params; - sensor_msgs::Image texture_obj_msg, texture_mtl_msg; + sensor_msgs::Image m_texture_obj_msg, m_texture_mtl_msg; - double navcam_to_texture_cam_timestamp_offset; - Eigen::MatrixXd texture_cam_to_navcam_trans; - Eigen::MatrixXd navcam_to_body_trans; + double m_nav_cam_to_texture_cam_timestamp_offset; + Eigen::MatrixXd m_texture_cam_to_nav_cam_trans; + Eigen::MatrixXd m_nav_cam_to_body_trans; - std::string nav_cam_pose_topic, ekf_state_topic, ekf_pose_topic, texture_cam_topic, sci_cam_exif_topic, - texture_cam_type, mesh_file; - bool sim_mode, save_to_disk, use_single_texture; + std::string nav_cam_pose_topic, 
ekf_state_topic, ekf_pose_topic, texture_cam_topic, + sci_cam_exif_topic, m_texture_cam_type, mesh_file; + bool m_sim_mode, save_to_disk, m_sci_cam_exp_corr, use_single_texture; // For meshing mve::TriangleMesh::Ptr mesh; std::shared_ptr mesh_info; - std::shared_ptr graph; // TODO(oalexan1): Is this necessary? + std::shared_ptr graph; std::shared_ptr bvh_tree; // Each callback must have a lock - std::mutex nav_cam_pose_lock, texture_cam_pose_lock, texture_cam_info_lock, texture_cam_image_lock, sci_cam_exif_lock; + std::mutex nav_cam_pose_lock, texture_cam_pose_lock, texture_cam_info_lock, + texture_cam_image_lock, sci_cam_exif_lock; // Data indexed by timestamp std::map nav_cam_localization_poses; std::map nav_cam_landmark_poses; - std::map texture_cam_poses; + std::map texture_cam_to_world_map; std::map texture_cam_images; std::map > sci_cam_exif; // For processing pictures as the camera moves along - double dist_between_processed_cams, max_iso_times_exposure; - Eigen::Vector3d last_processed_cam_ctr; - std::vector smallest_cost_per_face; - int processed_camera_count; + double m_dist_between_processed_cams, m_angle_between_processed_cams, m_max_iso_times_exposure; + Eigen::Vector3d m_prev_cam_ctr; + Eigen::Affine3d m_prev_cam_to_world; + std::vector m_smallest_cost_per_face; + int m_processed_camera_count; double num_exclude_boundary_pixels; // The info for each face which allows one later to project an image onto // the face and compute the texture. - std::vector face_projection_info; + std::vector m_face_projection_info; - tex::Model texture_model; + IsaacObjModel texture_model; std::vector texture_atlases; cv::Mat model_texture; @@ -161,11 +165,12 @@ class StreamingMapper { StreamingMapper::StreamingMapper() : m_texture_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)), - navcam_to_texture_cam_timestamp_offset(0.0), - texture_cam_to_navcam_trans(Eigen::MatrixXd::Identity(4, 4)), - navcam_to_body_trans(Eigen::MatrixXd::Identity(4, 4)), - last_processed_cam_ctr(Eigen::Vector3d(std::numeric_limits::quiet_NaN(), 0.0, 0.0)), - processed_camera_count(0) {} + m_nav_cam_to_texture_cam_timestamp_offset(0.0), + m_texture_cam_to_nav_cam_trans(Eigen::MatrixXd::Identity(4, 4)), + m_nav_cam_to_body_trans(Eigen::MatrixXd::Identity(4, 4)), + m_prev_cam_ctr(Eigen::Vector3d(std::numeric_limits::quiet_NaN(), 0.0, 0.0)), + m_prev_cam_to_world(Eigen::Affine3d::Identity()), + m_processed_camera_count(0) {} StreamingMapper::~StreamingMapper(void) { thread->join(); } @@ -176,7 +181,8 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { // Set the config path to ISAAC char* path; - if ((path = getenv("ISAAC_CONFIG_DIR")) == NULL) ROS_FATAL("Could not find the config path."); + if ((path = getenv("CUSTOM_CONFIG_DIR")) == NULL) + ROS_FATAL("Could not find the config path. 
Ensure that CUSTOM_CONFIG_DIR was set."); config_params.SetPath(path); config_params.AddFile("dense_map/streaming_mapper.config"); @@ -185,87 +191,64 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { StreamingMapper::ReadParams(nh); // Read configuration data - if (!sim_mode) { - Eigen::MatrixXd hazcam_to_navcam_trans; - Eigen::MatrixXd scicam_to_hazcam_trans; - Eigen::Affine3d hazcam_depth_to_image_transform; - double navcam_to_hazcam_timestamp_offset, scicam_to_hazcam_timestamp_offset; - - camera::CameraParameters nav_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters haz_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); - camera::CameraParameters sci_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), Eigen::Vector2d(0, 0)); + if (!m_sim_mode) { + // Read the bot config file + std::vector cam_names = {m_texture_cam_type}; + std::vector cam_params; + std::vector nav_to_cam_trans; + std::vector nav_to_cam_timestamp_offsets; + Eigen::Affine3d hazcam_depth_to_image_transform; + Eigen::Affine3d nav_cam_to_body_trans; + + dense_map::readConfigFile( // Inputs + cam_names, + "nav_cam_transform", // this is the nav cam to body transform + "haz_cam_depth_to_image_transform", + // Outputs + cam_params, nav_to_cam_trans, nav_to_cam_timestamp_offsets, nav_cam_to_body_trans, + hazcam_depth_to_image_transform); + + m_nav_cam_to_body_trans = nav_cam_to_body_trans.matrix(); { // Note the lock, because m_texture_cam_params is a shared resource const std::lock_guard lock(texture_cam_info_lock); - dense_map::readConfigFile("navcam_to_hazcam_timestamp_offset", "scicam_to_hazcam_timestamp_offset", - "hazcam_to_navcam_transform", "scicam_to_hazcam_transform", "nav_cam_transform", - "hazcam_depth_to_image_transform", navcam_to_hazcam_timestamp_offset, - scicam_to_hazcam_timestamp_offset, hazcam_to_navcam_trans, scicam_to_hazcam_trans, - navcam_to_body_trans, hazcam_depth_to_image_transform, nav_cam_params, haz_cam_params, - sci_cam_params); - - // Transform to convert from given camera to nav camera coordinates - if (texture_cam_type == "nav_cam") { - texture_cam_to_navcam_trans = Eigen::MatrixXd::Identity(4, 4); - navcam_to_texture_cam_timestamp_offset = 0.0; - m_texture_cam_params = nav_cam_params; - } else if (texture_cam_type == "haz_cam") { - texture_cam_to_navcam_trans = hazcam_to_navcam_trans; - navcam_to_texture_cam_timestamp_offset = navcam_to_hazcam_timestamp_offset; - m_texture_cam_params = haz_cam_params; - } else if (texture_cam_type == "sci_cam") { - texture_cam_to_navcam_trans = hazcam_to_navcam_trans * scicam_to_hazcam_trans; - navcam_to_texture_cam_timestamp_offset = navcam_to_hazcam_timestamp_offset - scicam_to_hazcam_timestamp_offset; - m_texture_cam_params = sci_cam_params; - } else { - LOG(FATAL) << "Invalid texture cam type: " << texture_cam_type; - } -#if 0 - std::cout << "hazcam_to_navcam_trans\n" << hazcam_to_navcam_trans << "\n"; - std::cout << "scicam_to_hazcam_trans\n" << scicam_to_hazcam_trans << "\n"; - std::cout << "texture_cam_to_navcam_trans\n" << texture_cam_to_navcam_trans << "\n"; - std::cout << "navcam_to_hazcam_timestamp_offset: " - << navcam_to_hazcam_timestamp_offset - << "\n"; - std::cout << "scicam_to_hazcam_timestamp_offset: " - << scicam_to_hazcam_timestamp_offset - << "\n"; - std::cout << "hazcam_depth_to_image_transform\n" - << hazcam_depth_to_image_transform.matrix() - << "\n"; - - std::cout << "navcam_to_texture_cam_timestamp_offset: " - << 
navcam_to_texture_cam_timestamp_offset << "\n"; - std::cout << "texture cam focal vector: " - << m_texture_cam_params.GetFocalVector().transpose() << "\n"; -#endif + // Index 0 below, based on the order in cam_names + m_texture_cam_to_nav_cam_trans = nav_to_cam_trans[0].inverse().matrix(); + m_nav_cam_to_texture_cam_timestamp_offset = nav_to_cam_timestamp_offsets[0]; + m_texture_cam_params = cam_params[0]; + + ROS_INFO_STREAM("nav_cam_to_texture_cam_timestamp_offset: " + << m_nav_cam_to_texture_cam_timestamp_offset << "\n"); } } // Set up the publishers - std::string mapper_img_topic = "/ism/" + texture_cam_type + "/img"; - std::string mapper_obj_topic = "/ism/" + texture_cam_type + "/obj"; - std::string mapper_mtl_topic = "/ism/" + texture_cam_type + "/mtl"; - ROS_INFO_STREAM("Publishing topics: " << mapper_img_topic << ' ' << mapper_obj_topic << ' ' << mapper_mtl_topic); + std::string mapper_img_topic = "/ism/" + m_texture_cam_type + "/img"; + std::string mapper_obj_topic = "/ism/" + m_texture_cam_type + "/obj"; + std::string mapper_mtl_topic = "/ism/" + m_texture_cam_type + "/mtl"; + ROS_INFO_STREAM("Publishing topics: " << mapper_img_topic << ' ' + << mapper_obj_topic << ' ' << mapper_mtl_topic); texture_img_pub = nh.advertise(mapper_img_topic, 1); texture_obj_pub = nh.advertise(mapper_obj_topic, 1); texture_mtl_pub = nh.advertise(mapper_mtl_topic, 1); // Set up the subscribers - if (sim_mode) { + if (m_sim_mode) { // Subscribe to the simulated texture cam image pose and intrinsics. // Keep a bunch of them in the queue. // The name of these topics are TOPIC_HAZ_CAM_SIM_POSE, etc., in ff_names.h. - std::string texture_cam_sim_pose_topic = "/sim/" + texture_cam_type + "/pose"; - std::string texture_cam_sim_info_topic = "/sim/" + texture_cam_type + "/info"; - ROS_INFO_STREAM("Subscribing to " << texture_cam_sim_pose_topic); - ROS_INFO_STREAM("Subscribing to " << texture_cam_sim_info_topic); + std::string texture_cam_sim_pose_topic = "/sim/" + m_texture_cam_type + "/pose"; + std::string texture_cam_sim_info_topic = "/sim/" + m_texture_cam_type + "/info"; + ROS_INFO_STREAM("Subscribing to: " << texture_cam_sim_pose_topic); + ROS_INFO_STREAM("Subscribing to: " << texture_cam_sim_info_topic); texture_cam_pose_sub = - nh.subscribe(texture_cam_sim_pose_topic, 10, &StreamingMapper::TextureCamSimPoseCallback, this); + nh.subscribe(texture_cam_sim_pose_topic, 10, + &StreamingMapper::TextureCamSimPoseCallback, this); texture_cam_info_sub = - nh.subscribe(texture_cam_sim_info_topic, 10, &StreamingMapper::TextureCamSimInfoCallback, this); + nh.subscribe(texture_cam_sim_info_topic, 10, + &StreamingMapper::TextureCamSimInfoCallback, this); } else { // Get the nav cam pose topic or the ekf state topic, or the ekf // pose topic, from which the other camera poses are deduced. @@ -281,17 +264,20 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { } } - sci_cam_exif_topic = "/hw/sci_cam_exif"; - ROS_INFO_STREAM("Subscribing to sci cam exif topic: " << sci_cam_exif_topic); - sci_cam_exif_sub = nh.subscribe(sci_cam_exif_topic, 10, &StreamingMapper::SciCamExifCallback, this); + if (m_texture_cam_type == "sci_cam" && !m_sim_mode && m_sci_cam_exp_corr) { + sci_cam_exif_topic = "/hw/sci_cam_exif"; + ROS_INFO_STREAM("Subscribing to sci cam exif topic: " << sci_cam_exif_topic); + sci_cam_exif_sub = nh.subscribe(sci_cam_exif_topic, 10, + &StreamingMapper::SciCamExifCallback, this); + } // Subscribe to images. Keep just 2 in the queue as they can be big. 
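 // The dispatch below keys off the topic name: a name ending in
 // "/compressed" (for the sci cam this is typically something like
 // "/hw/cam_sci/compressed") goes to CompressedTextureCallback, and
 // anything else goes to UncompressedTextureCallback.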
image_transport.reset(new image_transport::ImageTransport(nh)); std::string compressed = "/compressed"; if (texture_cam_topic.length() >= compressed.length() && - texture_cam_topic.compare(texture_cam_topic.length() - compressed.length(), compressed.length(), compressed) == - 0) { + texture_cam_topic.compare(texture_cam_topic.length() - compressed.length(), + compressed.length(), compressed) == 0) { // texture_cam_topic ends with the string /compressed ROS_INFO_STREAM("Subscribing to compressed image topic: " << texture_cam_topic); compressed_texture_image_sub = @@ -299,7 +285,8 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { } else { ROS_INFO_STREAM("Subscribing to uncompressed image topic: " << texture_cam_topic); uncompressed_texture_image_sub = - image_transport->subscribe(texture_cam_topic, 2, &StreamingMapper::UncompressedTextureCallback, this); + image_transport->subscribe(texture_cam_topic, 2, + &StreamingMapper::UncompressedTextureCallback, this); } // Load the mesh @@ -308,10 +295,11 @@ void StreamingMapper::Initialize(ros::NodeHandle& nh) { std::vector const& faces = mesh->get_faces(); int num_faces = faces.size(); - smallest_cost_per_face = std::vector(num_faces, 1.0e+100); + m_smallest_cost_per_face = std::vector(num_faces, 1.0e+100); if (use_single_texture) - dense_map::formModel(mesh, pixel_size, num_threads, face_projection_info, texture_atlases, texture_model); + dense_map::formModel(mesh, pixel_size, num_threads, m_face_projection_info, + texture_atlases, texture_model); } catch (std::exception& e) { LOG(FATAL) << "Could not load mesh.\n" << e.what() << "\n"; } @@ -330,12 +318,14 @@ void StreamingMapper::ReadParams(ros::NodeHandle const& nh) { ROS_FATAL("Could not read the ekf_state_topic parameter."); if (!config_params.GetStr("texture_cam_topic", &texture_cam_topic)) ROS_FATAL("Could not read the texture_cam_topic parameter."); - if (!config_params.GetStr("texture_cam_type", &texture_cam_type)) + if (!config_params.GetStr("texture_cam_type", &m_texture_cam_type)) ROS_FATAL("Could not read the texture_cam_type parameter."); if (!config_params.GetStr("mesh_file", &mesh_file)) ROS_FATAL("Could not read the mesh_file parameter."); - if (!config_params.GetReal("dist_between_processed_cams", &dist_between_processed_cams)) + if (!config_params.GetReal("dist_between_processed_cams", &m_dist_between_processed_cams)) ROS_FATAL("Could not read the dist_between_processed_cams parameter."); - if (!config_params.GetReal("max_iso_times_exposure", &max_iso_times_exposure)) + if (!config_params.GetReal("angle_between_processed_cams", &m_angle_between_processed_cams)) + ROS_FATAL("Could not read the angle_between_processed_cams parameter."); + if (!config_params.GetReal("max_iso_times_exposure", &m_max_iso_times_exposure)) ROS_FATAL("Could not read the max_iso_times_exposure parameter."); if (!config_params.GetReal("pixel_size", &pixel_size)) ROS_FATAL("Could not read the pixel_size parameter."); if (!config_params.GetReal("num_exclude_boundary_pixels", &num_exclude_boundary_pixels)) @@ -345,50 +335,66 @@ void StreamingMapper::ReadParams(ros::NodeHandle const& nh) { // The sim_mode parameter is very important, it must be read early // on as a lot of logic depends on its value. 
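 // It is read with nh.getParam() from the ROS parameter server rather
 // than from the Lua config file, so it can be set directly from a
 // launch file.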
- nh.getParam("sim_mode", sim_mode); - - if (!config_params.GetBool("save_to_disk", &save_to_disk)) ROS_FATAL("Could not read the save_to_disk parameter."); + nh.getParam("sim_mode", m_sim_mode); if (!config_params.GetBool("use_single_texture", &use_single_texture)) ROS_FATAL("Could not read the use_single_texture parameter."); - ROS_INFO_STREAM("Texture camera type = " << texture_cam_type); + if (!config_params.GetBool("sci_cam_exposure_correction", &m_sci_cam_exp_corr)) + ROS_FATAL("Could not read the sci_cam_exposure_correction parameter."); + + if (!config_params.GetBool("save_to_disk", &save_to_disk)) + ROS_FATAL("Could not read the save_to_disk parameter."); + + ROS_INFO_STREAM("Texture camera type = " << m_texture_cam_type); ROS_INFO_STREAM("Mesh = " << mesh_file); - if (!sim_mode) { - int num = (!nav_cam_pose_topic.empty()) + (!ekf_state_topic.empty()) + (!ekf_pose_topic.empty()); + if (!m_sim_mode) { + int num = (!nav_cam_pose_topic.empty()) + (!ekf_state_topic.empty()) + + (!ekf_pose_topic.empty()); if (num != 1) LOG(FATAL) << "Must specify exactly only one of nav_cam_pose_topic, " << "ekf_state_topic, ekf_pose_topic."; - - ROS_INFO_STREAM("dist_between_processed_cams = " << dist_between_processed_cams); - ROS_INFO_STREAM("max_iso_times_exposure = " << max_iso_times_exposure); - ROS_INFO_STREAM("use_single_texture = " << use_single_texture); - ROS_INFO_STREAM("pixel_size = " << pixel_size); - ROS_INFO_STREAM("num_threads = " << num_threads); - ROS_INFO_STREAM("sim_mode = " << sim_mode); - ROS_INFO_STREAM("save_to_disk = " << save_to_disk); - ROS_INFO_STREAM("num_exclude_boundary_pixels = " << num_exclude_boundary_pixels); - ROS_INFO_STREAM("ASTROBEE_ROBOT = " << getenv("ASTROBEE_ROBOT")); } - if (texture_cam_type != "nav_cam" && texture_cam_type != "sci_cam" && texture_cam_type != "haz_cam" && - texture_cam_type != "heat_cam" && texture_cam_type != "acoustics_cam") - LOG(FATAL) << "Invalid texture cam type: " << texture_cam_type; + if (m_sim_mode && m_texture_cam_type == "nav_cam") + LOG(FATAL) << "The streaming mapper does not support nav_cam with simulated data as " + << "its distortion is not modeled.\n"; + + ROS_INFO_STREAM("Streaming mapper parameters:"); + ROS_INFO_STREAM("dist_between_processed_cams = " << m_dist_between_processed_cams); + ROS_INFO_STREAM("angle_between_processed_cams = " << m_angle_between_processed_cams); + ROS_INFO_STREAM("max_iso_times_exposure = " << m_max_iso_times_exposure); + ROS_INFO_STREAM("use_single_texture = " << use_single_texture); + ROS_INFO_STREAM("sci_cam_exposure_correction = " << m_sci_cam_exp_corr); + ROS_INFO_STREAM("pixel_size = " << pixel_size); + ROS_INFO_STREAM("num_threads = " << num_threads); + ROS_INFO_STREAM("sim_mode = " << m_sim_mode); + ROS_INFO_STREAM("save_to_disk = " << save_to_disk); + ROS_INFO_STREAM("num_exclude_boundary_pixels = " << num_exclude_boundary_pixels); + ROS_INFO_STREAM("ASTROBEE_ROBOT = " << getenv("ASTROBEE_ROBOT")); + ROS_INFO_STREAM("ASTROBEE_WORLD = " << getenv("ASTROBEE_WORLD")); // For a camera like sci_cam, the word "sci" better be part of texture_cam_topic. 
- std::string cam_name = texture_cam_type.substr(0, 3); + std::string cam_name = m_texture_cam_type.substr(0, 3); if (texture_cam_topic.find(cam_name) == std::string::npos) - LOG(FATAL) << "Texture topic " << texture_cam_topic << " is expected to contain the string " << cam_name; + LOG(FATAL) << "Texture topic " << texture_cam_topic << " is expected to contain the string " + << cam_name; } -void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std::shared_ptr bvh_tree, - double max_iso_times_exposure, double iso, double exposure, - int processed_camera_count, cv::Mat const& image, double image_timestamp, - camera::CameraModel const& cam, std::vector& smallest_cost_per_face, +void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, + std::shared_ptr bvh_tree, + double max_iso_times_exposure, + double iso, double exposure, + int processed_camera_count, + cv::Mat const& image, double image_timestamp, + camera::CameraModel const& cam, + std::vector& smallest_cost_per_face, std::string const& out_prefix) { - omp_set_dynamic(0); // Explicitly disable dynamic teams - omp_set_num_threads(num_threads); // Use this many threads for all consecutive - // parallel regions + // Explicitly disable dynamic determination of number of threads + omp_set_dynamic(0); + + // Use this many threads for all consecutive parallel regions + omp_set_num_threads(num_threads); // Colorize the image if grayscale. Careful here to avoid making a // copy if we don't need one. Keep img_ptr pointing at the final @@ -418,22 +424,21 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std: img_ptr = &scaled_image; } - // std::cout << "Exposure correction took " << timer1.get_elapsed()/1000.0 << - // " seconds\n"; - // Prepare the image for publishing sensor_msgs::ImagePtr msg; std::vector face_vec; std::map uv_map; if (!use_single_texture) { // Find the UV coordinates and the faces having them - dense_map::projectTexture(mesh, bvh_tree, *img_ptr, cam, num_exclude_boundary_pixels, smallest_cost_per_face, + dense_map::projectTexture(mesh, bvh_tree, *img_ptr, cam, num_exclude_boundary_pixels, + smallest_cost_per_face, face_vec, uv_map); msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", *img_ptr).toImageMsg(); } else { - dense_map::projectTexture(mesh, bvh_tree, *img_ptr, cam, smallest_cost_per_face, pixel_size, num_threads, - face_projection_info, texture_atlases, texture_model, model_texture); + dense_map::projectTexture(mesh, bvh_tree, *img_ptr, cam, smallest_cost_per_face, + pixel_size, num_threads, m_face_projection_info, texture_atlases, + texture_model, model_texture); // Note that the output image has an alpha channel msg = cv_bridge::CvImage(std_msgs::Header(), "bgra8", model_texture).toImageMsg(); } @@ -457,22 +462,23 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std: int height = obj_len / width + 1; if (width * height <= obj_len) LOG(FATAL) << "Not enough room allocated for the image."; - texture_obj_msg.header.stamp = ros::Time(image_timestamp); - texture_obj_msg.width = width; - texture_obj_msg.height = height; - texture_obj_msg.step = width; - texture_obj_msg.encoding = "mono8"; - texture_obj_msg.is_bigendian = false; - texture_obj_msg.data.resize(width * height); + m_texture_obj_msg.header.stamp = ros::Time(image_timestamp); + m_texture_obj_msg.width = width; + m_texture_obj_msg.height = height; + m_texture_obj_msg.step = width; + m_texture_obj_msg.encoding = "mono8"; + m_texture_obj_msg.is_bigendian = false; + 
m_texture_obj_msg.data.resize(width * height); std::copy(reinterpret_cast(&obj_str[0]), // input beg reinterpret_cast(&obj_str[0]) + obj_len, // input end - texture_obj_msg.data.begin()); // destination + m_texture_obj_msg.data.begin()); // destination // Pad with nulls - for (int it = obj_len; it < width * height; it++) texture_obj_msg.data[it] = '\0'; + for (int it = obj_len; it < width * height; it++) m_texture_obj_msg.data[it] = '\0'; // When using a single texture, publish just once - if (!use_single_texture || processed_camera_count == 0) texture_obj_pub.publish(texture_obj_msg); + if (!use_single_texture || processed_camera_count == 0) + texture_obj_pub.publish(m_texture_obj_msg); } // Publish the mtl string as a mono image with one row (this hack is temporary) @@ -480,35 +486,36 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std: if (!use_single_texture || processed_camera_count == 0 || save_to_disk) { formMtl(out_prefix, mtl_str); int mtl_len = mtl_str.size(); - texture_mtl_msg.header.stamp = ros::Time(image_timestamp); - texture_mtl_msg.width = mtl_len; - texture_mtl_msg.height = 1; - texture_mtl_msg.step = mtl_len; - texture_mtl_msg.encoding = "mono8"; - texture_mtl_msg.is_bigendian = false; - texture_mtl_msg.data.resize(mtl_len); + m_texture_mtl_msg.header.stamp = ros::Time(image_timestamp); + m_texture_mtl_msg.width = mtl_len; + m_texture_mtl_msg.height = 1; + m_texture_mtl_msg.step = mtl_len; + m_texture_mtl_msg.encoding = "mono8"; + m_texture_mtl_msg.is_bigendian = false; + m_texture_mtl_msg.data.resize(mtl_len); std::copy(reinterpret_cast(&mtl_str[0]), // input beg reinterpret_cast(&mtl_str[0]) + mtl_len, // input end - texture_mtl_msg.data.begin()); // destination + m_texture_mtl_msg.data.begin()); // destination - if (!use_single_texture || processed_camera_count == 0) texture_mtl_pub.publish(texture_mtl_msg); + if (!use_single_texture || processed_camera_count == 0) + texture_mtl_pub.publish(m_texture_mtl_msg); } if (save_to_disk) { std::string obj_file = out_prefix + ".obj"; - std::cout << "Writing: " << obj_file << std::endl; + std::cout << "Writing: ~/.ros/" << obj_file << std::endl; std::ofstream obj_handle(obj_file); obj_handle << obj_str; obj_handle.close(); std::string mtl_file = out_prefix + ".mtl"; - std::cout << "Writing: " << mtl_file << std::endl; + std::cout << "Writing: ~/.ros/" << mtl_file << std::endl; std::ofstream mtl_handle(mtl_file); mtl_handle << mtl_str; mtl_handle.close(); std::string png_file = out_prefix + ".png"; - std::cout << "Writing: " << png_file << std::endl; + std::cout << "Writing: ~/.ros/" << png_file << std::endl; if (!use_single_texture) cv::imwrite(png_file, *img_ptr); else @@ -519,27 +526,27 @@ void StreamingMapper::publishTexturedMesh(mve::TriangleMesh::ConstPtr mesh, std: // Add the latest received texture cam pose to the storage. This is meant to be used // only for simulated data. Else use the landmark and ekf callbacks. void StreamingMapper::TextureCamSimPoseCallback(const geometry_msgs::PoseStamped::ConstPtr& pose) { - if (!sim_mode) return; + if (!m_sim_mode) return; // Add the latest pose. Use a lock. 
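 // The poses are stored in a std::map keyed by timestamp, so they
 // stay sorted in time; the processing loop depends on this ordering
 // to bracket an image timestamp between two poses and interpolate.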
  const std::lock_guard<std::mutex> lock(texture_cam_pose_lock);
 
   // Convert the pose to Eigen::Affine3d
-  Eigen::Affine3d texture_cam_pose;
-  tf::poseMsgToEigen(pose->pose, texture_cam_pose);
-  texture_cam_poses[pose->header.stamp.toSec()] = texture_cam_pose;
+  Eigen::Affine3d texture_cam_to_world;
+  tf::poseMsgToEigen(pose->pose, texture_cam_to_world);
+  texture_cam_to_world_map[pose->header.stamp.toSec()] = texture_cam_to_world;
 
   // std::cout << "Received the sim texture cam pose at time: " << pose->header.stamp.toSec()
   // << "\n";
 
   // Wipe the oldest poses. Keep a lot of them as sometimes they can come very often.
-  while (texture_cam_poses.size() > 100) {
-    texture_cam_poses.erase(texture_cam_poses.begin());
+  while (texture_cam_to_world_map.size() > 100) {
+    texture_cam_to_world_map.erase(texture_cam_to_world_map.begin());
   }
 }
 
 void StreamingMapper::TextureCamSimInfoCallback(const sensor_msgs::CameraInfo::ConstPtr& info) {
-  if (!sim_mode) return;
+  if (!m_sim_mode) return;
 
   // Initialize m_texture_cam_params just once. Use a lock.
   if (m_texture_cam_params.GetFocalVector() == Eigen::Vector2d(0, 0)) {
@@ -565,87 +572,91 @@ void StreamingMapper::TextureCamSimInfoCallback(const sensor_msgs::CameraInfo::C
 
     // std::cout << "Received the camera info at time: " << info->header.stamp.toSec() << "\n";
 
     m_texture_cam_params =
-      camera::CameraParameters(Eigen::Vector2i(image_width, image_height), Eigen::Vector2d(focal_length, focal_length),
-                               Eigen::Vector2d(optical_center_x, optical_center_y), distortion);
+      camera::CameraParameters(Eigen::Vector2i(image_width, image_height),
+                               Eigen::Vector2d(focal_length, focal_length),
+                               Eigen::Vector2d(optical_center_x, optical_center_y),
+                               distortion);
   }
 }
 
 // Compute the texture cam pose and timestamp based on the nav cam pose
 // and timestamp, and save it in the storage.
-void StreamingMapper::AddTextureCamPose(double nav_cam_timestamp, Eigen::Affine3d const& nav_cam_pose) {
-  if (nav_cam_pose.matrix() == Eigen::Matrix<double, 4, 4>::Identity()) {
+void StreamingMapper::AddTextureCamPose(double nav_cam_timestamp,
+                                        Eigen::Affine3d const& nav_cam_to_world) {
+  if (nav_cam_to_world.matrix() == Eigen::Matrix<double, 4, 4>::Identity()) {
     // Skip bad poses
     return;
   }
 
   // Must compensate for the fact that the nav cam, haz cam, and texture cam all
   // have some time delay among them
-  double texture_cam_timestamp = nav_cam_timestamp + navcam_to_texture_cam_timestamp_offset;
+  double texture_cam_timestamp = nav_cam_timestamp + m_nav_cam_to_texture_cam_timestamp_offset;
 
-  // Keep in mind that nav_cam_pose is the transform from the nav cam
+  // Keep in mind that nav_cam_to_world is the transform from the nav cam
   // to the world. Hence the matrices are multiplied in the order as
   // below.
-  Eigen::Affine3d texture_cam_pose;
-  texture_cam_pose.matrix() = nav_cam_pose.matrix() * texture_cam_to_navcam_trans;
+  Eigen::Affine3d texture_cam_to_world;
+  texture_cam_to_world.matrix() = nav_cam_to_world.matrix() * m_texture_cam_to_nav_cam_trans;
 
   {
     // Add the latest pose. Use a lock.
     const std::lock_guard<std::mutex> lock(texture_cam_pose_lock);
-    texture_cam_poses[texture_cam_timestamp] = texture_cam_pose;
+    texture_cam_to_world_map[texture_cam_timestamp] = texture_cam_to_world;
 
     // Wipe the oldest poses. Keep a lot of them, as sometimes, particularly for ekf,
     // they can come very fast
-    while (texture_cam_poses.size() > 100) texture_cam_poses.erase(texture_cam_poses.begin());
+    while (texture_cam_to_world_map.size() > 100)
+      texture_cam_to_world_map.erase(texture_cam_to_world_map.begin());
  }
 
   // TODO(oalexan1): This will need more testing!
 }
 
 // This callback takes as input the nav_cam pose as output by localization_node.
 void StreamingMapper::LandmarkCallback(ff_msgs::VisualLandmarks::ConstPtr const& vl) {
-  if (sim_mode) return;
+  if (m_sim_mode) return;
 
   double nav_cam_timestamp = vl->header.stamp.toSec();
 
-  Eigen::Affine3d nav_cam_pose;
-  tf::poseMsgToEigen(vl->pose, nav_cam_pose);
+  Eigen::Affine3d nav_cam_to_world;
+  tf::poseMsgToEigen(vl->pose, nav_cam_to_world);
 
-  StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_pose);
+  StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_to_world);
 }
 
 // This callback takes as input the robot pose as output by ekf on
 // ekf_state_topic (it should be set to /gnc/ekf)
 void StreamingMapper::EkfStateCallback(ff_msgs::EkfState::ConstPtr const& ekf_state) {
-  if (sim_mode) return;
+  if (m_sim_mode) return;
   double nav_cam_timestamp = ekf_state->header.stamp.toSec();
   Eigen::Affine3d body_pose;
   tf::poseMsgToEigen(ekf_state->pose, body_pose);
 
   // Convert from body pose (body to world transform) to nav cam pose (nav cam to world transform)
-  // Use the equation: body_to_world = navcam_to_world * body_to_navcam, written equivalently as:
-  // navcam_to_world = body_to_world * navcam_to_body
-  Eigen::Affine3d nav_cam_pose;
-  nav_cam_pose.matrix() = body_pose.matrix() * navcam_to_body_trans;
-  StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_pose);
+  // Use the equation: body_to_world = nav_cam_to_world * body_to_nav_cam, written equivalently as:
+  // nav_cam_to_world = body_to_world * nav_cam_to_body
+  Eigen::Affine3d nav_cam_to_world;
+  nav_cam_to_world.matrix() = body_pose.matrix() * m_nav_cam_to_body_trans;
+  StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_to_world);
 }
 
 // This callback takes as input the robot pose as output by ekf on /loc/pose.
 void StreamingMapper::EkfPoseCallback(geometry_msgs::PoseStamped::ConstPtr const& ekf_pose) {
-  if (sim_mode) return;
+  if (m_sim_mode) return;
 
   double nav_cam_timestamp = ekf_pose->header.stamp.toSec();
   Eigen::Affine3d body_pose;
   tf::poseMsgToEigen(ekf_pose->pose, body_pose);
 
   // Convert from body pose (body to world transform) to nav cam pose (nav cam to world transform)
-  // Use the equation: body_to_world = navcam_to_world * body_to_navcam, written equivalently as:
-  // navcam_to_world = body_to_world * navcam_to_body
-  Eigen::Affine3d nav_cam_pose;
-  nav_cam_pose.matrix() = body_pose.matrix() * navcam_to_body_trans;
-  StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_pose);
+  // Use the equation: body_to_world = nav_cam_to_world * body_to_nav_cam, written equivalently as:
+  // nav_cam_to_world = body_to_world * nav_cam_to_body
+  Eigen::Affine3d nav_cam_to_world;
+  nav_cam_to_world.matrix() = body_pose.matrix() * m_nav_cam_to_body_trans;
  StreamingMapper::AddTextureCamPose(nav_cam_timestamp, nav_cam_to_world);
 }
 
 // This callback takes as input the sci cam exif metadata, such as
 // the timestamp, exposure time, and ISO.
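 // The exif values arrive packed in a Float64MultiArray, indexed by
 // constants such as dense_map::TIMESTAMP below; the stored entries
 // later feed the optional sci cam exposure correction.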
void StreamingMapper::SciCamExifCallback(std_msgs::Float64MultiArray::ConstPtr const& exif) {
-  if (sim_mode) return;
+  if (m_sim_mode || !m_sci_cam_exp_corr) return;
 
   double timestamp = exif->data[dense_map::TIMESTAMP];
 
@@ -668,20 +679,25 @@ void StreamingMapper::SciCamExifCallback(std_msgs::Float64MultiArray::ConstPtr c
 
 void StreamingMapper::WipeOldImages() {
   // Wipe the oldest images. Keep just a few.
-  size_t num_to_keep = 10;  // for sci_cam
-  if (texture_cam_type == "nav_cam") {
-    // For small images that get published often keep a bunch of them,
-    // as sometimes there is a delay for when the pose arrives,
-    // and if not enough images are kept they are wiped before
-    // they can be used.
-    // TODO(oalexan1): This is not robust enough.
+  // For small images that get published often keep a bunch of them,
+  // as sometimes there is a delay for when the pose arrives,
+  // and if not enough images are kept they are wiped before
+  // they can be used.
+  // TODO(oalexan1): This is not robust enough.
+  size_t num_to_keep = 0;
+  if (m_texture_cam_type == "nav_cam") {
     num_to_keep = 50;
-  }
-  if (texture_cam_type == "haz_cam") {
+  } else if (m_texture_cam_type == "haz_cam") {
+    // These are small
    num_to_keep = 100;
+  } else if (m_texture_cam_type == "sci_cam") {
+    num_to_keep = 10;
+  } else {
+    num_to_keep = 25;  // something in the middle
  }
 
-  while (texture_cam_images.size() > num_to_keep) texture_cam_images.erase(texture_cam_images.begin());
+  while (texture_cam_images.size() > num_to_keep)
+    texture_cam_images.erase(texture_cam_images.begin());
 
   // There is a second wiping in this code in a different place. After
   // an image is processed it will be wiped together with any images
@@ -698,7 +714,8 @@ void StreamingMapper::CompressedTextureCallback(const sensor_msgs::CompressedIma
 
   // Copy the data to local storage to ensure no strange behavior
   // cv::Mat tmp_img = cv::imdecode(cv::Mat(msg->data), cv::IMREAD_COLOR);
   // cv::Mat tmp_img =
-  texture_cam_images[texture_cam_image_timestamp] = cv::imdecode(cv::Mat(msg->data), cv::IMREAD_COLOR);
+  texture_cam_images[texture_cam_image_timestamp]
+    = cv::imdecode(cv::Mat(msg->data), cv::IMREAD_COLOR);
 
   // cv::namedWindow( "Display window", cv::WINDOW_AUTOSIZE );  // Create a window for display.
   // cv::imshow( "Display window", tmp_img);  // Show our image inside it.
@@ -735,12 +752,9 @@ void StreamingMapper::UncompressedTextureCallback(const sensor_msgs::ImageConstP
   WipeOldImages();
 }
 
-// Iterate over the texture images. Overlay them on the 3D model and publish
-// the result.
-// Need to consider here if they are
-// full res and compressed and color, or 1/4 res and grayscale and
-// and uncompressed
-//
+// Iterate over the texture images. Overlay them on the 3D model and
+// publish the result. Need to consider here if they are full res and
+// compressed and color, or 1/4 res and grayscale and uncompressed.
 void StreamingMapper::ProcessingLoop() {
   double last_attempted_texture_cam_timestamp = -1.0;
 
@@ -764,10 +778,10 @@ void StreamingMapper::ProcessingLoop() {
 
   // Safely copy the data we need (this data is small, so copying is cheap).
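 // The pattern used throughout this loop: each shared container is
 // copied to a local variable while briefly holding its lock, and the
 // expensive texturing work then runs on the local copies, so the ROS
 // callbacks are never blocked for long.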
- std::map local_texture_cam_poses; + std::map local_texture_cam_to_world_map; { const std::lock_guard lock(texture_cam_pose_lock); - local_texture_cam_poses = texture_cam_poses; + local_texture_cam_to_world_map = texture_cam_to_world_map; } camera::CameraParameters local_texture_cam_params(Eigen::Vector2i(0, 0), Eigen::Vector2d(0, 0), @@ -778,7 +792,7 @@ void StreamingMapper::ProcessingLoop() { } // Copy the exif data locally using a lock - bool need_exif = (!sim_mode && texture_cam_type == "sci_cam"); + bool need_exif = (!m_sim_mode && m_texture_cam_type == "sci_cam" && m_sci_cam_exp_corr); std::map > local_sci_cam_exif; if (need_exif) { @@ -804,20 +818,21 @@ void StreamingMapper::ProcessingLoop() { break; } - if (local_texture_cam_poses.empty()) { + if (local_texture_cam_to_world_map.empty()) { break; // no poses yet } // Ensure that we can bracket the texture image timestamp between // the timestamps of the poses - if (texture_cam_image_timestamp > local_texture_cam_poses.rbegin()->first) { + if (texture_cam_image_timestamp > local_texture_cam_to_world_map.rbegin()->first) { continue; } - if (texture_cam_image_timestamp < local_texture_cam_poses.begin()->first) { + if (texture_cam_image_timestamp < local_texture_cam_to_world_map.begin()->first) { continue; } - if (need_exif && local_sci_cam_exif.find(texture_cam_image_timestamp) == local_sci_cam_exif.end()) { + if (need_exif && + local_sci_cam_exif.find(texture_cam_image_timestamp) == local_sci_cam_exif.end()) { // Skip if the exif info did not arrive yet continue; } @@ -830,7 +845,8 @@ void StreamingMapper::ProcessingLoop() { success = true; // Wipe this image and images older than this as we will never use them - while (texture_cam_images.size() > 0 && texture_cam_images.begin()->first <= texture_cam_image_timestamp) + while (texture_cam_images.size() > 0 && + texture_cam_images.begin()->first <= texture_cam_image_timestamp) texture_cam_images.erase(texture_cam_images.begin()); // Stop given that we have found a good image to process @@ -842,6 +858,7 @@ void StreamingMapper::ProcessingLoop() { continue; } + util::WallTimer timer; // All is good with this timestamp. We may still not want to process it, @@ -850,31 +867,37 @@ void StreamingMapper::ProcessingLoop() { last_attempted_texture_cam_timestamp = texture_cam_image_timestamp; // Find the interpolated pose at the current image - Eigen::Affine3d curr_texture_cam_pose; + Eigen::Affine3d curr_cam_to_world; - if (!dense_map::findInterpPose(texture_cam_image_timestamp, local_texture_cam_poses, curr_texture_cam_pose)) + if (!dense_map::findInterpPose(texture_cam_image_timestamp, local_texture_cam_to_world_map, + curr_cam_to_world)) LOG(FATAL) << "Could not bracket the timestamp. 
Should have worked at this stage."; - camera::CameraModel cam(curr_texture_cam_pose.inverse(), local_texture_cam_params); + camera::CameraModel cam(curr_cam_to_world.inverse(), local_texture_cam_params); + + Eigen::Vector3d curr_cam_ctr = cam.GetPosition(); - Eigen::Vector3d cam_ctr = cam.GetPosition(); - double dist_to_prev_processed = (last_processed_cam_ctr - cam_ctr).norm(); - bool do_process = (std::isnan(dist_to_prev_processed) || // for the first camera to process - dist_to_prev_processed > dist_between_processed_cams); + double curr_dist_bw_cams = (m_prev_cam_ctr - curr_cam_ctr).norm(); + Eigen::Affine3d world_pose_change + = curr_cam_to_world * (m_prev_cam_to_world.inverse()); + double angle_diff = dense_map::maxRotationAngle(world_pose_change); - // if (!std::isnan(dist_to_prev_processed)) - // ROS_INFO_STREAM("Distance from current camera to last processed camera: " - // << dist_to_prev_processed << " meters."); + // Useful for testing + // if (!std::isnan(m_prev_cam_ctr[0])) + // std::cout << std::setprecision(4) + // << "Position and orientation changes relative to prev. processed cam are " + // << curr_dist_bw_cams << " m and " << angle_diff <<" deg.\n"; - // Current camera is too close to previously processed camera, hence wait for the bot to move - if (!do_process) { - // ROS_INFO_STREAM("Won't process this camera image, as it is too close to previous one."); + bool skip_processing = (!std::isnan(m_prev_cam_ctr[0]) && + curr_dist_bw_cams < m_dist_between_processed_cams && + angle_diff < m_angle_between_processed_cams); + if (skip_processing) { continue; } // May need to keep this comment long term // std::cout << "Processing the streaming camera image at time stamp " - // << texture_cam_image_timestamp << std::endl; + // << texture_cam_image_timestamp << std::endl; double iso = -1.0, exposure = -1.0; if (need_exif) { @@ -889,16 +912,18 @@ void StreamingMapper::ProcessingLoop() { // Publish the textured mesh in obj format std::ostringstream oss; - oss << "processed_" << 1000 + processed_camera_count; // add 1000 to list them nicely + oss << "processed_" << 1000 + m_processed_camera_count; // add 1000 to list them nicely std::string out_prefix = oss.str(); - publishTexturedMesh(mesh, bvh_tree, max_iso_times_exposure, iso, exposure, processed_camera_count, - texture_cam_image, texture_cam_image_timestamp, cam, smallest_cost_per_face, out_prefix); + publishTexturedMesh(mesh, bvh_tree, m_max_iso_times_exposure, iso, exposure, + m_processed_camera_count, texture_cam_image, texture_cam_image_timestamp, + cam, m_smallest_cost_per_face, out_prefix); // Save this for next time - last_processed_cam_ctr = cam_ctr; - processed_camera_count++; + m_prev_cam_ctr = curr_cam_ctr; + m_prev_cam_to_world = curr_cam_to_world; + m_processed_camera_count++; - std::cout << "Total processing took " << timer.get_elapsed() / 1000.0 << " seconds\n"; + std::cout << "Texture processing took " << timer.get_elapsed() / 1000.0 << " seconds\n"; } } diff --git a/dense_map/geometry_mapper/tools/test_texture_gen.cc b/dense_map/geometry_mapper/tools/test_texture_gen.cc new file mode 100644 index 00000000..849f73fe --- /dev/null +++ b/dense_map/geometry_mapper/tools/test_texture_gen.cc @@ -0,0 +1,134 @@ +/* Copyright (c) 2021, United States Government, as represented by the + * Administrator of the National Aeronautics and Space Administration. + * + * All rights reserved. 
+ *
+ * The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking
+ * platform" software is licensed under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include
+#include
+
+#include
+
+// Test creating a texture the way the streaming_mapper does it, but
+// without involving ROS. Hence, create a single shared texture buffer
+// which will be used for any image, then use it with one image. This
+// is different from the orthoproject tool, which creates a texture
+// buffer specific to each image with no reserve space.
+
+DEFINE_string(mesh, "", "The mesh to project onto.");
+
+DEFINE_string(image, "", "The image to orthoproject. It should be distorted, as extracted "
+              "from a bag.");
+
+DEFINE_string(camera_to_world, "",
+              "The camera to world file, as written by the geometry mapper. For example: "
+              "run/1616779788.2920296_nav_cam_to_world.txt.");
+
+DEFINE_string(camera_name, "", "The camera name. For example: 'sci_cam'.");
+
+DEFINE_int32(num_exclude_boundary_pixels, 0,
+             "Exclude pixels closer to the boundary than this when texturing.");
+
+DEFINE_string(output_prefix, "", "The output prefix. The textured mesh name will be"
+              " <output_prefix>.obj.");
+
+int main(int argc, char** argv) {
+  ff_common::InitFreeFlyerApplication(&argc, &argv);
+
+  if (FLAGS_image.empty() || FLAGS_camera_to_world.empty())
+    LOG(FATAL) << "Must specify an image and camera.";
+
+  if (FLAGS_mesh.empty() || FLAGS_camera_name.empty() || FLAGS_output_prefix.empty())
+    LOG(FATAL) << "Not all inputs were specified.";
+
+  // Load the mesh
+  mve::TriangleMesh::Ptr mesh;
+  std::shared_ptr<mve::MeshInfo> mesh_info;
+  std::shared_ptr<tex::Graph> graph;
+  std::shared_ptr<BVHTree> bvh_tree;
+  dense_map::loadMeshBuildTree(FLAGS_mesh, mesh, mesh_info, graph, bvh_tree);
+
+  // Load the camera intrinsics
+  config_reader::ConfigReader config;
+  config.AddFile("cameras.config");
+  if (!config.ReadFiles()) LOG(FATAL) << "Could not read the configuration file.";
+  std::cout << "Using camera: " << FLAGS_camera_name << std::endl;
+  camera::CameraParameters cam_params(&config, FLAGS_camera_name.c_str());
+
+  // Read image and camera_to_world
+  cv::Mat image = cv::imread(FLAGS_image);
+  Eigen::Affine3d cam_to_world;
+  dense_map::readAffine(cam_to_world, FLAGS_camera_to_world);
+
+  double pixel_size = 0.001;
+  int num_threads = 48;
+  std::vector<unsigned int> const& faces = mesh->get_faces();
+  int num_faces = faces.size();
+  std::vector<double> smallest_cost_per_face(num_faces, 1.0e+100);
+  std::vector<dense_map::FaceInfo> face_projection_info;
+  dense_map::IsaacObjModel texture_model;
+  std::vector<dense_map::IsaacTextureAtlas::Ptr> texture_atlases;
+  cv::Mat model_texture;
+
+  // Form the model
+  dense_map::formModel(mesh, pixel_size, num_threads, face_projection_info,
+                       texture_atlases, texture_model);
+
+  // Set up the camera
+  camera::CameraModel cam(cam_to_world.inverse(), cam_params);
+
+  // Project a single image
+  dense_map::projectTexture(mesh, bvh_tree, image, cam, smallest_cost_per_face,
+                            pixel_size, num_threads, face_projection_info,
texture_atlases, + texture_model, model_texture); + std::string mtl_str; + dense_map::formMtl(FLAGS_output_prefix, mtl_str); + std::string obj_str; + dense_map::formObj(texture_model, FLAGS_output_prefix, obj_str); + + // Create the output directory, if needed + std::string out_dir = boost::filesystem::path(FLAGS_output_prefix).parent_path().string(); + if (out_dir != "") dense_map::createDir(out_dir); + + std::string obj_file = FLAGS_output_prefix + ".obj"; + std::cout << "Writing: " << obj_file << std::endl; + std::ofstream obj_handle(obj_file); + obj_handle << obj_str; + obj_handle.close(); + + std::string mtl_file = FLAGS_output_prefix + ".mtl"; + std::cout << "Writing: " << mtl_file << std::endl; + std::ofstream mtl_handle(mtl_file); + mtl_handle << mtl_str; + mtl_handle.close(); + + std::string png_file = FLAGS_output_prefix + ".png"; + std::cout << "Writing: " << png_file << std::endl; + cv::imwrite(png_file, model_texture); + + return 0; +} diff --git a/dense_map/volumetric_mapper/tools/RFID_mapper_node.cc b/dense_map/volumetric_mapper/tools/RFID_mapper_node.cc index 614363eb..b23ddf78 100755 --- a/dense_map/volumetric_mapper/tools/RFID_mapper_node.cc +++ b/dense_map/volumetric_mapper/tools/RFID_mapper_node.cc @@ -44,7 +44,7 @@ class RFIDMapperNode{ nh_ = nh; // Set the config path to ISAAC char *path; - if ((path = getenv("ISAAC_CONFIG_DIR")) == NULL) + if ((path = getenv("CUSTOM_CONFIG_DIR")) == NULL) ROS_FATAL("Could not find the config path."); config_params_.SetPath(path); diff --git a/dense_map/volumetric_mapper/tools/air_quality_mapper_node.cc b/dense_map/volumetric_mapper/tools/air_quality_mapper_node.cc index 3ccd2fff..6cb3c9ea 100755 --- a/dense_map/volumetric_mapper/tools/air_quality_mapper_node.cc +++ b/dense_map/volumetric_mapper/tools/air_quality_mapper_node.cc @@ -44,7 +44,7 @@ class AirQualityMapperNode{ nh_ = nh; // Set the config path to ISAAC char *path; - if ((path = getenv("ISAAC_CONFIG_DIR")) == NULL) + if ((path = getenv("CUSTOM_CONFIG_DIR")) == NULL) ROS_FATAL("Could not find the config path."); config_params_.SetPath(path); diff --git a/dense_map/volumetric_mapper/tools/wifi_mapper_node.cc b/dense_map/volumetric_mapper/tools/wifi_mapper_node.cc index 14e1367d..b0b5602b 100755 --- a/dense_map/volumetric_mapper/tools/wifi_mapper_node.cc +++ b/dense_map/volumetric_mapper/tools/wifi_mapper_node.cc @@ -43,7 +43,7 @@ class WifiMapperNode{ nh_ = nh; // Set the config path to ISAAC char *path; - if ((path = getenv("ISAAC_CONFIG_DIR")) == NULL) + if ((path = getenv("CUSTOM_CONFIG_DIR")) == NULL) ROS_FATAL("Could not find the config path."); config_params_.SetPath(path); diff --git a/img_analysis/src/img_analysis_nodelet.cc b/img_analysis/src/img_analysis_nodelet.cc index 5add9075..c1cf1c94 100755 --- a/img_analysis/src/img_analysis_nodelet.cc +++ b/img_analysis/src/img_analysis_nodelet.cc @@ -76,7 +76,7 @@ class ImageAnalysisNode : public ff_util::FreeFlyerNodelet { protected: void Initialize(ros::NodeHandle* nh) { // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) cfg_.SetPath(path); // Grab some configuration parameters for this node from the LUA config reader @@ -127,22 +127,25 @@ class ImageAnalysisNode : public ff_util::FreeFlyerNodelet { // Result isaac_msgs::ImageInspectionResult result; + result.anomaly_result.classifier_options.push_back("Vent free"); + result.anomaly_result.classifier_options.push_back("Vent has obstacle"); + 
result.anomaly_result.classifier_options.push_back("Unknown result, is robot looking at vent?"); switch (classification) { case vent_analysis_.vent_free_: - result.result = "Vent free"; - result.response = RESPONSE::VENT_FREE; + result.anomaly_result.classifier_result = "Vent free"; + result.response = RESPONSE::SUCCESS; break; case vent_analysis_.vent_blocked_: - result.result = "Vent has obstacle"; - result.response = RESPONSE::VENT_OBSTRUCTED; + result.anomaly_result.classifier_result = "Vent has obstacle"; + result.response = RESPONSE::SUCCESS; break; case vent_analysis_.vent_unknown_: - result.result = "Unknown result, is robot looking at vent?"; - result.response = RESPONSE::INCONCLUSIVE; + result.anomaly_result.classifier_result = "Unknown result, is robot looking at vent?"; + result.response = RESPONSE::SUCCESS; break; default: - result.result = "Image analysis failed"; + result.anomaly_result.classifier_result = "Image analysis failed"; result.response = RESPONSE::FAILED; } goal_.type = isaac_msgs::ImageInspectionGoal::NONE; diff --git a/img_analysis/src/img_vent.cc b/img_analysis/src/img_vent.cc index 2b225253..c2a7e99c 100644 --- a/img_analysis/src/img_vent.cc +++ b/img_analysis/src/img_vent.cc @@ -26,7 +26,7 @@ namespace img_analysis { // Read the configuration config_reader::ConfigReader config_params; // Set the config path to ISAAC - char *path = getenv("ISAAC_CONFIG_DIR"); + char *path = getenv("CUSTOM_CONFIG_DIR"); if (path != NULL) config_params.SetPath(path); config_params.AddFile("anomaly/img_vent.config"); diff --git a/img_analysis/test/test_vent.test b/img_analysis/test/test_vent.test index 3075f30f..b2297b90 100644 --- a/img_analysis/test/test_vent.test +++ b/img_analysis/test/test_vent.test @@ -21,8 +21,8 @@ - + diff --git a/isaac.doxyfile b/isaac.doxyfile index 7634df31..ad6b0fc5 100644 --- a/isaac.doxyfile +++ b/isaac.doxyfile @@ -38,7 +38,7 @@ PROJECT_NAME = "ISAAC" # could be handy for archiving the generated documentation or if some version # control system is used. 
-PROJECT_NUMBER = 0.2.3 +PROJECT_NUMBER = 0.2.4 # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a diff --git a/isaac/config/behaviors/inspection.config b/isaac/config/behaviors/inspection.config index 436035e6..47fc3874 100644 --- a/isaac/config/behaviors/inspection.config +++ b/isaac/config/behaviors/inspection.config @@ -128,51 +128,6 @@ parameters = { }, -- INSPECTION CONFIG PARAMETERS { - id = "vent_to_sci_cam_rotation_x", - reconfigurable = true, - type = "double", - default = 0.5, - min = -1.0, - max = 1.0, - unit = "m", - description = "Quaternion rotation target to sci_cam x" - },{ - id = "vent_to_sci_cam_rotation_y", - reconfigurable = true, - type = "double", - default = -0.5, - min = -1.0, - max = 1.0, - unit = "m", - description = "Quaternion rotation target to sci_cam y" - },{ - id = "vent_to_sci_cam_rotation_z", - reconfigurable = true, - type = "double", - default = -0.5, - min = -1.0, - max = 1.0, - unit = "m", - description = "Quaternion rotation target to sci_cam z" - },{ - id = "vent_to_sci_cam_rotation_w", - reconfigurable = true, - type = "double", - default = 0.5, - min = -1.0, - max = 1.0, - unit = "m", - description = "Quaternion rotation target to sci_cam w" - },{ - id = "optimal_distance", - reconfigurable = true, - type = "double", - default = 1.0, - min = 0.2, - max = 3.0, - unit = "m", - description = "Optimal distance between target and camera" - },{ id = "distance_resolution", reconfigurable = true, type = "double", @@ -289,6 +244,15 @@ parameters = { max = 1.0, unit = "percentage", description = "Overlap between consecutive images" + },{ + id = "station_time", + reconfigurable = true, + type = "double", + default = 2.0, + min = 0.0, + max = 10.0, + unit = "seconds", + description = "Wait time in each station" }} \ No newline at end of file diff --git a/isaac/config/dense_map/streaming_mapper.config b/isaac/config/dense_map/streaming_mapper.config index 6b78cc63..5c7cd660 100644 --- a/isaac/config/dense_map/streaming_mapper.config +++ b/isaac/config/dense_map/streaming_mapper.config @@ -16,10 +16,10 @@ -- License for the specific language governing permissions and limitations -- under the License. 
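-- Aside: the environment-variable guard just below generalizes to any
-- required variable; a minimal Lua sketch of the same pattern (the helper
-- name is hypothetical, not part of this config):
--
--   local function require_env(name)
--     local v = os.getenv(name)
--     if v == nil then
--       io.stderr:write(name .. " not set, aborting.\n")
--       os.exit()
--     end
--     return v
--   end
--   resource_dir = require_env("CUSTOM_RESOURCE_DIR")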
--- Check that we have ISAAC_RESOURCE_DIR set -resource_dir = os.getenv("ISAAC_RESOURCE_DIR") +-- Check that we have CUSTOM_RESOURCE_DIR set +resource_dir = os.getenv("CUSTOM_RESOURCE_DIR") if resource_dir == nil then - io.stderr:write("ISAAC_RESOURCE_DIR not set, aborting.\n") + io.stderr:write("CUSTOM_RESOURCE_DIR not set, aborting.\n") os.exit() end @@ -44,17 +44,34 @@ nav_cam_pose_topic = ""; -- A backup topic to get the pose from, rarely used ekf_pose_topic = ""; -texture_cam_type = "sci_cam"; -texture_cam_topic = "/hw/cam_sci/compressed"; -dist_between_processed_cams = 0.10; -max_iso_times_exposure = 5.1; +-- Type of camera to texture, and its image topic -use_single_texture = true; -pixel_size = 0.001; -num_threads = 48; +texture_cam_type = "sci_cam"; +texture_cam_topic = "/hw/cam_sci/compressed"; + +-- texture_cam_type = "haz_cam"; +-- texture_cam_topic = "/hw/depth_haz/extended/amplitude_int"; + +-- texture_cam_type = "nav_cam"; +-- texture_cam_topic = "/hw/cam_nav"; + +-- texture_cam_type = "heat_cam"; +-- texture_cam_topic = "/hw/cam_heat"; + +-- texture_cam_type = "acoustics_cam"; +-- texture_cam_topic = "/hw/cam_acoustics"; + +dist_between_processed_cams = 0.10; +angle_between_processed_cams = 5.0; +max_iso_times_exposure = 5.1; +sci_cam_exposure_correction = true; + +use_single_texture = true; +pixel_size = 0.001; +num_threads = 48; -- This is measured at full resolution, not the reduced resolution --- used in calibration . +-- used in calibration. num_exclude_boundary_pixels = 0; save_to_disk = false; diff --git a/isaac/config/transforms.config b/isaac/config/transforms.config new file mode 100644 index 00000000..50abe5c4 --- /dev/null +++ b/isaac/config/transforms.config @@ -0,0 +1,40 @@ +-- Copyright (c) 2017, United States Government, as represented by the +-- Administrator of the National Aeronautics and Space Administration. +-- +-- All rights reserved. +-- +-- The Astrobee platform is licensed under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with the +-- License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +-- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +-- License for the specific language governing permissions and limitations +-- under the License. 
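-- Aside: each entry in the transforms table that follows has the shape
-- sketched here (the child frame name is hypothetical, and the quat4
-- argument order is assumed to be x, y, z, w):
--
--   { global = false,           -- local (robot-relative) vs. world frame
--     parent = "body",          -- frame the transform is expressed in
--     child  = "my_sensor",     -- frame being placed
--     transform = transform(vec3(x, y, z), quat4(qx, qy, qz, qw)) },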
+ +-- All static transforms in our system +transforms = { + + ------------ + -- LOCALS -- + ------------ + + -- SOUNDSEE CAMERA + + { global = false, parent = "body", child = "soundsee", + transform = transform(vec3(0.131585, 0.00185, 0.1068), quat4(0.500, -0.500, 0.500, -0.500) ) }, + + -- TARGET TO SCICAM + + { global = false, parent = "sci_cam", child = "target", + transform = transform(vec3(0.0, 0.0, 1.0), quat4(0.500, -0.500, -0.500, 0.500) ) }, + + ------------- + -- GLOBALS -- + ------------- + + + } diff --git a/isaac/launch/isaac_astrobee.launch b/isaac/launch/isaac_astrobee.launch index 2f53eb88..c03b154c 100644 --- a/isaac/launch/isaac_astrobee.launch +++ b/isaac/launch/isaac_astrobee.launch @@ -69,11 +69,14 @@ name="ASTROBEE_RESOURCE_DIR" value="$(find astrobee)/resources" /> - - + - + @@ -178,6 +181,9 @@ + + + diff --git a/isaac/launch/robot/ILP.launch b/isaac/launch/robot/ILP.launch index 10deca97..544fcc24 100644 --- a/isaac/launch/robot/ILP.launch +++ b/isaac/launch/robot/ILP.launch @@ -38,6 +38,8 @@ name="ilp_cargo" output="$(arg output)"/> + @@ -64,6 +66,18 @@ + + + + + + + + + + + + diff --git a/isaac/readme.md b/isaac/readme.md index dce0d4e9..16809ef7 100644 --- a/isaac/readme.md +++ b/isaac/readme.md @@ -133,12 +133,8 @@ and then the bot can be moved via: rosrun mobility teleop -move -pos "11.0 -5.0 5.0" -tolerance_pos 0.0001 -att "0 0 0 1" -3. Start the bot with the acoustics camera on. +3. ISAAC supports a simulated acoustics camera. See: - source ~/freeflyer_build/native/devel/setup.zsh - source ~/projects/isaac/devel/setup.zsh - roslaunch isaac sim.launch world:=iss rviz:=true acoustics_cam:=true \ - pose:="11.2 -7.72 5.0 0 0 0 0 1" - -See isaac/hardware/acoustics_cam/readme.md for more details. + astrobee/simulation/acoustics_cam/readme.md +for how to use it with the simulator. diff --git a/isaac/resources/rviz/iss.rviz b/isaac/resources/rviz/iss.rviz index 1a0f6cc1..d75f7162 100644 --- a/isaac/resources/rviz/iss.rviz +++ b/isaac/resources/rviz/iss.rviz @@ -579,6 +579,17 @@ Visualization Manager: Transport Hint: raw Unreliable: false Value: true + - Class: rviz/Image + Enabled: true + Image Topic: /hw/depth_haz/extended/amplitude_int + Median window: 5 + Min Value: 0 + Name: "DEBUG: HazCam intensity" + Normalize Range: true + Queue Size: 2 + Transport Hint: raw + Unreliable: false + Value: true - Alpha: 1 Autocompute Intensity Bounds: true Autocompute Value Bounds: @@ -1164,6 +1175,8 @@ Window Geometry: collapsed: false "DEBUG: SciCam": collapsed: false + "DEBUG: HazCam": + collapsed: false Displays: collapsed: false Height: 788 diff --git a/isaac_msgs b/isaac_msgs index 4e08b717..c5b40c62 160000 --- a/isaac_msgs +++ b/isaac_msgs @@ -1 +1 @@ -Subproject commit 4e08b717104ffafe3c725aa4715041effd3b9ebe +Subproject commit c5b40c6248ce52b2bcc0ed1033492bab72a3fcb1 diff --git a/scripts/build/build_debian.sh b/scripts/build/build_debian.sh index 26ead5dc..d4607783 100755 --- a/scripts/build/build_debian.sh +++ b/scripts/build/build_debian.sh @@ -20,11 +20,11 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -# if [ -n "$(git status --porcelain)" ]; then -# echo "You should not build Debians for a dirty source tree!" -# echo "Make sure all your changes are committed AND pushed to the server..." -# exit -1 -# fi +if [ -n "$(git status --porcelain)" ]; then + echo "You should not build Debians for a dirty source tree!" + echo "Make sure all your changes are committed AND pushed to the server..." 
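# Aside: `git status --porcelain` prints one short line per modified or
# untracked file (e.g. " M scripts/build/build_debian.sh"; the path here is
# illustrative) and prints nothing at all on a clean tree, so the -n test
# above fires exactly when the working tree is dirty.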
+ exit -1 +fi EXTRA_FLAGS="-b -a armhf" if [[ $* == *--config* ]]; then @@ -33,5 +33,5 @@ fi pushd $DIR/../.. export CMAKE_TOOLCHAIN_FILE=${DIR}/isaac_cross.cmake -DEB_BUILD_OPTIONS="parallel=8" debuild -e ARMHF_CHROOT_DIR -e ARMHF_TOOLCHAIN -e CMAKE_TOOLCHAIN_FILE -e CMAKE_PREFIX_PATH -us -uc $EXTRA_FLAGS +DEB_BUILD_OPTIONS="parallel=8" debuild -e ASTROBEE_WS -e ARMHF_CHROOT_DIR -e ARMHF_TOOLCHAIN -e CMAKE_TOOLCHAIN_FILE -e CMAKE_PREFIX_PATH -us -uc $EXTRA_FLAGS popd > /dev/null diff --git a/scripts/build/isaac_cross.cmake b/scripts/build/isaac_cross.cmake index c24b9874..d96a2087 100644 --- a/scripts/build/isaac_cross.cmake +++ b/scripts/build/isaac_cross.cmake @@ -52,8 +52,10 @@ SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) # If this is not done, the resulting rpath will not look for them in the chroot # environment. # Also, RTI DDS is included once for the libraries and once for the headers. +execute_process(COMMAND catkin locate -i OUTPUT_VARIABLE CATKIN_INSTALL_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) +execute_process(COMMAND catkin locate -d OUTPUT_VARIABLE CATKIN_DEVEL_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) SET(CMAKE_FIND_ROOT_PATH - ${ARM_CHROOT_DIR}) + ${ARM_CHROOT_DIR} ${CATKIN_INSTALL_PATH} ${CATKIN_DEVEL_PATH} $ENV{ASTROBEE_WS}/armhf/opt/astrobee) SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH) SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH) @@ -68,7 +70,8 @@ IF( DEFINED EXTRA_ROOT_PATH ) SET(CMAKE_FIND_ROOT_PATH ${EXTRA_ROOT_PATH} ${CMAKE_FIND_ROOT_PATH}) ENDIF( DEFINED EXTRA_ROOT_PATH ) -SET(catkin2_DIR ${CMAKE_SOURCE_DIR}/cmake) +execute_process(COMMAND catkin locate -s OUTPUT_VARIABLE CATKIN_SRC_PATH OUTPUT_STRIP_TRAILING_WHITESPACE) +SET(catkin2_DIR ${CATKIN_SRC_PATH}/cmake) # needed for gflag to compile... SET( THREADS_PTHREAD_ARG diff --git a/scripts/docker/astrobee_msgs.Dockerfile b/scripts/docker/astrobee_msgs.Dockerfile index f6c4dad9..33fb38f7 100644 --- a/scripts/docker/astrobee_msgs.Dockerfile +++ b/scripts/docker/astrobee_msgs.Dockerfile @@ -20,7 +20,6 @@ # You must set the docker context to be the repository root directory ARG UBUNTU_VERSION=16.04 - FROM ubuntu:${UBUNTU_VERSION} ARG ROS_VERSION=kinetic diff --git a/scripts/docker/build.sh b/scripts/docker/build.sh index dd6e8541..aadffe94 100755 --- a/scripts/docker/build.sh +++ b/scripts/docker/build.sh @@ -151,13 +151,13 @@ docker build ${astrobee_source:-${rootdir}} \ --build-arg UBUNTU_VERSION=${UBUNTU_VERSION} \ --build-arg ROS_VERSION=${ROS_VERSION} \ --build-arg PYTHON=${PYTHON} \ - -t astrobee:msgs-ubuntu${UBUNTU_VERSION} + -t isaac/isaac:astrobee-msgs-ubuntu${UBUNTU_VERSION} docker build ${isaac_source:-${rootdir}} \ -f ${isaac_source:-${rootdir}}/scripts/docker/isaac_msgs.Dockerfile \ --build-arg UBUNTU_VERSION=${UBUNTU_VERSION} \ --build-arg ROS_VERSION=${ROS_VERSION} \ --build-arg PYTHON=${PYTHON} \ - -t isaac:msgs-ubuntu${UBUNTU_VERSION} + -t isaac/isaac:msgs-ubuntu${UBUNTU_VERSION} # Build IDI and MAST export IDI_PATH=${idi_source} diff --git a/scripts/docker/build_msgs_jar.Dockerfile b/scripts/docker/build_msgs_jar.Dockerfile new file mode 100644 index 00000000..ab93ca6b --- /dev/null +++ b/scripts/docker/build_msgs_jar.Dockerfile @@ -0,0 +1,39 @@ +# Copyright (c) 2021, United States Government, as represented by the +# Administrator of the National Aeronautics and Space Administration. +# +# All rights reserved. 
+# +# The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking +# platform" software is licensed under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with the +# License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# This will set up an Astrobee docker container using the non-NASA install instructions. +# You must set the docker context to be the repository root directory + +ARG REMOTE=isaac +FROM ${REMOTE}/isaac:msgs-ubuntu16.04 + +RUN apt-get update && apt-get install -y \ + unzip \ + libc6-dev-i386 \ + lib32z1 \ + python-wstool \ + openjdk-8-jdk \ + ros-kinetic-rosjava \ + && rm -rf /var/lib/apt/lists/* + +# Compile msg jar files, genjava_message_artifacts only works with bash +RUN ["/bin/bash", "-c", "cd /src/msgs \ + && catkin config \ + && catkin build \ + && . devel/setup.bash \ + && genjava_message_artifacts --verbose -p ff_msgs ff_hw_msgs isaac_msgs isaac_hw_msgs"] diff --git a/scripts/docker/isaac_msgs.Dockerfile b/scripts/docker/isaac_msgs.Dockerfile index e180891b..e06c7232 100644 --- a/scripts/docker/isaac_msgs.Dockerfile +++ b/scripts/docker/isaac_msgs.Dockerfile @@ -20,8 +20,8 @@ # You must set the docker context to be the repository root directory ARG UBUNTU_VERSION=16.04 - -FROM astrobee:msgs-ubuntu${UBUNTU_VERSION} +ARG REMOTE=isaac +FROM ${REMOTE}/isaac:astrobee-msgs-ubuntu${UBUNTU_VERSION} ARG ROS_VERSION=kinetic ARG PYTHON="" diff --git a/scripts/setup/packages_bionic.lst b/scripts/setup/packages_bionic.lst index 8cdb6fa7..1ab3ee0b 100644 --- a/scripts/setup/packages_bionic.lst +++ b/scripts/setup/packages_bionic.lst @@ -2,4 +2,5 @@ doxygen python-catkin-tools ros-melodic-eigen-conversions ros-melodic-pcl-ros -libmnl-dev \ No newline at end of file +libmnl-dev +libproj-dev diff --git a/scripts/setup/packages_focal.lst b/scripts/setup/packages_focal.lst index df2c7115..87954901 100644 --- a/scripts/setup/packages_focal.lst +++ b/scripts/setup/packages_focal.lst @@ -4,4 +4,5 @@ python3-osrf-pycommon python3-rosdep ros-noetic-eigen-conversions ros-noetic-pcl-ros -libmnl-dev \ No newline at end of file +libmnl-dev +libproj-dev diff --git a/scripts/setup/packages_xenial.lst b/scripts/setup/packages_xenial.lst index 5e3e6ffb..2128f89a 100644 --- a/scripts/setup/packages_xenial.lst +++ b/scripts/setup/packages_xenial.lst @@ -2,4 +2,5 @@ doxygen python-catkin-tools ros-kinetic-eigen-conversions ros-kinetic-pcl-ros -libmnl-dev \ No newline at end of file +libmnl-dev +libproj-dev
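# Aside: each scripts/setup/packages_*.lst file above is a plain list of apt
# package names, one per line (the hunks also fix a missing trailing newline
# after libmnl-dev). A minimal sketch of consuming such a list, assuming it
# is fed straight to apt-get (the project's own setup scripts may differ):
sudo apt-get update
sudo apt-get install -y $(cat scripts/setup/packages_xenial.lst)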