Pitfalls: compiling and running ch8 of Visual SLAM: Fourteen Lectures (2nd edition)

1. fmt-related link errors

In CMakeLists.txt, append fmt at the end of the target_link_libraries lines:

target_link_libraries(optical_flow ${OpenCV_LIBS} fmt ) 

target_link_libraries(direct_method ${OpenCV_LIBS} ${Pangolin_LIBRARIES} fmt )
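For reference, a minimal CMakeLists.txt along these lines should build both targets. This is a sketch assuming the stock ch8 layout; the Eigen include path and compiler flags are assumptions, and if linking plain fmt fails, try find_package(fmt REQUIRED) together with the fmt::fmt target instead:

cmake_minimum_required(VERSION 3.1)
project(ch8)

set(CMAKE_BUILD_TYPE "Release")
set(CMAKE_CXX_FLAGS "-std=c++14 -O3")

find_package(OpenCV 3 REQUIRED)
find_package(Sophus REQUIRED)
find_package(Pangolin REQUIRED)

include_directories(
        ${OpenCV_INCLUDE_DIRS}
        ${Sophus_INCLUDE_DIRS}
        ${Pangolin_INCLUDE_DIRS}
        "/usr/include/eigen3")   # assumed Eigen location

add_executable(optical_flow optical_flow.cpp)
target_link_libraries(optical_flow ${OpenCV_LIBS} fmt)

add_executable(direct_method direct_method.cpp)
target_link_libraries(direct_method ${OpenCV_LIBS} ${Pangolin_LIBRARIES} fmt)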

2. No suitable user-defined conversion from "std::_Bind<void (OpticalFlowTracker::*(OpticalFlowTracker *, std::_Placeholder<1>))(const cv::Range &range)>" to "const cv::ParallelLoopBody" exists:

error: invalid initialization of reference of type ‘const cv::ParallelLoopBody&’ from expression of type ‘std::_Bind_helper<false, void (OpticalFlowTracker::*)(const cv::Range&), OpticalFlowTracker*, const std::_Placeholder<1>&>::type {aka std::_Bind<void (OpticalFlowTracker::*(OpticalFlowTracker*, std::_Placeholder<1>))(const cv::Range&)>}’
                   std::bind(&OpticalFlowTracker::calculateOpticalFlow, &tracker, placeholders::_1));

Cause: in OpenCV 3, cv::parallel_for_ only accepts a body derived from cv::ParallelLoopBody; the overload taking an arbitrary callable, which the book's std::bind call relies on, was only added in later OpenCV versions.
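Under OpenCV 3, the work for each range therefore has to live in a class derived from cv::ParallelLoopBody that overrides operator(). A minimal, self-contained sketch of the pattern (names are illustrative, not from the book):

#include <opencv2/opencv.hpp>
#include <vector>

// OpenCV 3-compatible pattern: instead of handing std::bind(...) to
// parallel_for_, derive from cv::ParallelLoopBody and do the per-range
// work in operator(), which OpenCV calls on chunks of the full range.
class RangeWorker : public cv::ParallelLoopBody {
public:
    explicit RangeWorker(std::vector<int> &out) : out_(out) {}
    virtual void operator()(const cv::Range &range) const {
        for (int i = range.start; i < range.end; i++)
            out_[i] = i * i;  // placeholder per-element work
    }
private:
    std::vector<int> &out_;
};

int main() {
    std::vector<int> result(100);
    RangeWorker worker(result);
    cv::parallel_for_(cv::Range(0, (int) result.size()), worker);
    return 0;
}

This is exactly the change made in the adapted files below: OpticalFlowTracker and JacobianAccumulator inherit from cv::ParallelLoopBody, and their calculateOpticalFlow / accumulate_jacobian methods become operator().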

Solutions:

1. Install OpenCV 4 and change find_package(OpenCV 3 REQUIRED) in CMakeLists.txt to find_package(OpenCV 4 REQUIRED) (not recommended).

2. Use a modified version of the code adapted to OpenCV 3, which applies the pattern above:

The following optical_flow.cpp is adapted from another author's modified version:

 
#include <opencv2/opencv.hpp>
#include <string>
#include <chrono>
#include <Eigen/Core>
#include <Eigen/Dense>
 
using namespace std;
using namespace cv;
 
string file_1 = "../LK1.png";  // first image
string file_2 = "../LK2.png";  // second image
 
/**
 * single level optical flow
 * @param [in] img1 the first image
 * @param [in] img2 the second image
 * @param [in] kp1 keypoints in img1
 * @param [in|out] kp2 keypoints in img2, if empty, use initial guess in kp1
 * @param [out] success true if a keypoint is tracked successfully
 * @param [in] inverse use inverse formulation?
 */
void OpticalFlowSingleLevel(
    const Mat &img1,
    const Mat &img2,
    const vector<KeyPoint> &kp1,
    vector<KeyPoint> &kp2,
    vector<bool> &success,
    bool inverse = false,
    bool has_initial_guess = false
);
 
/**
 * multi level optical flow, scale of pyramid is set to 2 by default
 * the image pyramid will be created inside the function
 * @param [in] img1 the first pyramid
 * @param [in] img2 the second pyramid
 * @param [in] kp1 keypoints in img1
 * @param [out] kp2 keypoints in img2
 * @param [out] success true if a keypoint is tracked successfully
 * @param [in] inverse set true to enable inverse formulation
 */
void OpticalFlowMultiLevel(
    const Mat &img1,
    const Mat &img2,
    const vector<KeyPoint> &kp1,
    vector<KeyPoint> &kp2,
    vector<bool> &success,
    bool inverse = false
);
 
/**
 * get a gray scale value from reference image (bi-linear interpolated)
 * @param img
 * @param x
 * @param y
 * @return the interpolated value of this pixel
 */
inline float GetPixelValue(const cv::Mat &img, float x, float y) {
    // boundary check (clamp so the 2x2 interpolation window stays inside the image)
    if (x < 0) x = 0;
    if (y < 0) y = 0;
    if (x >= img.cols - 1) x = img.cols - 2;
    if (y >= img.rows - 1) y = img.rows - 2;
    uchar *data = &img.data[int(y) * img.step + int(x)];
    float xx = x - floor(x);
    float yy = y - floor(y);
    return float(
        (1 - xx) * (1 - yy) * data[0] +
        xx * (1 - yy) * data[1] +
        (1 - xx) * yy * data[img.step] +
        xx * yy * data[img.step + 1]
    );
}
 
/// Optical flow tracker and interface
class OpticalFlowTracker: public cv::ParallelLoopBody  {
private:
    const Mat &img1;
    const Mat &img2;
    const vector<KeyPoint> &kp1;
    vector<KeyPoint> &kp2;
    vector<bool> &success;
    bool inverse = true;
    bool has_initial = false;
 
public:
    OpticalFlowTracker(
        const Mat &img1_,
        const Mat &img2_,
        const vector<KeyPoint> &kp1_,
        vector<KeyPoint> &kp2_,
        vector<bool> &success_,
        bool inverse_ = true, bool has_initial_ = false) :
        img1(img1_), img2(img2_), kp1(kp1_), kp2(kp2_), success(success_), inverse(inverse_),
        has_initial(has_initial_) {}
 
    //  void calculateOpticalFlow(const Range &range);
 
    virtual void operator()(const Range &range) const {
        // parameters
        int half_patch_size = 4;
        int iterations = 10;
        for (int i = range.start; i < range.end; i++) {
            auto kp = kp1[i];
            double dx = 0, dy = 0; // dx,dy need to be estimated
            if (has_initial) {
                dx = kp2[i].pt.x - kp.pt.x;
                dy = kp2[i].pt.y - kp.pt.y;
            }
 
            double cost = 0, lastCost = 0;
            bool succ = true; // indicate if this point succeeded
 
            // Gauss-Newton iterations
            Eigen::Matrix2d H = Eigen::Matrix2d::Zero();    // hessian
            Eigen::Vector2d b = Eigen::Vector2d::Zero();    // bias
            Eigen::Vector2d J;  // jacobian
            for (int iter = 0; iter < iterations; iter++) {
                if (inverse == false) {
                    H = Eigen::Matrix2d::Zero();
                    b = Eigen::Vector2d::Zero();
                } else {
                    // only reset b
                    b = Eigen::Vector2d::Zero();
                }
 
                cost = 0;
 
                // compute cost and jacobian
                for (int x = -half_patch_size; x < half_patch_size; x++)
                    for (int y = -half_patch_size; y < half_patch_size; y++) {
                        double error = GetPixelValue(img1, kp.pt.x + x, kp.pt.y + y) -
                                    GetPixelValue(img2, kp.pt.x + x + dx, kp.pt.y + y + dy);
                        if (inverse == false) {
                            J = -1.0 * Eigen::Vector2d(
                                0.5 * (GetPixelValue(img2, kp.pt.x + dx + x + 1, kp.pt.y + dy + y) -
                                    GetPixelValue(img2, kp.pt.x + dx + x - 1, kp.pt.y + dy + y)),
                                0.5 * (GetPixelValue(img2, kp.pt.x + dx + x, kp.pt.y + dy + y + 1) -
                                    GetPixelValue(img2, kp.pt.x + dx + x, kp.pt.y + dy + y - 1))
                            );
                        } else if (iter == 0) {
                            // in inverse mode, J keeps same for all iterations
                            // NOTE this J does not change when dx, dy is updated, so we can store it and only compute error
                            J = -1.0 * Eigen::Vector2d(
                                0.5 * (GetPixelValue(img1, kp.pt.x + x + 1, kp.pt.y + y) -
                                    GetPixelValue(img1, kp.pt.x + x - 1, kp.pt.y + y)),
                                0.5 * (GetPixelValue(img1, kp.pt.x + x, kp.pt.y + y + 1) -
                                    GetPixelValue(img1, kp.pt.x + x, kp.pt.y + y - 1))
                            );
                        }
                        // compute H, b and set cost;
                        b += -error * J;
                        cost += error * error;
                        if (inverse == false || iter == 0) {
                            // also update H
                            H += J * J.transpose();
                        }
                    }
 
                // compute update
                Eigen::Vector2d update = H.ldlt().solve(b);
 
                if (std::isnan(update[0])) {
                    // this sometimes occurs when the patch is all black or all white and H is not invertible
                    cout << "update is nan" << endl;
                    succ = false;
                    break;
                }
 
                if (iter > 0 && cost > lastCost) {
                    break;
                }
 
                // update dx, dy
                dx += update[0];
                dy += update[1];
                lastCost = cost;
                succ = true;
 
                if (update.norm() < 1e-2) {
                    // converge
                    break;
                }
            }
 
            success[i] = succ;
 
            // set kp2
            kp2[i].pt = kp.pt + Point2f(dx, dy);
        }
    }
};
 
 
int main(int argc, char **argv) {
 
    // images, note they are CV_8UC1, not CV_8UC3
    Mat img1 = imread(file_1, 0);
    Mat img2 = imread(file_2, 0);
 
    // key points, using GFTT here.
    vector<KeyPoint> kp1;
    Ptr<GFTTDetector> detector = GFTTDetector::create(500, 0.01, 20); // maximum 500 keypoints
    detector->detect(img1, kp1);
 
    // now let's track these key points in the second image
    // first use single level LK in the validation picture
    vector<KeyPoint> kp2_single;
    vector<bool> success_single;
    OpticalFlowSingleLevel(img1, img2, kp1, kp2_single, success_single);
 
    // then test multi-level LK
    vector<KeyPoint> kp2_multi;
    vector<bool> success_multi;
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    OpticalFlowMultiLevel(img1, img2, kp1, kp2_multi, success_multi, true);
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    auto time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "optical flow by gauss-newton: " << time_used.count() << endl;
 
    // use opencv's flow for validation
    vector<Point2f> pt1, pt2;
    for (auto &kp: kp1) pt1.push_back(kp.pt);
    vector<uchar> status;
    vector<float> error;
    t1 = chrono::steady_clock::now();
    cv::calcOpticalFlowPyrLK(img1, img2, pt1, pt2, status, error);
    t2 = chrono::steady_clock::now();
    time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "optical flow by opencv: " << time_used.count() << endl;
 
    // plot the differences of those functions
    Mat img2_single;
    cv::cvtColor(img2, img2_single, CV_GRAY2BGR);
    for (int i = 0; i < kp2_single.size(); i++) {
        if (success_single[i]) {
            cv::circle(img2_single, kp2_single[i].pt, 2, cv::Scalar(0, 250, 0), 2);
            cv::line(img2_single, kp1[i].pt, kp2_single[i].pt, cv::Scalar(0, 250, 0));
        }
    }
 
    Mat img2_multi;
    cv::cvtColor(img2, img2_multi, CV_GRAY2BGR);
    for (int i = 0; i < kp2_multi.size(); i++) {
        if (success_multi[i]) {
            cv::circle(img2_multi, kp2_multi[i].pt, 2, cv::Scalar(0, 250, 0), 2);
            cv::line(img2_multi, kp1[i].pt, kp2_multi[i].pt, cv::Scalar(0, 250, 0));
        }
    }
 
    Mat img2_CV;
    cv::cvtColor(img2, img2_CV, CV_GRAY2BGR);
    for (int i = 0; i < pt2.size(); i++) {
        if (status[i]) {
            cv::circle(img2_CV, pt2[i], 2, cv::Scalar(0, 250, 0), 2);
            cv::line(img2_CV, pt1[i], pt2[i], cv::Scalar(0, 250, 0));
        }
    }
 
    cv::imshow("tracked single level", img2_single);
    cv::imshow("tracked multi level", img2_multi);
    cv::imshow("tracked by opencv", img2_CV);
    cv::waitKey(0);
 
    return 0;
}
 
void OpticalFlowSingleLevel(
    const Mat &img1,
    const Mat &img2,
    const vector<KeyPoint> &kp1,
    vector<KeyPoint> &kp2,
    vector<bool> &success,
    bool inverse, bool has_initial) {
    kp2.resize(kp1.size());
    success.resize(kp1.size());
    OpticalFlowTracker tracker(img1, img2, kp1, kp2, success, inverse, has_initial);
    parallel_for_(Range(0, kp1.size()), tracker);
}
 
 
 
void OpticalFlowMultiLevel(
    const Mat &img1,
    const Mat &img2,
    const vector<KeyPoint> &kp1,
    vector<KeyPoint> &kp2,
    vector<bool> &success,
    bool inverse) {
 
    // parameters
    int pyramids = 4;
    double pyramid_scale = 0.5;
    double scales[] = {1.0, 0.5, 0.25, 0.125};
 
    // create pyramids
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    vector<Mat> pyr1, pyr2; // image pyramids
    for (int i = 0; i < pyramids; i++) {
        if (i == 0) {
            pyr1.push_back(img1);
            pyr2.push_back(img2);
        } else {
            Mat img1_pyr, img2_pyr;
            cv::resize(pyr1[i - 1], img1_pyr,
                       cv::Size(pyr1[i - 1].cols * pyramid_scale, pyr1[i - 1].rows * pyramid_scale));
            cv::resize(pyr2[i - 1], img2_pyr,
                       cv::Size(pyr2[i - 1].cols * pyramid_scale, pyr2[i - 1].rows * pyramid_scale));
            pyr1.push_back(img1_pyr);
            pyr2.push_back(img2_pyr);
        }
    }
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    auto time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "build pyramid time: " << time_used.count() << endl;
 
    // coarse-to-fine LK tracking in pyramids
    vector<KeyPoint> kp1_pyr, kp2_pyr;
    for (auto &kp:kp1) {
        auto kp_top = kp;
        kp_top.pt *= scales[pyramids - 1];
        kp1_pyr.push_back(kp_top);
        kp2_pyr.push_back(kp_top);
    }
 
    for (int level = pyramids - 1; level >= 0; level--) {
        // from coarse to fine
        success.clear();
        t1 = chrono::steady_clock::now();
        OpticalFlowSingleLevel(pyr1[level], pyr2[level], kp1_pyr, kp2_pyr, success, inverse, true);
        t2 = chrono::steady_clock::now();
        auto time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
        cout << "track pyr " << level << " cost time: " << time_used.count() << endl;
 
        if (level > 0) {
            for (auto &kp: kp1_pyr)
                kp.pt /= pyramid_scale;
            for (auto &kp: kp2_pyr)
                kp.pt /= pyramid_scale;
        }
    }
 
    for (auto &kp: kp2_pyr)
        kp2.push_back(kp);
}
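For context, each Gauss-Newton iteration in operator() above minimizes the photometric error of the 8x8 patch W around the keypoint. With e(x) = I_1(x) - I_2(x + d) and J(x) the (negated) image gradient, the accumulated normal equations solved by H.ldlt().solve(b) are

H = \sum_{x \in W} J(x) J(x)^T, \qquad b = -\sum_{x \in W} e(x)\, J(x), \qquad H \, \Delta d = b

In inverse mode, J is evaluated on I_1 once at iteration 0 and reused, which is why H is only re-accumulated when inverse == false or iter == 0.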

The same fix applies to direct_method.cpp:

There is also this error: ‘mutex’ in namespace ‘std’ does not name a type

Add the following at the top of the file:

#include <thread>
#include <mutex>
#include <unistd.h>

The complete direct_method.cpp is as follows:

#include <opencv2/opencv.hpp>
#include <sophus/se3.hpp>
#include <boost/format.hpp>
#include <pangolin/pangolin.h>
 
#include <thread>
#include <mutex>
#include <unistd.h>

using namespace std;
 
typedef vector<Eigen::Vector2d, Eigen::aligned_allocator<Eigen::Vector2d>> VecVector2d;
 
// Camera intrinsics
double fx = 718.856, fy = 718.856, cx = 607.1928, cy = 185.2157;
// baseline
double baseline = 0.573;
// paths
string left_file = "./left.png";
string disparity_file = "./disparity.png";
boost::format fmt_others("./%06d.png");    // other files
 
// useful typedefs
typedef Eigen::Matrix<double, 6, 6> Matrix6d;
typedef Eigen::Matrix<double, 2, 6> Matrix26d;
typedef Eigen::Matrix<double, 6, 1> Vector6d;
 
/**
 * pose estimation using direct method
 * @param img1
 * @param img2
 * @param px_ref
 * @param depth_ref
 * @param T21
 */
void DirectPoseEstimationMultiLayer(
        const cv::Mat &img1,
        const cv::Mat &img2,
        const VecVector2d &px_ref,
        const vector<double> depth_ref,
        Sophus::SE3d &T21
);
 
/**
 * pose estimation using direct method
 * @param img1
 * @param img2
 * @param px_ref
 * @param depth_ref
 * @param T21
 */
void DirectPoseEstimationSingleLayer(
        const cv::Mat &img1,
        const cv::Mat &img2,
        const VecVector2d &px_ref,
        const vector<double> depth_ref,
        Sophus::SE3d &T21
);
 
// bilinear interpolation
inline float GetPixelValue(const cv::Mat &img, float x, float y) {
    // boundary check (clamp so the 2x2 interpolation window stays inside the image)
    if (x < 0) x = 0;
    if (y < 0) y = 0;
    if (x >= img.cols - 1) x = img.cols - 2;
    if (y >= img.rows - 1) y = img.rows - 2;
    uchar *data = &img.data[int(y) * img.step + int(x)];
    float xx = x - floor(x);
    float yy = y - floor(y);
    return float(
            (1 - xx) * (1 - yy) * data[0] +
            xx * (1 - yy) * data[1] +
            (1 - xx) * yy * data[img.step] +
            xx * yy * data[img.step + 1]
    );
}
 
/// class for accumulating Jacobians in parallel
class JacobianAccumulator: public cv::ParallelLoopBody {
private:
    const cv::Mat &img1;
    const cv::Mat &img2;
    const VecVector2d &px_ref;
    const vector<double> depth_ref;
    Sophus::SE3d &T21;
    mutable VecVector2d projection; // projected points
 
    mutable std::mutex hessian_mutex;
    mutable Matrix6d H = Matrix6d::Zero();
    mutable Vector6d b = Vector6d::Zero();
    mutable double cost = 0;
 
public:
    JacobianAccumulator(
        const cv::Mat &img1_,
        const cv::Mat &img2_,
        const VecVector2d &px_ref_,
        const vector<double> depth_ref_,
        Sophus::SE3d &T21_) :
        img1(img1_), img2(img2_), px_ref(px_ref_), depth_ref(depth_ref_), T21(T21_) {
        projection = VecVector2d(px_ref.size(), Eigen::Vector2d(0, 0));
    }
 
    /// accumulate jacobians in a range
//    void accumulate_jacobian(const cv::Range &range);
 
 
    /// get hessian matrix
    Matrix6d hessian() const { return H; }
 
    /// get bias
    Vector6d bias() const { return b; }
 
    /// get total cost
    double cost_func() const { return cost; }
 
    /// get projected points
    VecVector2d projected_points() const { return projection; }
 
    /// reset h, b, cost to zero
    void reset() {
        H = Matrix6d::Zero();
        b = Vector6d::Zero();
        cost = 0;
    }
 
    virtual void operator()(const cv::Range& range) const {
 
        // parameters
        const int half_patch_size = 1;
        int cnt_good = 0;
        Matrix6d hessian = Matrix6d::Zero();
        Vector6d bias = Vector6d::Zero();
        double cost_tmp = 0;
 
        for (int i = range.start; i < range.end; i++) {
 
            // compute the projection in the second image
            Eigen::Vector3d point_ref =
                    depth_ref[i] * Eigen::Vector3d((px_ref[i][0] - cx) / fx, (px_ref[i][1] - cy) / fy, 1);
            Eigen::Vector3d point_cur = T21 * point_ref;
            if (point_cur[2] < 0)   // depth invalid
                continue;
 
            float u = fx * point_cur[0] / point_cur[2] + cx, v = fy * point_cur[1] / point_cur[2] + cy;
            if (u < half_patch_size || u > img2.cols - half_patch_size || v < half_patch_size ||
                v > img2.rows - half_patch_size)
                continue;
 
            projection[i] = Eigen::Vector2d(u, v);
            double X = point_cur[0], Y = point_cur[1], Z = point_cur[2],
                    Z2 = Z * Z, Z_inv = 1.0 / Z, Z2_inv = Z_inv * Z_inv;
            cnt_good++;
 
            // and compute error and jacobian
            for (int x = -half_patch_size; x <= half_patch_size; x++)
                for (int y = -half_patch_size; y <= half_patch_size; y++) {
 
                    double error = GetPixelValue(img1, px_ref[i][0] + x, px_ref[i][1] + y) -
                                   GetPixelValue(img2, u + x, v + y);
                    Matrix26d J_pixel_xi;
                    Eigen::Vector2d J_img_pixel;
 
                    J_pixel_xi(0, 0) = fx * Z_inv;
                    J_pixel_xi(0, 1) = 0;
                    J_pixel_xi(0, 2) = -fx * X * Z2_inv;
                    J_pixel_xi(0, 3) = -fx * X * Y * Z2_inv;
                    J_pixel_xi(0, 4) = fx + fx * X * X * Z2_inv;
                    J_pixel_xi(0, 5) = -fx * Y * Z_inv;
 
                    J_pixel_xi(1, 0) = 0;
                    J_pixel_xi(1, 1) = fy * Z_inv;
                    J_pixel_xi(1, 2) = -fy * Y * Z2_inv;
                    J_pixel_xi(1, 3) = -fy - fy * Y * Y * Z2_inv;
                    J_pixel_xi(1, 4) = fy * X * Y * Z2_inv;
                    J_pixel_xi(1, 5) = fy * X * Z_inv;
 
                    J_img_pixel = Eigen::Vector2d(
                            0.5 * (GetPixelValue(img2, u + 1 + x, v + y) - GetPixelValue(img2, u - 1 + x, v + y)),
                            0.5 * (GetPixelValue(img2, u + x, v + 1 + y) - GetPixelValue(img2, u + x, v - 1 + y))
                    );
 
                    // total jacobian
                    Vector6d J = -1.0 * (J_img_pixel.transpose() * J_pixel_xi).transpose();
 
                    hessian += J * J.transpose();
                    bias += -error * J;
                    cost_tmp += error * error;
                }
        }
 
        if (cnt_good) {
            // set hessian, bias and cost
            unique_lock<mutex> lck(hessian_mutex);
            H += hessian;
            b += bias;
            cost += cost_tmp / cnt_good;
        }
    }
 
 
};
 
 
 
int main(int argc, char **argv) {
 
    cv::Mat left_img = cv::imread(left_file, 0);
    cv::Mat disparity_img = cv::imread(disparity_file, 0);
    if (left_img.empty() || disparity_img.empty())
    {
        std::cout << "!!! Failed imread(): image not found" << std::endl;
        return 1;
    }
    // let's randomly pick pixels in the first image and generate some 3d points in the first image's frame
    cv::RNG rng;
    int nPoints = 2000;
    int border = 20;
    VecVector2d pixels_ref;
    vector<double> depth_ref;
    cout << "left_img.cols" << left_img.cols << endl;
    cout << "left_img: " << left_img << endl;
    // generate pixels in ref and load depth data
    for (int i = 0; i < nPoints; i++) {
        int x = rng.uniform(border, left_img.cols - border);  // don't pick pixels close to the border
        int y = rng.uniform(border, left_img.rows - border);  // don't pick pixels close to the border
        int disparity = disparity_img.at<uchar>(y, x);
        if (disparity <= 0) continue;  // added guard: zero disparity would give infinite depth
        double depth = fx * baseline / disparity; // convert disparity to depth
        depth_ref.push_back(depth);
        pixels_ref.push_back(Eigen::Vector2d(x, y));
    }
 
    // estimate the poses of 000001.png ~ 000005.png using this information
    Sophus::SE3d T_cur_ref;
 
    for (int i = 1; i < 6; i++) {  // 000001.png ~ 000005.png
        cv::Mat img = cv::imread((fmt_others % i).str(), 0);
        // try single layer by uncomment this line
        // DirectPoseEstimationSingleLayer(left_img, img, pixels_ref, depth_ref, T_cur_ref);
        DirectPoseEstimationMultiLayer(left_img, img, pixels_ref, depth_ref, T_cur_ref);
    }
    return 0;
}
 
void DirectPoseEstimationSingleLayer(
    const cv::Mat &img1,
    const cv::Mat &img2,
    const VecVector2d &px_ref,
    const vector<double> depth_ref,
    Sophus::SE3d &T21) {
 
    const int iterations = 10;
    double cost = 0, lastCost = 0;
    auto t1 = chrono::steady_clock::now();
    JacobianAccumulator jaco_accu(img1, img2, px_ref, depth_ref, T21);
 
    for (int iter = 0; iter < iterations; iter++) {
        jaco_accu.reset();
        cv::parallel_for_(cv::Range(0, px_ref.size()), jaco_accu);
        Matrix6d H = jaco_accu.hessian();
        Vector6d b = jaco_accu.bias();
 
        // solve update and put it into estimation
        Vector6d update = H.ldlt().solve(b);
        T21 = Sophus::SE3d::exp(update) * T21;
        cost = jaco_accu.cost_func();
 
        if (std::isnan(update[0])) {
            // this sometimes occurs when the patch is all black or all white and H is not invertible
            cout << "update is nan" << endl;
            break;
        }
        if (iter > 0 && cost > lastCost) {
            cout << "cost increased: " << cost << ", " << lastCost << endl;
            break;
        }
        if (update.norm() < 1e-3) {
            // converge
            break;
        }
 
        lastCost = cost;
        cout << "iteration: " << iter << ", cost: " << cost << endl;
    }
 
    cout << "T21 = \n" << T21.matrix() << endl;
    auto t2 = chrono::steady_clock::now();
    auto time_used = chrono::duration_cast<chrono::duration<double>>(t2 - t1);
    cout << "direct method for single layer: " << time_used.count() << endl;
 
    // plot the projected pixels here
    cv::Mat img2_show;
    cv::cvtColor(img2, img2_show, CV_GRAY2BGR);
    VecVector2d projection = jaco_accu.projected_points();
    for (size_t i = 0; i < px_ref.size(); ++i) {
        auto p_ref = px_ref[i];
        auto p_cur = projection[i];
        if (p_cur[0] > 0 && p_cur[1] > 0) {
            cv::circle(img2_show, cv::Point2f(p_cur[0], p_cur[1]), 2, cv::Scalar(0, 250, 0), 2);
            cv::line(img2_show, cv::Point2f(p_ref[0], p_ref[1]), cv::Point2f(p_cur[0], p_cur[1]),
                     cv::Scalar(0, 250, 0));
        }
    }
    cv::imshow("current", img2_show);
    cv::waitKey();
}
 
 
void DirectPoseEstimationMultiLayer(
    const cv::Mat &img1,
    const cv::Mat &img2,
    const VecVector2d &px_ref,
    const vector<double> depth_ref,
    Sophus::SE3d &T21) {
 
    // parameters
    int pyramids = 4;
    double pyramid_scale = 0.5;
    double scales[] = {1.0, 0.5, 0.25, 0.125};
 
    // create pyramids
    vector<cv::Mat> pyr1, pyr2; // image pyramids
    for (int i = 0; i < pyramids; i++) {
        if (i == 0) {
            pyr1.push_back(img1);
            pyr2.push_back(img2);
        } else {
            cv::Mat img1_pyr, img2_pyr;
            cv::resize(pyr1[i - 1], img1_pyr,
                       cv::Size(pyr1[i - 1].cols * pyramid_scale, pyr1[i - 1].rows * pyramid_scale));
            cv::resize(pyr2[i - 1], img2_pyr,
                       cv::Size(pyr2[i - 1].cols * pyramid_scale, pyr2[i - 1].rows * pyramid_scale));
            pyr1.push_back(img1_pyr);
            pyr2.push_back(img2_pyr);
        }
    }
 
    double fxG = fx, fyG = fy, cxG = cx, cyG = cy;  // backup the old values
    for (int level = pyramids - 1; level >= 0; level--) {
        VecVector2d px_ref_pyr; // set the keypoints in this pyramid level
        for (auto &px: px_ref) {
            px_ref_pyr.push_back(scales[level] * px);
        }
 
        // scale fx, fy, cx, cy in different pyramid levels
        fx = fxG * scales[level];
        fy = fyG * scales[level];
        cx = cxG * scales[level];
        cy = cyG * scales[level];
        DirectPoseEstimationSingleLayer(pyr1[level], pyr2[level], px_ref_pyr, depth_ref, T21);
    }
 
}
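For reference, the J_pixel_xi entries filled in above are the standard 2x6 derivative of the projected pixel (u, v) with respect to a left perturbation \delta\xi of the pose, evaluated at the transformed point P' = (X, Y, Z) = T_{21} P, as derived in the book:

\frac{\partial (u, v)}{\partial \delta \xi} =
\begin{bmatrix}
\frac{f_x}{Z} & 0 & -\frac{f_x X}{Z^2} & -\frac{f_x X Y}{Z^2} & f_x + \frac{f_x X^2}{Z^2} & -\frac{f_x Y}{Z} \\
0 & \frac{f_y}{Z} & -\frac{f_y Y}{Z^2} & -f_y - \frac{f_y Y^2}{Z^2} & \frac{f_y X Y}{Z^2} & \frac{f_y X}{Z}
\end{bmatrix}

The total Jacobian accumulated into the Hessian is then J = -(J_img_pixel^T J_pixel_xi)^T, i.e. the image gradient chained with this projection Jacobian.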

The image paths can also be changed to absolute paths. ./optical_flow then displays the single-level, multi-level, and OpenCV tracking results side by side, and ./build/direct_method prints the estimated T21 at each pyramid level and shows the projected points for each frame. (The run-result screenshots are omitted here.)
