Visualizing KITTI with matplotlib

Setting up kitti_object_vis didn't work out for me, so instead I borrow a few functions from kitti_object_vis and do the visualization directly with matplotlib.

import os

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (registers the '3d' projection)

def roty(t):
    """ Rotation about the y-axis. """
    c = np.cos(t)
    s = np.sin(t)
    return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])

def compute_box_3d(obj):
    """ Takes an object and a projection matrix (P) and projects the 3d
        bounding box into the image plane.
        Returns:
            corners_3d: (8,3) array in in rect camera coord.
    """
    # compute rotational matrix around yaw axis
    R = roty(obj.ry)

    # 3d bounding box dimensions
    l = obj.l
    w = obj.w
    h = obj.h

    # 3d bounding box corners
    x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
    y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
    z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]

    # rotate, then translate to the object's position in rect camera coordinates
    corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
    corners_3d[0, :] = corners_3d[0, :] + obj.t[0]
    corners_3d[1, :] = corners_3d[1, :] + obj.t[1]
    corners_3d[2, :] = corners_3d[2, :] + obj.t[2]

    return np.transpose(corners_3d)
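
# Quick sanity sketch (my addition, not part of the original script; call it by
# hand if you want to verify): for a box with ry=0, compute_box_3d should put
# the first four corners on the bottom face (y = t_y, camera y points down) and
# the last four on the top face (y = t_y - h). types.SimpleNamespace stands in
# for a parsed Object3d label here.
def _demo_compute_box_3d():
    from types import SimpleNamespace
    obj = SimpleNamespace(l=4.0, w=1.8, h=1.5, ry=0.0, t=(0.0, 1.0, 10.0))
    corners = compute_box_3d(obj)
    print(corners.shape)   # (8, 3)
    print(corners[:4, 1])  # bottom face y: all 1.0
    print(corners[4:, 1])  # top face y: all -0.5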

def read_label(label_filename):
    with open(label_filename) as f:
        lines = [line.rstrip() for line in f]
    # skip DontCare entries, which carry no usable 3D box
    objects = [Object3d(line) for line in lines if not line.startswith("DontCare")]
    return objects

def choice_points(points, k=int(1e4)):
    """ Randomly subsample k points (without replacement) to keep the 3D scatter responsive. """
    k = min(k, len(points))
    selected_indices = np.random.choice(len(points), k, replace=False)
    return points[selected_indices]

def inverse_rigid_trans(Tr):
    """ Inverse a rigid body transform matrix (3x4 as [R|t])
        [R'|-R't; 0|1]
    """
    inv_Tr = np.zeros_like(Tr)  # 3x4
    inv_Tr[0:3, 0:3] = np.transpose(Tr[0:3, 0:3])
    inv_Tr[0:3, 3] = np.dot(-np.transpose(Tr[0:3, 0:3]), Tr[0:3, 3])
    return inv_Tr
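
# Sanity sketch with made-up numbers (my addition, not from the post): mapping
# a point forward through a rigid transform and back through its
# inverse_rigid_trans result should cancel out exactly.
def _demo_inverse_rigid_trans():
    Tr = np.hstack([roty(0.3), np.array([[1.0], [2.0], [3.0]])])  # 3x4 [R|t]
    inv_Tr = inverse_rigid_trans(Tr)
    p = np.array([4.0, 5.0, 6.0])
    p_fwd = np.dot(Tr[:, :3], p) + Tr[:, 3]
    p_back = np.dot(inv_Tr[:, :3], p_fwd) + inv_Tr[:, 3]
    assert np.allclose(p, p_back)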

class Calibration(object):
    """ Calibration matrices and utils
        3d XYZ in <label>.txt are in rect camera coord.
        2d box xy are in image2 coord
        Points in <lidar>.bin are in Velodyne coord.

        y_image2 = P^2_rect * x_rect
        y_image2 = P^2_rect * R0_rect * Tr_velo_to_cam * x_velo
        x_ref = Tr_velo_to_cam * x_velo
        x_rect = R0_rect * x_ref

        P^2_rect = [f^2_u,  0,      c^2_u,  -f^2_u b^2_x;
                    0,      f^2_v,  c^2_v,  -f^2_v b^2_y;
                    0,      0,      1,      0]
                 = K * [1|t]

        image2 coord:
         ----> x-axis (u)
        |
        |
        v y-axis (v)

        velodyne coord:
        front x, left y, up z

        rect/ref camera coord:
        right x, down y, front z

        Ref (KITTI paper): http://www.cvlibs.net/publications/Geiger2013IJRR.pdf

        TODO(rqi): do matrix multiplication only once for each projection.
    """

    def __init__(self, calib_filepath, from_video=False):
        if from_video:
            calibs = self.read_calib_from_video(calib_filepath)
        else:
            calibs = self.read_calib_file(calib_filepath)
        # Projection matrix from rect camera coord to image2 coord
        self.P = calibs["P2"]
        self.P = np.reshape(self.P, [3, 4])
        # Rigid transform from Velodyne coord to reference camera coord
        self.V2C = calibs["Tr_velo_to_cam"]
        self.V2C = np.reshape(self.V2C, [3, 4])
        self.C2V = inverse_rigid_trans(self.V2C)
        # Rotation from reference camera coord to rect camera coord
        self.R0 = calibs["R0_rect"]
        self.R0 = np.reshape(self.R0, [3, 3])

        # Camera intrinsics and extrinsics
        self.c_u = self.P[0, 2]
        self.c_v = self.P[1, 2]
        self.f_u = self.P[0, 0]
        self.f_v = self.P[1, 1]
        self.b_x = self.P[0, 3] / (-self.f_u)  # relative
        self.b_y = self.P[1, 3] / (-self.f_v)

    def read_calib_file(self, filepath):
        """ Read in a calibration file and parse into a dictionary.
        Ref: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
        """
        data = {}
        with open(filepath, "r") as f:
            for line in f.readlines():
                line = line.rstrip()
                if len(line) == 0:
                    continue
                key, value = line.split(":", 1)
                # The only non-float values in these files are dates, which
                # we don't care about anyway
                try:
                    data[key] = np.array([float(x) for x in value.split()])
                except ValueError:
                    pass

        return data

    def read_calib_from_video(self, calib_root_dir):
        """ Read calibration for camera 2 from video calib files.
            there are calib_cam_to_cam and calib_velo_to_cam under the calib_root_dir
        """
        data = {}
        cam2cam = self.read_calib_file(
            os.path.join(calib_root_dir, "calib_cam_to_cam.txt")
        )
        velo2cam = self.read_calib_file(
            os.path.join(calib_root_dir, "calib_velo_to_cam.txt")
        )
        Tr_velo_to_cam = np.zeros((3, 4))
        Tr_velo_to_cam[0:3, 0:3] = np.reshape(velo2cam["R"], [3, 3])
        Tr_velo_to_cam[:, 3] = velo2cam["T"]
        data["Tr_velo_to_cam"] = np.reshape(Tr_velo_to_cam, [12])
        data["R0_rect"] = cam2cam["R_rect_00"]
        data["P2"] = cam2cam["P_rect_02"]
        return data

    def cart2hom(self, pts_3d):
        """ Input: nx3 points in Cartesian
            Oupput: nx4 points in Homogeneous by pending 1
        """
        n = pts_3d.shape[0]
        pts_3d_hom = np.hstack((pts_3d, np.ones((n, 1))))
        return pts_3d_hom

    # ===========================
    # ------- 3d to 3d ----------
    # ===========================
    def project_velo_to_ref(self, pts_3d_velo):
        pts_3d_velo = self.cart2hom(pts_3d_velo)  # nx4
        return np.dot(pts_3d_velo, np.transpose(self.V2C))

    def project_ref_to_velo(self, pts_3d_ref):
        pts_3d_ref = self.cart2hom(pts_3d_ref)  # nx4
        return np.dot(pts_3d_ref, np.transpose(self.C2V))

    def project_rect_to_ref(self, pts_3d_rect):
        """ Input and Output are nx3 points """
        return np.transpose(np.dot(np.linalg.inv(self.R0), np.transpose(pts_3d_rect)))

    def project_ref_to_rect(self, pts_3d_ref):
        """ Input and Output are nx3 points """
        return np.transpose(np.dot(self.R0, np.transpose(pts_3d_ref)))

    def project_rect_to_velo(self, pts_3d_rect):
        """ Input: nx3 points in rect camera coord.
            Output: nx3 points in velodyne coord.
        """
        pts_3d_ref = self.project_rect_to_ref(pts_3d_rect)
        return self.project_ref_to_velo(pts_3d_ref)

    def project_velo_to_rect(self, pts_3d_velo):
        pts_3d_ref = self.project_velo_to_ref(pts_3d_velo)
        return self.project_ref_to_rect(pts_3d_ref)

    # ===========================
    # ------- 3d to 2d ----------
    # ===========================
    def project_rect_to_image(self, pts_3d_rect):
        """ Input: nx3 points in rect camera coord.
            Output: nx2 points in image2 coord.
        """
        pts_3d_rect = self.cart2hom(pts_3d_rect)
        pts_2d = np.dot(pts_3d_rect, np.transpose(self.P))  # nx3
        pts_2d[:, 0] /= pts_2d[:, 2]
        pts_2d[:, 1] /= pts_2d[:, 2]
        return pts_2d[:, 0:2]

    def project_velo_to_image(self, pts_3d_velo):
        """ Input: nx3 points in velodyne coord.
            Output: nx2 points in image2 coord.
        """
        pts_3d_rect = self.project_velo_to_rect(pts_3d_velo)
        return self.project_rect_to_image(pts_3d_rect)

    def project_8p_to_4p(self, pts_2d):
        x0 = np.min(pts_2d[:, 0])
        x1 = np.max(pts_2d[:, 0])
        y0 = np.min(pts_2d[:, 1])
        y1 = np.max(pts_2d[:, 1])
        x0 = max(0, x0)
        # x1 = min(x1, proj.image_width)
        y0 = max(0, y0)
        # y1 = min(y1, proj.image_height)
        return np.array([x0, y0, x1, y1])

    def project_velo_to_4p(self, pts_3d_velo):
        """ Input: nx3 points in velodyne coord.
            Output: 4 points in image2 coord.
        """
        pts_2d_velo = self.project_velo_to_image(pts_3d_velo)
        return self.project_8p_to_4p(pts_2d_velo)

    # ===========================
    # ------- 2d to 3d ----------
    # ===========================
    def project_image_to_rect(self, uv_depth):
        """ Input: nx3 first two channels are uv, 3rd channel
                   is depth in rect camera coord.
            Output: nx3 points in rect camera coord.
        """
        n = uv_depth.shape[0]
        x = ((uv_depth[:, 0] - self.c_u) * uv_depth[:, 2]) / self.f_u + self.b_x
        y = ((uv_depth[:, 1] - self.c_v) * uv_depth[:, 2]) / self.f_v + self.b_y
        pts_3d_rect = np.zeros((n, 3))
        pts_3d_rect[:, 0] = x
        pts_3d_rect[:, 1] = y
        pts_3d_rect[:, 2] = uv_depth[:, 2]
        return pts_3d_rect

    def project_image_to_velo(self, uv_depth):
        pts_3d_rect = self.project_image_to_rect(uv_depth)
        return self.project_rect_to_velo(pts_3d_rect)

    def project_depth_to_velo(self, depth, constraint_box=True):
        # note: get_depth_pt3d and cbox are helpers defined in kitti_object_vis
        # and are not reproduced here; this method is unused by the demo below
        depth_pt3d = get_depth_pt3d(depth)
        depth_UVDepth = np.zeros_like(depth_pt3d)
        depth_UVDepth[:, 0] = depth_pt3d[:, 1]
        depth_UVDepth[:, 1] = depth_pt3d[:, 0]
        depth_UVDepth[:, 2] = depth_pt3d[:, 2]
        # print("depth_pt3d:",depth_UVDepth.shape)
        depth_pc_velo = self.project_image_to_velo(depth_UVDepth)
        # print("dep_pc_velo:",depth_pc_velo.shape)
        if constraint_box:
            depth_box_fov_inds = (
                (depth_pc_velo[:, 0] < cbox[0][1])
                & (depth_pc_velo[:, 0] >= cbox[0][0])
                & (depth_pc_velo[:, 1] < cbox[1][1])
                & (depth_pc_velo[:, 1] >= cbox[1][0])
                & (depth_pc_velo[:, 2] < cbox[2][1])
                & (depth_pc_velo[:, 2] >= cbox[2][0])
            )
            depth_pc_velo = depth_pc_velo[depth_box_fov_inds]
        return depth_pc_velo
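
# Usage sketch (my addition; calib_path is the global defined at the bottom of
# the script): a velodyne -> rect -> velodyne round trip through Calibration
# should return the original points up to floating point error.
def _demo_calibration_round_trip():
    calib = Calibration(calib_path)
    pts_velo = np.array([[10.0, 1.0, -1.0], [20.0, -2.0, 0.5]])
    pts_rect = calib.project_velo_to_rect(pts_velo)
    assert np.allclose(calib.project_rect_to_velo(pts_rect), pts_velo, atol=1e-4)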

class Object3d(object):
    """ 3d object label """

    def __init__(self, label_file_line):
        data = label_file_line.split(" ")
        data[1:] = [float(x) for x in data[1:]]

        # extract label, truncation, occlusion
        self.type = data[0]  # 'Car', 'Pedestrian', ...
        self.truncation = data[1]  # truncated pixel ratio [0..1]
        self.occlusion = int(
            data[2]
        )  # 0=visible, 1=partly occluded, 2=fully occluded, 3=unknown
        self.alpha = data[3]  # object observation angle [-pi..pi]

        # extract 2d bounding box in 0-based coordinates
        self.xmin = data[4]  # left
        self.ymin = data[5]  # top
        self.xmax = data[6]  # right
        self.ymax = data[7]  # bottom
        self.box2d = np.array([self.xmin, self.ymin, self.xmax, self.ymax])

        # extract 3d bounding box information
        self.h = data[8]  # box height
        self.w = data[9]  # box width
        self.l = data[10]  # box length (in meters)
        self.t = (data[11], data[12], data[13])  # location (x,y,z) in camera coord.
        self.ry = data[14]  # yaw angle (around Y-axis in camera coordinates) [-pi..pi]

    def estimate_difficulty(self):
        """ Estimate the detection difficulty of the object, as defined on the KITTI website """
        # pixel height of the 2d bounding box
        bb_height = np.abs(self.ymax - self.ymin)

        if bb_height >= 40 and self.occlusion == 0 and self.truncation <= 0.15:
            return "Easy"
        elif bb_height >= 25 and self.occlusion in [0, 1] and self.truncation <= 0.30:
            return "Moderate"
        elif (
            bb_height >= 25 and self.occlusion in [0, 1, 2] and self.truncation <= 0.50
        ):
            return "Hard"
        else:
            return "Unknown"

    def print_object(self):
        print(
            "Type, truncation, occlusion, alpha: %s, %d, %d, %f"
            % (self.type, self.truncation, self.occlusion, self.alpha)
        )
        print(
            "2d bbox (x0,y0,x1,y1): %f, %f, %f, %f"
            % (self.xmin, self.ymin, self.xmax, self.ymax)
        )
        print("3d bbox h,w,l: %f, %f, %f" % (self.h, self.w, self.l))
        print(
            "3d bbox location, ry: (%f, %f, %f), %f"
            % (self.t[0], self.t[1], self.t[2], self.ry)
        )
        print("Difficulty of estimation: {}".format(self.estimate_diffculty()))

def load_velo_scan(velo_filename, dtype=np.float32, n_vec=4):
    """ Load a KITTI velodyne .bin file as an (N, n_vec) array:
        x, y, z, reflectance. """
    scan = np.fromfile(velo_filename, dtype=dtype)
    return scan.reshape((-1, n_vec))
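
# Shape note (a sketch; lidar_path is the global defined at the bottom of the
# script): each KITTI .bin stores float32 quadruples, so the loaded array is
# (N, 4) with columns x, y, z, reflectance in the velodyne frame.
def _demo_load_velo_scan():
    scan = load_velo_scan(lidar_path)
    print(scan.shape, scan.dtype)  # e.g. (120000, 4) float32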
    
def draw_gt_boxes3d(
    gt_boxes3d,
    ax,
    color=(0, 1, 0),
    line_width=2,
    draw_text=True,
    text_scale=(1, 1, 1),
    color_list=None,
    label="",
):
    """ Draw 3D bounding boxes
    Args:
        gt_boxes3d: numpy array (n,8,3) for XYZs of the box corners
        ax: matplotlib axes handler
        color: RGB value tuple in range (0,1), box line color
        line_width: box line width
        draw_text: boolean, if true, write box indices beside boxes
        text_scale: three number tuple
        color_list: a list of RGB tuple, if not None, overwrite color.
        label: label to display beside the boxes
    Returns:
        ax: updated axes
    """
    num = len(gt_boxes3d)
    for n in range(num):
        b = gt_boxes3d[n]
        if color_list is not None:
            color = color_list[n]
        if draw_text:
            ax.text(
                b[4, 0],
                b[4, 1],
                b[4, 2],
                label,
                color=color,
                fontsize=10,
                ha='center',
                va='center',
            )
        for k in range(0, 4):
            # bottom face edge
            i, j = k, (k + 1) % 4
            ax.plot(
                [b[i, 0], b[j, 0]],
                [b[i, 1], b[j, 1]],
                [b[i, 2], b[j, 2]],
                color=color,
                linewidth=line_width,
            )

            # top face edge
            i, j = k + 4, (k + 1) % 4 + 4
            ax.plot(
                [b[i, 0], b[j, 0]],
                [b[i, 1], b[j, 1]],
                [b[i, 2], b[j, 2]],
                color=color,
                linewidth=line_width,
            )

            # vertical edge between the two faces
            i, j = k, k + 4
            ax.plot(
                [b[i, 0], b[j, 0]],
                [b[i, 1], b[j, 1]],
                [b[i, 2], b[j, 2]],
                color=color,
                linewidth=line_width,
            )

    return ax
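
# Minimal usage sketch (synthetic unit cube, my addition) to check the corner
# ordering draw_gt_boxes3d expects: indices 0-3 form one face, 4-7 the other.
def _demo_draw_gt_boxes3d():
    x = [0, 1, 1, 0, 0, 1, 1, 0]
    y = [0, 0, 1, 1, 0, 0, 1, 1]
    z = [0, 0, 0, 0, 1, 1, 1, 1]
    box = np.stack([x, y, z], axis=1).astype(float)[None]  # (1, 8, 3)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    draw_gt_boxes3d(box, ax, label="cube")
    plt.show()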


color_dt = {"Car": (0, 1, 0), "Pedestrian": (0, 1, 1), "Cyclist": (1, 1, 0)}
def MyDraw():
    points = load_velo_scan(lidar_path)
    objs = read_label(label_path)
    # compute_box_3d returns corners in rect camera coordinates; convert them
    # to velodyne coordinates so they line up with the point cloud
    box_corners = [compute_box_3d(obj) for obj in objs]
    calib = Calibration(calib_path)
    box_corners = [calib.project_rect_to_velo(box_corner) for box_corner in box_corners]
    if reduce_points:
        points = choice_points(points)
    xyz = points[:, :3]
    color = points[:, 3]
    min_xyz = np.min(xyz, axis=0)
    max_xyz = np.max(xyz, axis=0)
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(111, projection='3d')
    # reflectance drives both the colormap and per-point transparency
    ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], c=color, cmap='jet', s=1, alpha=points[:, 3])
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')

    # axis limits: either fit them to the data (commented out) or use a fixed cube
    # ax.set_xlim(min_xyz[0], max_xyz[0])
    # ax.set_ylim(min_xyz[1], max_xyz[1])
    # ax.set_zlim(min_xyz[2], max_xyz[2])
    ax.set_xlim(-70, 70)
    ax.set_ylim(-70, 70)
    ax.set_zlim(-70, 70)

    # fall back to red for label types without an entry (e.g. Van, Truck, Misc)
    ax = draw_gt_boxes3d(gt_boxes3d=box_corners, ax=ax,
                         color_list=[color_dt.get(obj.type, (1, 0, 0)) for obj in objs])

    plt.axis('off')
    plt.grid(False)
    plt.tight_layout()
    # note: plt.savefig must be called before plt.show(), or the saved file is blank
    plt.show()
num="004139"
lidar_path = "/mnt/kitti/training/velodyne/%s.bin"%(num)
label_path= "/mnt/kitti/training/label_2/%s.txt"%(num)
calib_path="/mnt/kitti/training/calib/%s.txt"%(num)
reduce_points=True
MyDraw()
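
To save the rendering to disk rather than opening a window (a small sketch; the output filename is my own choice, not from the original post), call savefig before show at the end of MyDraw:

    plt.savefig("kitti_%s.png" % num, dpi=200, bbox_inches='tight')
    plt.show()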
