Object Tracking Algorithm (ByteTrack): TensorRT Deployment Tutorial

1. Set up the Python environment on the local machine

conda create -n bytetrace_env python=3.8
conda activate bytetrace_env
conda install pytorch torchvision cudatoolkit=10.1 -c pytorch

Check that the GPU is available; the following steps will not work without one:

import torch
print(torch.cuda.is_available())

Install ByteTrack:

git clone https://github.com/ifzhang/ByteTrack.git
cd ByteTrack
pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
python setup.py develop

If these commands complete without errors, the installation succeeded.
Install pycocotools:

pip install cython
pip install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'

or, on Linux (Gitee mirror):

pip install git+https://gitee.com/pursuit_zhangyu/cocoapi.git#subdirectory=PythonAPI

On Windows:

pip install pycocotools-windows
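To sanity-check the installation, a minimal import test (this only verifies that the compiled C extension loads):

from pycocotools.coco import COCO  # fails here if the C extension did not build
print("pycocotools OK")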

2. Set up the TensorRT environment

Download the TensorRT package:
TensorRT-8.4.3.1.Windows10.x86_64.cuda-10.2.cudnn8.4.zip
Copy all the DLLs from the TensorRT lib directory into
C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v10.0\bin
and add that directory to the PATH environment variable.
The virtual environment uses Python 3.8, so install the matching cp38 wheel bundled with the TensorRT zip:

pip install tensorrt-8.4.3.1-cp38-none-win_amd64.whl
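To confirm that the wheel and the copied DLLs work together, a quick sanity check (a minimal sketch):

import tensorrt as trt
print(trt.__version__)  # expect 8.4.3.1
# Creating a Builder loads the nvinfer DLLs, so this fails fast if PATH is wrong.
builder = trt.Builder(trt.Logger(trt.Logger.WARNING))
print("TensorRT OK")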

3. Convert the model

https://pan.baidu.com/s/1PiP1kQfgxAIrnGUbFP6Wfg (extraction code: qflm)
Download bytetrack_s_mot17.pth.tar from the link above, create a pretrained directory to hold it, then run:

python tools/trt.py -f exps/example/mot/yolox_s_mix_det.py -c pretrained/bytetrack_s_mot17.pth.tar

The conversion writes both the TensorRT engine (model_trt.engine) and a pth model to D:\git_clone\ByteTrack-main\YOLOX_outputs\yolox_s_mix_det.
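Before moving on to C++, you can check that the engine deserializes; a minimal sketch (the engine path assumes the output directory above):

import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
with open(r"D:\git_clone\ByteTrack-main\YOLOX_outputs\yolox_s_mix_det\model_trt.engine", "rb") as f:
    engine = trt.Runtime(logger).deserialize_cuda_engine(f.read())
assert engine is not None
# The C++ demo below expects bindings named "input_0" and "output_0".
for i in range(engine.num_bindings):
    print(engine.get_binding_name(i), engine.get_binding_shape(i))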

4. Generate the Eigen library with CMake and build it with VS2015

https://pan.baidu.com/s/15kEfCxpy-T7tz60msxxExg (extraction code: ueq4)
Unpack eigen-3.3.9, point cmake-gui at the source tree with a separate build directory, generate a VS2015 x64 solution, and build the INSTALL target so the headers end up in build\install\include\eigen3 (the path referenced by the CMakeLists.txt below).

5. Download OpenCV 4.5.0

https://nchc.dl.sourceforge.net/project/opencvlibrary/4.5.0/opencv-4.5.0-vc14_vc15.exe?viasf=1
Install it to D:\opencv450.

6. Generate the bytetrack project with CMake and build it with VS2015

Modify CMakeLists.txt as follows:

cmake_minimum_required(VERSION 2.6)

project(bytetrack)

add_definitions(-std=c++11)

option(CUDA_USE_STATIC_CUDA_RUNTIME OFF)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_BUILD_TYPE Debug)

find_package(CUDA REQUIRED)

include_directories(${PROJECT_SOURCE_DIR}/include)
include_directories(D:/VS2015_CUDA/TensorRT/eigen-3.3.9/build/install/include/eigen3)
link_directories(${PROJECT_SOURCE_DIR}/include)
# include and link dirs of cuda and tensorrt; adapt them if yours differ
# cuda
include_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/include")
link_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/lib/x64")
# cudnn
include_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/include")
link_directories("C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.0/lib/x64")
# tensorrt
include_directories(D:/VS2015_CUDA/TensorRT/TensorRT-8.4.3.1/include)
link_directories(D:/VS2015_CUDA/TensorRT/TensorRT-8.4.3.1/lib)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -D_MWAITXINTRIN_H_INCLUDED")

set(OpenCV_INCLUDE_DIRS D:/opencv450/build/include)
set(OpenCV_LIBS D:/opencv450/build/x64/vc14/lib)
include_directories(${OpenCV_INCLUDE_DIRS})

file(GLOB My_Source_Files ${PROJECT_SOURCE_DIR}/src/*.cpp)
add_executable(bytetrack ${My_Source_Files})
target_link_libraries(bytetrack nvinfer)
target_link_libraries(bytetrack cudart)
target_link_libraries(bytetrack ${OpenCV_LIBS})

add_definitions(-O2 -pthread)

Configure and generate the project with CMake.
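If you prefer the command line to cmake-gui, the equivalent configure/generate step is (assuming the sources sit in D:\VS2015_CUDA\TensorRT\cpp, as in the paths used later):

cd D:\VS2015_CUDA\TensorRT\cpp
mkdir build
cd build
cmake -G "Visual Studio 14 2015 Win64" ..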
Open the generated VS2015 solution and check the project configuration (include and library paths). OpenCV can also be added to the project manually and compiled in.
Modify bytetrack.cpp. The upstream demo has a bug here: the frame height is read with the width property, which corrupts the size of the saved demo.mp4 later on. Make sure both lines read the correct properties:

int img_w = cap.get(cv::CAP_PROP_FRAME_WIDTH);
int img_h = cap.get(cv::CAP_PROP_FRAME_HEIGHT);
Then build the solution to produce bytetrack.exe.

7. Inspect the exe's dependencies with Dependency Walker

Open bytetrack.exe in Dependency Walker and copy any DLLs it reports as missing (TensorRT, CUDA/cuDNN, OpenCV) next to the executable, then run:

bytetrack.exe D:\VS2015_CUDA\TensorRT\cpp\model_trt.engine -i D:\VS2015_CUDA\TensorRT\cpp\palace.mp4

8. Windows source code

bytetrack.cpp

#include <fstream>
#include <iostream>
#include <sstream>
#include <numeric>
#include <chrono>
#include <vector>
#include <opencv2/opencv.hpp>
#include "NvInfer.h"
#include "cuda_runtime_api.h"
#include "logging.h"
#include "BYTETracker.h"

#define CHECK(status) \
    do\
    {\
        auto ret = (status);\
        if (ret != 0)\
        {\
            cerr << "Cuda failure: " << ret << endl;\
            abort();\
        }\
    } while (0)

#define DEVICE 0  // GPU id
#define NMS_THRESH 0.7
#define BBOX_CONF_THRESH 0.1

using namespace nvinfer1;

// stuff we know about the network and the input/output blobs
static const int INPUT_W = 1088;
static const int INPUT_H = 608;
const char* INPUT_BLOB_NAME = "input_0";
const char* OUTPUT_BLOB_NAME = "output_0";
static Logger gLogger;

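// Letterbox preprocessing: scale the frame to fit 1088x608 while keeping the
// aspect ratio, then pad the right/bottom with gray (114,114,114).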
Mat static_resize(Mat& img) {
    float r = min(INPUT_W / (img.cols*1.0), INPUT_H / (img.rows*1.0));
    // r = std::min(r, 1.0f);
    int unpad_w = r * img.cols;
    int unpad_h = r * img.rows;
    Mat re(unpad_h, unpad_w, CV_8UC3);
    resize(img, re, re.size());
    Mat out(INPUT_H, INPUT_W, CV_8UC3, Scalar(114, 114, 114));
    re.copyTo(out(Rect(0, 0, re.cols, re.rows)));
    return out;
}

struct GridAndStride
{
    int grid0;
    int grid1;
    int stride;
};

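// Enumerate one GridAndStride per output cell for each FPN stride (8/16/32),
// matching the layout of the flattened YOLOX head output.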
static void generate_grids_and_stride(const int target_w, const int target_h, vector<int>& strides, vector<GridAndStride>& grid_strides)
{
    for (auto stride : strides)
    {
        GridAndStride GS;
        int num_grid_w = target_w / stride;
        int num_grid_h = target_h / stride;
        for (int g1 = 0; g1 < num_grid_h; g1++)
        {
            for (int g0 = 0; g0 < num_grid_w; g0++)
            {
                GS.grid0 = g0;
                GS.grid1 = g1;
                GS.stride = stride;
                grid_strides.push_back(GS);
            }
        }
    }
}

static inline float intersection_area(const Object& a, const Object& b)
{
    Rect_<float> inter = a.rect & b.rect;
    return inter.area();
}

static void qsort_descent_inplace(vector<Object>& faceobjects, int left, int right)
{
    int i = left;
    int j = right;
    float p = faceobjects[(left + right) / 2].prob;

    while (i <= j)
    {
        while (faceobjects[i].prob > p)
            i++;

        while (faceobjects[j].prob < p)
            j--;

        if (i <= j)
        {
            // swap
            swap(faceobjects[i], faceobjects[j]);

            i++;
            j--;
        }
    }

    #pragma omp parallel sections
    {
        #pragma omp section
        {
            if (left < j) qsort_descent_inplace(faceobjects, left, j);
        }
        #pragma omp section
        {
            if (i < right) qsort_descent_inplace(faceobjects, i, right);
        }
    }
}

static void qsort_descent_inplace(vector<Object>& objects)
{
    if (objects.empty())
        return;

    qsort_descent_inplace(objects, 0, objects.size() - 1);
}

static void nms_sorted_bboxes(const vector<Object>& faceobjects, vector<int>& picked, float nms_threshold)
{
    picked.clear();

    const int n = faceobjects.size();

    vector<float> areas(n);
    for (int i = 0; i < n; i++)
    {
        areas[i] = faceobjects[i].rect.area();
    }

    for (int i = 0; i < n; i++)
    {
        const Object& a = faceobjects[i];

        int keep = 1;
        for (int j = 0; j < (int)picked.size(); j++)
        {
            const Object& b = faceobjects[picked[j]];

            // intersection over union
            float inter_area = intersection_area(a, b);
            float union_area = areas[i] + areas[picked[j]] - inter_area;
            // float IoU = inter_area / union_area
            if (inter_area / union_area > nms_threshold)
                keep = 0;
        }

        if (keep)
            picked.push_back(i);
    }
}


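// Decode the raw YOLOX head output: each anchor row holds
// [dx, dy, log_w, log_h, objectness, class scores...]; keep boxes whose
// objectness*class score exceeds prob_threshold (num_class is 1 for the
// pedestrian-only MOT model).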
static void generate_yolox_proposals(vector<GridAndStride> grid_strides, float* feat_blob, float prob_threshold, vector<Object>& objects)
{
    const int num_class = 1;

    const int num_anchors = grid_strides.size();

    for (int anchor_idx = 0; anchor_idx < num_anchors; anchor_idx++)
    {
        const int grid0 = grid_strides[anchor_idx].grid0;
        const int grid1 = grid_strides[anchor_idx].grid1;
        const int stride = grid_strides[anchor_idx].stride;

        const int basic_pos = anchor_idx * (num_class + 5);

        // yolox/models/yolo_head.py decode logic
        float x_center = (feat_blob[basic_pos+0] + grid0) * stride;
        float y_center = (feat_blob[basic_pos+1] + grid1) * stride;
        float w = exp(feat_blob[basic_pos+2]) * stride;
        float h = exp(feat_blob[basic_pos+3]) * stride;
        float x0 = x_center - w * 0.5f;
        float y0 = y_center - h * 0.5f;

        float box_objectness = feat_blob[basic_pos+4];
        for (int class_idx = 0; class_idx < num_class; class_idx++)
        {
            float box_cls_score = feat_blob[basic_pos + 5 + class_idx];
            float box_prob = box_objectness * box_cls_score;
            if (box_prob > prob_threshold)
            {
                Object obj;
                obj.rect.x = x0;
                obj.rect.y = y0;
                obj.rect.width = w;
                obj.rect.height = h;
                obj.label = class_idx;
                obj.prob = box_prob;

                objects.push_back(obj);
            }

        } // class loop

    } // point anchor loop
}

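// Convert an HWC uint8 BGR frame to a CHW float blob in RGB order,
// normalized with ImageNet mean/std, as expected by the YOLOX engine.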
float* blobFromImage(Mat& img){
    cvtColor(img, img, COLOR_BGR2RGB);

    float* blob = new float[img.total()*3];
    int channels = 3;
    int img_h = img.rows;
    int img_w = img.cols;
    vector<float> mean = {0.485f, 0.456f, 0.406f};
    vector<float> std = {0.229f, 0.224f, 0.225f};
    for (size_t c = 0; c < channels; c++) 
    {
        for (size_t  h = 0; h < img_h; h++) 
        {
            for (size_t w = 0; w < img_w; w++) 
            {
                blob[c * img_w * img_h + h * img_w + w] =
                    (((float)img.at<Vec3b>(h, w)[c]) / 255.0f - mean[c]) / std[c];
            }
        }
    }
    return blob;
}


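// Full post-processing pipeline: decode proposals, sort by score, run NMS,
// then map the surviving boxes back to original-frame coordinates via 1/scale.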
static void decode_outputs(float* prob, vector<Object>& objects, float scale, const int img_w, const int img_h) {
        vector<Object> proposals;
        vector<int> strides = {8, 16, 32};
        vector<GridAndStride> grid_strides;
        generate_grids_and_stride(INPUT_W, INPUT_H, strides, grid_strides);
        generate_yolox_proposals(grid_strides, prob,  BBOX_CONF_THRESH, proposals);
        //std::cout << "num of boxes before nms: " << proposals.size() << std::endl;

        qsort_descent_inplace(proposals);

        vector<int> picked;
        nms_sorted_bboxes(proposals, picked, NMS_THRESH);


        int count = picked.size();

        //std::cout << "num of boxes: " << count << std::endl;

        objects.resize(count);
        for (int i = 0; i < count; i++)
        {
            objects[i] = proposals[picked[i]];

            // adjust offset to original unpadded
            float x0 = (objects[i].rect.x) / scale;
            float y0 = (objects[i].rect.y) / scale;
            float x1 = (objects[i].rect.x + objects[i].rect.width) / scale;
            float y1 = (objects[i].rect.y + objects[i].rect.height) / scale;

            // clip
            // x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
            // y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
            // x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
            // y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);

            objects[i].rect.x = x0;
            objects[i].rect.y = y0;
            objects[i].rect.width = x1 - x0;
            objects[i].rect.height = y1 - y0;
        }
}

const float color_list[80][3] =
{
    {0.000, 0.447, 0.741},
    {0.850, 0.325, 0.098},
    {0.929, 0.694, 0.125},
    {0.494, 0.184, 0.556},
    {0.466, 0.674, 0.188},
    {0.301, 0.745, 0.933},
    {0.635, 0.078, 0.184},
    {0.300, 0.300, 0.300},
    {0.600, 0.600, 0.600},
    {1.000, 0.000, 0.000},
    {1.000, 0.500, 0.000},
    {0.749, 0.749, 0.000},
    {0.000, 1.000, 0.000},
    {0.000, 0.000, 1.000},
    {0.667, 0.000, 1.000},
    {0.333, 0.333, 0.000},
    {0.333, 0.667, 0.000},
    {0.333, 1.000, 0.000},
    {0.667, 0.333, 0.000},
    {0.667, 0.667, 0.000},
    {0.667, 1.000, 0.000},
    {1.000, 0.333, 0.000},
    {1.000, 0.667, 0.000},
    {1.000, 1.000, 0.000},
    {0.000, 0.333, 0.500},
    {0.000, 0.667, 0.500},
    {0.000, 1.000, 0.500},
    {0.333, 0.000, 0.500},
    {0.333, 0.333, 0.500},
    {0.333, 0.667, 0.500},
    {0.333, 1.000, 0.500},
    {0.667, 0.000, 0.500},
    {0.667, 0.333, 0.500},
    {0.667, 0.667, 0.500},
    {0.667, 1.000, 0.500},
    {1.000, 0.000, 0.500},
    {1.000, 0.333, 0.500},
    {1.000, 0.667, 0.500},
    {1.000, 1.000, 0.500},
    {0.000, 0.333, 1.000},
    {0.000, 0.667, 1.000},
    {0.000, 1.000, 1.000},
    {0.333, 0.000, 1.000},
    {0.333, 0.333, 1.000},
    {0.333, 0.667, 1.000},
    {0.333, 1.000, 1.000},
    {0.667, 0.000, 1.000},
    {0.667, 0.333, 1.000},
    {0.667, 0.667, 1.000},
    {0.667, 1.000, 1.000},
    {1.000, 0.000, 1.000},
    {1.000, 0.333, 1.000},
    {1.000, 0.667, 1.000},
    {0.333, 0.000, 0.000},
    {0.500, 0.000, 0.000},
    {0.667, 0.000, 0.000},
    {0.833, 0.000, 0.000},
    {1.000, 0.000, 0.000},
    {0.000, 0.167, 0.000},
    {0.000, 0.333, 0.000},
    {0.000, 0.500, 0.000},
    {0.000, 0.667, 0.000},
    {0.000, 0.833, 0.000},
    {0.000, 1.000, 0.000},
    {0.000, 0.000, 0.167},
    {0.000, 0.000, 0.333},
    {0.000, 0.000, 0.500},
    {0.000, 0.000, 0.667},
    {0.000, 0.000, 0.833},
    {0.000, 0.000, 1.000},
    {0.000, 0.000, 0.000},
    {0.143, 0.143, 0.143},
    {0.286, 0.286, 0.286},
    {0.429, 0.429, 0.429},
    {0.571, 0.571, 0.571},
    {0.714, 0.714, 0.714},
    {0.857, 0.857, 0.857},
    {0.000, 0.447, 0.741},
    {0.314, 0.717, 0.741},
    {0.50, 0.5, 0}
};

void doInference(IExecutionContext& context, float* input, float* output, const int output_size, Size input_shape) {
    const ICudaEngine& engine = context.getEngine();

    // Pointers to input and output device buffers to pass to engine.
    // Engine requires exactly IEngine::getNbBindings() number of buffers.
    assert(engine.getNbBindings() == 2);
    void* buffers[2];

    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
    const int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME);

    assert(engine.getBindingDataType(inputIndex) == nvinfer1::DataType::kFLOAT);
    const int outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
    assert(engine.getBindingDataType(outputIndex) == nvinfer1::DataType::kFLOAT);
    int mBatchSize = engine.getMaxBatchSize();

    // Create GPU buffers on device
    CHECK(cudaMalloc(&buffers[inputIndex], 3 * input_shape.height * input_shape.width * sizeof(float)));
    CHECK(cudaMalloc(&buffers[outputIndex], output_size*sizeof(float)));

    // Create stream
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    // DMA input batch data to device, infer on the batch asynchronously, and DMA output back to host
    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, 3 * input_shape.height * input_shape.width * sizeof(float), cudaMemcpyHostToDevice, stream));
    context.enqueue(1, buffers, stream, nullptr);
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], output_size * sizeof(float), cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);

    // Release stream and buffers
    cudaStreamDestroy(stream);
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}

int main(int argc, char** argv) {
    cudaSetDevice(DEVICE);

    // create a model using the API directly and serialize it to a stream
    char *trtModelStream{nullptr};
    size_t size{0};

    if (argc == 4 && string(argv[2]) == "-i") {
        const string engine_file_path {argv[1]};
        ifstream file(engine_file_path, ios::binary);
        if (file.good()) {
            file.seekg(0, file.end);
            size = file.tellg();
            file.seekg(0, file.beg);
            trtModelStream = new char[size];
            assert(trtModelStream);
            file.read(trtModelStream, size);
            file.close();
        }
    } else {
        cerr << "arguments not right!" << endl;
        cerr << "usage: bytetrack <path_to_model_trt.engine> -i <path_to_video>" << endl;
        return -1;
    }
    // The rest of main() (deserializing the engine, running doInference on
    // each frame, and updating BYTETracker) follows the upstream ByteTrack
    // TensorRT demo.