C# ONNX 调用 YOLOv11 进行目标检测

先上图,支持图片、视频检测。

 

FormYoloV11.cs 

using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;
using OpenCvSharp;
using OpenCvSharp.Dnn;
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.IO;
using System.Linq;
using System.Text;
using System.Windows.Forms;

namespace WindowsFormsApp12
{
     
    /// <summary>
    /// WinForms demo that runs YOLOv11 object detection through ONNX Runtime.
    /// Supports live camera frames (button1) and still images (button2/button3).
    /// </summary>
    public partial class FormYoloV11 : Form
    {
        public FormYoloV11()
        {
            InitializeComponent();
            InitializeCameraSelector();
            LoadModel();
            InitializeProcessingTimeLabel();
            // Flush buffered log text to the TextBox once per second. This is a
            // thread-pool timer, so UpdateTextBox must marshal to the UI thread.
            timerUpdateLog = new System.Threading.Timer(UpdateTextBox, null, 1000, 1000);
        }

        private void FormYoloV11_Load(object sender, EventArgs e)
        {
        }

        private VideoCapture capture;              // camera device; null until a camera is opened
        private Timer timer;                       // WinForms timer driving camera frame grabs
        private InferenceSession onnx_session;     // ONNX Runtime session for yolo11n.onnx
        // Model input size. This must agree with the 1x3x640x640 tensor fed to the
        // model below; the previous 320x240 values disagreed with the tensor shape
        // and produced negative CopyMakeBorder padding for mid-sized images.
        private int input_height = 640;
        private int input_width = 640;
        private float ratio_height;                // original-height / letterboxed-height
        private float ratio_width;                 // original-width / letterboxed-width
        private int box_num = 8400;                // candidate boxes YOLOv11 emits at 640x640
        private float conf_threshold = 0.25f;      // minimum class score for a box to survive
        private float nms_threshold = 0.5f;        // IoU threshold for non-maximum suppression
        private string[] class_names;              // labels from model/label_chinese.txt
        private int class_num;                     // class_names.Length
        private Label processingTimeLabel;         // overlay showing per-frame processing time
        private readonly object log_lock = new object(); // guards bufferedLogs (UI thread vs. log timer thread)

        /// <summary>Opens the given camera device and starts the frame-grab timer.</summary>
        /// <param name="cameraId">OpenCV device index of the camera to open.</param>
        private void InitializeCamera(int cameraId)
        {
            capture = new VideoCapture(cameraId);
            if (!capture.IsOpened())
            {
                add_log("Unable to access the camera.");
                return;
            }

            // Halve the camera resolution to speed up processing.
            capture.Set(VideoCaptureProperties.FrameWidth, capture.FrameWidth / 2);
            capture.Set(VideoCaptureProperties.FrameHeight, capture.FrameHeight / 2);
            add_log("视频信息" + capture.FrameWidth + "," + capture.FrameHeight);

            // Grab a frame every 40 ms (approximately 25 frames per second).
            timer = new Timer { Interval = 40 };
            timer.Tick += Timer_Tick;
            timer.Start();
        }

        /// <summary>
        /// Loads the class labels and creates the ONNX Runtime inference session,
        /// preferring CUDA and falling back to the CPU provider when no GPU is available.
        /// </summary>
        private void LoadModel()
        {
            // Model and label files live next to the executable under "model".
            string baseDirectory = AppDomain.CurrentDomain.BaseDirectory;

            string model_path = Path.Combine(baseDirectory, "model", "yolo11n.onnx");
            string classer_path = Path.Combine(baseDirectory, "model", "label_chinese.txt");
            class_names = File.ReadAllLines(classer_path, Encoding.UTF8);
            class_num = class_names.Length;

            SessionOptions options = new SessionOptions();
            options.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_INFO;
            try
            {
                options.AppendExecutionProvider_CUDA(0);
            }
            catch (Exception ex)
            {
                add_log($"Unable to use GPU for inference: {ex.Message}. Falling back to CPU.");
                options.AppendExecutionProvider_CPU(0);
            }
            options.IntraOpNumThreads = 1; // 限定只使用一个 CPU 核心
            onnx_session = new InferenceSession(model_path, options);
        }

        /// <summary>Creates the label that displays per-frame processing time.</summary>
        private void InitializeProcessingTimeLabel()
        {
            processingTimeLabel = new Label
            {
                Location = new System.Drawing.Point(10, 10),
                AutoSize = true,
                ForeColor = System.Drawing.Color.Black,
                BackColor = System.Drawing.Color.White,
                Font = new Font("Arial", 10, FontStyle.Bold)
            };
            Controls.Add(processingTimeLabel);
        }

        /// <summary>Grabs one camera frame, runs detection and updates the timing label.</summary>
        private void Timer_Tick(object sender, EventArgs e)
        {
            // Dispose the frame each tick; the previous version leaked one Mat per frame.
            using (Mat frame = new Mat())
            {
                capture.Read(frame);
                if (!frame.Empty())
                {
                    Stopwatch stopwatch = Stopwatch.StartNew();
                    ProcessFrame(frame);
                    stopwatch.Stop();
                    processingTimeLabel.Text = $"Processing Time: {stopwatch.ElapsedMilliseconds} ms";
                }
            }
        }

        /// <summary>Thin wrappers over OpenCvSharp's Bitmap/Mat converters.</summary>
        public static class BitmapConverterExtension
        {
            public static Bitmap ToBitmap(Mat mat)
            {
                return OpenCvSharp.Extensions.BitmapConverter.ToBitmap(mat);
            }

            public static Mat ToMat(Bitmap bitmap)
            {
                return OpenCvSharp.Extensions.BitmapConverter.ToMat(bitmap);
            }
        }

        /// <summary>
        /// Draws text onto <paramref name="image"/> via GDI+ so that CJK glyphs
        /// render correctly (Cv2.PutText cannot draw Chinese characters).
        /// </summary>
        public static void PutTextZH(Mat image, string text, OpenCvSharp.Point org, Scalar color, int fontSize, string fontName, bool italic, bool underline)
        {
            if (image.Empty())
            {
                throw new ArgumentException("The input image cannot be empty.");
            }

            // Dispose the temporary Bitmap/Mat pair; they used to leak on every call.
            using (Bitmap bitmap = BitmapConverterExtension.ToBitmap(image))
            {
                using (Graphics graphics = Graphics.FromImage(bitmap))
                {
                    graphics.TextRenderingHint = System.Drawing.Text.TextRenderingHint.AntiAlias;

                    FontStyle fontStyle = FontStyle.Regular;
                    if (italic) fontStyle |= FontStyle.Italic;
                    if (underline) fontStyle |= FontStyle.Underline;

                    // Scalar stores B,G,R in Val0..Val2 while FromArgb takes R,G,B.
                    using (Font font = new Font(fontName, fontSize, fontStyle))
                    using (Brush brush = new SolidBrush(System.Drawing.Color.FromArgb((int)color.Val2, (int)color.Val1, (int)color.Val0)))
                    {
                        graphics.DrawString(text, font, brush, org.X, org.Y);
                    }
                }

                // Copy the rendered result back into the caller's Mat.
                using (Mat result = BitmapConverterExtension.ToMat(bitmap))
                {
                    result.CopyTo(image);
                }
            }
        }

        private System.Threading.Timer timerUpdateLog; // periodically flushes bufferedLogs to textBox1
        public string bufferedLogs = "";               // pending log text; guarded by log_lock

        /// <summary>Timer callback: moves buffered log text into textBox1 on the UI thread.</summary>
        private void UpdateTextBox(object state)
        {
            string pending;
            lock (log_lock)
            {
                if (string.IsNullOrEmpty(bufferedLogs)) return;
                pending = bufferedLogs;
                bufferedLogs = "";
            }

            // The timer can fire after the form closes; don't touch dead controls.
            if (IsDisposed || !textBox1.IsHandleCreated) return;

            textBox1.Invoke(new MethodInvoker(delegate
            {
                textBox1.AppendText(pending);
                textBox1.ScrollToCaret();
                // Trim the box once it grows past ~1 MB of text.
                if (textBox1.Text.Length > 1000000)
                    textBox1.Text = textBox1.Text.Substring(900000);
            }));
        }

        public static string strlastLog_show = "";     // most recent log line

        /// <summary>Queues a timestamped log line; flushed to the UI once per second.</summary>
        /// <param name="log">Message text.</param>
        /// <param name="bRemoveLast">Unused; kept for call-site compatibility.</param>
        public void add_log(string log, bool bRemoveLast = false)
        {
            lock (log_lock)
            {
                bufferedLogs += $"{DateTime.Now.ToString("HH:mm:ss   ")} {log}\r\n";
            }
            strlastLog_show = log;
        }

        /// <summary>
        /// Runs the full YOLOv11 pipeline on <paramref name="image"/>: letterbox
        /// resize, tensor fill, inference, box decoding and NMS. Returns the kept
        /// detections with rectangles mapped back to original-image coordinates.
        /// Shared by the camera path (ProcessFrame) and the still-image path
        /// (button3_Click), which previously duplicated this code.
        /// </summary>
        private List<DetectionResult> DetectObjects(Mat image)
        {
            int height = image.Rows;
            int width = image.Cols;

            // Letterbox: shrink (keeping aspect ratio) so the image fits the model
            // input, then pad bottom/right to exactly input_width x input_height.
            Mat temp_image = image.Clone();
            if (height > input_height || width > input_width)
            {
                float scale = Math.Min((float)input_height / height, (float)input_width / width);
                OpenCvSharp.Size new_size = new OpenCvSharp.Size((int)(width * scale), (int)(height * scale));
                Cv2.Resize(image, temp_image, new_size);
            }
            ratio_height = (float)height / temp_image.Rows;
            ratio_width = (float)width / temp_image.Cols;

            Mat input_img = new Mat();
            Cv2.CopyMakeBorder(temp_image, input_img, 0, input_height - temp_image.Rows, 0, input_width - temp_image.Cols, BorderTypes.Constant, null);

            // NCHW float tensor normalized to [0,1]. OpenCV pixels are BGR while the
            // YOLO ONNX export expects RGB, so channels 0 and 2 are swapped here
            // (the previous code fed BGR, degrading detection confidence).
            Tensor<float> input_tensor = new DenseTensor<float>(new[] { 1, 3, input_height, input_width });
            for (int y = 0; y < input_img.Height; y++)
            {
                for (int x = 0; x < input_img.Width; x++)
                {
                    Vec3b pixel = input_img.At<Vec3b>(y, x);
                    input_tensor[0, 0, y, x] = pixel[2] / 255f; // R
                    input_tensor[0, 1, y, x] = pixel[1] / 255f; // G
                    input_tensor[0, 2, y, x] = pixel[0] / 255f; // B
                }
            }

            List<NamedOnnxValue> input_container = new List<NamedOnnxValue>
            {
                NamedOnnxValue.CreateFromTensor("images", input_tensor)
            };

            // Run inference; dispose the native output buffers when done.
            float[] data;
            using (var ort_outputs = onnx_session.Run(input_container))
            {
                // Output layout is (4 + class_num) x box_num; transpose to box-major.
                data = Transpose(ort_outputs.First().AsTensor<float>().ToArray(), 4 + class_num, box_num);
            }

            float[] confidenceInfo = new float[class_num];
            float[] rectData = new float[4];
            List<DetectionResult> detResults = new List<DetectionResult>();

            for (int i = 0; i < box_num; i++)
            {
                Array.Copy(data, i * (class_num + 4), rectData, 0, 4);
                Array.Copy(data, i * (class_num + 4) + 4, confidenceInfo, 0, class_num);

                float score = confidenceInfo.Max();
                int maxIndex = Array.IndexOf(confidenceInfo, score);
                if (score <= conf_threshold) continue; // drop background boxes early

                // Box is (center-x, center-y, width, height) in model-input space;
                // scale back to original-image coordinates.
                int _centerX = (int)(rectData[0] * ratio_width);
                int _centerY = (int)(rectData[1] * ratio_height);
                int _width = (int)(rectData[2] * ratio_width);
                int _height = (int)(rectData[3] * ratio_height);

                detResults.Add(new DetectionResult(
                    maxIndex,
                    class_names[maxIndex],
                    new Rect(_centerX - _width / 2, _centerY - _height / 2, _width, _height),
                    score));
            }

            // Non-maximum suppression keeps the best box per overlapping cluster.
            CvDnn.NMSBoxes(detResults.Select(x => x.Rect), detResults.Select(x => x.Confidence), conf_threshold, nms_threshold, out int[] indices);
            detResults = detResults.Where((x, index) => indices.Contains(index)).ToList();

            temp_image.Dispose();
            input_img.Dispose();
            return detResults;
        }

        /// <summary>Detects objects in a camera frame and shows raw/annotated views.</summary>
        private void ProcessFrame(Mat frame)
        {
            List<DetectionResult> detResults = DetectObjects(frame);

            using (Mat processedFrame = frame.Clone())
            {
                foreach (DetectionResult r in detResults)
                {
                    PutTextZH(processedFrame, string.Format("{0}:{1:P0}", r.Class, r.Confidence), new OpenCvSharp.Point(r.Rect.TopLeft.X, r.Rect.TopLeft.Y + 30), Scalar.Red, 15, "", false, false);
                    Cv2.Rectangle(processedFrame, r.Rect, Scalar.Red, thickness: 2);
                }

                // Replace-and-dispose so old bitmaps don't leak a GDI handle per frame.
                var oldOriginal = pictureBox1.Image;
                pictureBox1.Image = new Bitmap(frame.ToMemoryStream());
                oldOriginal?.Dispose();

                var oldProcessed = pictureBox2.Image;
                pictureBox2.Image = new Bitmap(processedFrame.ToMemoryStream());
                oldProcessed?.Dispose();
            }
        }

        /// <summary>
        /// Transposes a row-major rows x cols matrix stored in a flat array,
        /// returning a cols x rows flat array.
        /// </summary>
        public unsafe float[] Transpose(float[] tensorData, int rows, int cols)
        {
            float[] transposedTensorData = new float[tensorData.Length];

            fixed (float* pTensorData = tensorData)
            {
                fixed (float* pTransposedData = transposedTensorData)
                {
                    for (int i = 0; i < rows; i++)
                    {
                        for (int j = 0; j < cols; j++)
                        {
                            // element (i, j) moves to (j, i)
                            pTransposedData[j * rows + i] = pTensorData[i * cols + j];
                        }
                    }
                }
            }
            return transposedTensorData;
        }

        /// <summary>Opens the camera currently selected in the ComboBox.</summary>
        private void button1_Click(object sender, EventArgs e)
        {
            if (cameraSelector.SelectedIndex < 0) return;

            // Stop any capture that is already running.
            timer?.Stop();
            capture?.Release();

            // Items are labelled "Camera N" where N is the real device index; the
            // ComboBox index can differ from N when low-numbered devices are absent,
            // so parse the device index out of the item text.
            string item = cameraSelector.SelectedItem.ToString();
            int cameraId = int.Parse(item.Substring("Camera ".Length));
            InitializeCamera(cameraId);
        }

        /// <summary>Probes device indices 0-4 and lists the cameras that open.</summary>
        private void InitializeCameraSelector()
        {
            for (int i = 0; i < 5; i++)
            {
                using (VideoCapture tempCapture = new VideoCapture(i))
                {
                    if (tempCapture.IsOpened())
                    {
                        cameraSelector.Items.Add("Camera " + i);
                    }
                }
            }
            if (cameraSelector.Items.Count >= 1)
                cameraSelector.SelectedIndex = 0;

            cameraSelector.SelectedIndexChanged += CameraSelector_SelectedIndexChanged;
        }

        private void CameraSelector_SelectedIndexChanged(object sender, EventArgs e)
        {
        }

        string fileFilter = "*.*|*.bmp;*.jpg;*.jpeg;*.tiff;*.tiff;*.png";
        string image_path = "";        // path of the still image selected via button2

        DateTime dt1 = DateTime.Now;   // detection start time (button3)
        DateTime dt2 = DateTime.Now;   // detection end time (button3)

        /// <summary>Lets the user pick a still image and shows it in pictureBox1.</summary>
        private void button2_Click(object sender, EventArgs e)
        {
            OpenFileDialog ofd = new OpenFileDialog();
            ofd.Filter = fileFilter;
            if (ofd.ShowDialog() != DialogResult.OK) return;

            // Dispose the previous bitmaps so GDI handles are not leaked.
            pictureBox1.Image?.Dispose();
            pictureBox1.Image = null;

            image_path = ofd.FileName;
            pictureBox1.Image = new Bitmap(image_path);

            textBox1.Text = "";
            pictureBox2.Image?.Dispose();
            pictureBox2.Image = null;
        }

        /// <summary>Saves the annotated image to %TEMP% and opens it with the shell.</summary>
        private void SaveAndOpenResultImage(Mat resultImage)
        {
            string resultImagePath = Path.Combine(Path.GetTempPath(), "result_image.png");
            resultImage.SaveImage(resultImagePath);
            System.Diagnostics.Process.Start(new System.Diagnostics.ProcessStartInfo(resultImagePath) { UseShellExecute = true });
        }

        /// <summary>Runs detection on the image chosen via button2 and shows the result.</summary>
        private void button3_Click(object sender, EventArgs e)
        {
            if (image_path == "")
            {
                return;
            }

            button2.Enabled = false;
            pictureBox2.Image?.Dispose();
            pictureBox2.Image = null;
            textBox1.Text = "";
            Application.DoEvents();

            using (Mat image = new Mat(image_path))
            {
                // Note: timing covers pre-/post-processing as well as inference.
                dt1 = DateTime.Now;
                List<DetectionResult> detResults = DetectObjects(image);
                dt2 = DateTime.Now;

                using (Mat result_image = image.Clone())
                {
                    foreach (DetectionResult r in detResults)
                    {
                        PutTextZH(result_image, string.Format("{0}:{1:P0}", r.Class, r.Confidence), new OpenCvSharp.Point(r.Rect.TopLeft.X, r.Rect.TopLeft.Y + 30), Scalar.Red, 15, "", false, false);
                        Cv2.Rectangle(result_image, r.Rect, Scalar.Red, thickness: 2);
                    }

                    pictureBox2.Image = new Bitmap(result_image.ToMemoryStream());
                    textBox1.Text = "推理耗时:" + (dt2 - dt1).TotalMilliseconds + "ms";
                    // 保存并打开结果图片
                    SaveAndOpenResultImage(result_image);
                }
            }

            button2.Enabled = true;
        }

        /// <summary>Keeps the two picture boxes side by side when the form resizes.</summary>
        private void Form2_Resize(object sender, EventArgs e)
        {
            int pictureBoxWidth = (this.ClientSize.Width - 30) / 2;
            pictureBox1.Width = pictureBoxWidth;
            pictureBox2.Width = pictureBoxWidth;
            pictureBox2.Left = pictureBox1.Right + 10;
        }

        /// <summary>Releases the camera, both timers and the inference session on close.</summary>
        private void FormYoloV11_FormClosing(object sender, FormClosingEventArgs e)
        {
            timer?.Stop();
            timerUpdateLog?.Dispose();
            capture?.Release();
            capture?.Dispose();
            onnx_session?.Dispose();
        }
    }
}

 DetectResult.cs

using OpenCvSharp;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;

namespace WindowsFormsApp12
{
    /// <summary>
    /// A single object detection: class id, human-readable label, bounding
    /// rectangle and confidence score, as produced by YOLO post-processing.
    /// </summary>
    public class DetectionResult
    {
        /// <summary>Creates a detection record.</summary>
        /// <param name="ClassId">Index of the predicted class.</param>
        /// <param name="Class">Human-readable class label.</param>
        /// <param name="Rect">Bounding box in image coordinates.</param>
        /// <param name="Confidence">Prediction score in [0, 1].</param>
        public DetectionResult(int ClassId, string Class, Rect Rect, float Confidence)
        {
            this.Class = Class;
            this.ClassId = ClassId;
            this.Rect = Rect;
            this.Confidence = Confidence;
        }

        /// <summary>Human-readable class label.</summary>
        public string Class { get; set; }

        /// <summary>Index of the predicted class.</summary>
        public int ClassId { get; set; }

        /// <summary>Prediction score in [0, 1].</summary>
        public float Confidence { get; set; }

        /// <summary>Bounding box in image coordinates.</summary>
        public Rect Rect { get; set; }
    }
}

 FormYoloV11.Designer.cs


namespace WindowsFormsApp12
{
    /// <summary>
    /// Designer half of the form: control fields and the generated layout code.
    /// Edit via the Visual Studio designer, not by hand.
    /// </summary>
    partial class FormYoloV11
    {
        /// <summary>
        /// Required designer variable.
        /// </summary>
        private System.ComponentModel.IContainer components = null;

        /// <summary>
        /// Clean up any resources being used.
        /// </summary>
        /// <param name="disposing">true if managed resources should be disposed; otherwise, false.</param>
        protected override void Dispose(bool disposing)
        {
            if (disposing && (components != null))
            {
                components.Dispose();
            }
            base.Dispose(disposing);
        }

        #region Windows Form Designer generated code

        /// <summary>
        /// Required method for Designer support - do not modify
        /// the contents of this method with the code editor.
        /// </summary>
        private void InitializeComponent()
        {
            this.pictureBox1 = new System.Windows.Forms.PictureBox();
            this.pictureBox2 = new System.Windows.Forms.PictureBox();
            this.button1 = new System.Windows.Forms.Button();
            this.cameraSelector = new System.Windows.Forms.ComboBox();
            this.textBox1 = new System.Windows.Forms.TextBox();
            this.button2 = new System.Windows.Forms.Button();
            this.button3 = new System.Windows.Forms.Button();
            ((System.ComponentModel.ISupportInitialize)(this.pictureBox1)).BeginInit();
            ((System.ComponentModel.ISupportInitialize)(this.pictureBox2)).BeginInit();
            this.SuspendLayout();
            // 
            // pictureBox1
            // 
            this.pictureBox1.Anchor = ((System.Windows.Forms.AnchorStyles)(((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Bottom) 
            | System.Windows.Forms.AnchorStyles.Left)));
            this.pictureBox1.BackColor = System.Drawing.SystemColors.ActiveBorder;
            this.pictureBox1.Location = new System.Drawing.Point(1, 49);
            this.pictureBox1.Name = "pictureBox1";
            this.pictureBox1.Size = new System.Drawing.Size(387, 309);
            this.pictureBox1.SizeMode = System.Windows.Forms.PictureBoxSizeMode.Zoom;
            this.pictureBox1.TabIndex = 0;
            this.pictureBox1.TabStop = false;
            // 
            // pictureBox2
            // 
            this.pictureBox2.Anchor = ((System.Windows.Forms.AnchorStyles)(((System.Windows.Forms.AnchorStyles.Top | System.Windows.Forms.AnchorStyles.Bottom) 
            | System.Windows.Forms.AnchorStyles.Right)));
            this.pictureBox2.BackColor = System.Drawing.SystemColors.ActiveBorder;
            this.pictureBox2.BorderStyle = System.Windows.Forms.BorderStyle.FixedSingle;
            this.pictureBox2.Location = new System.Drawing.Point(411, 49);
            this.pictureBox2.Name = "pictureBox2";
            this.pictureBox2.Size = new System.Drawing.Size(387, 309);
            this.pictureBox2.SizeMode = System.Windows.Forms.PictureBoxSizeMode.Zoom;
            this.pictureBox2.TabIndex = 1;
            this.pictureBox2.TabStop = false;
            // 
            // button1
            // 
            this.button1.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Bottom | System.Windows.Forms.AnchorStyles.Left)));
            this.button1.Location = new System.Drawing.Point(148, 408);
            this.button1.Name = "button1";
            this.button1.Size = new System.Drawing.Size(75, 23);
            this.button1.TabIndex = 2;
            this.button1.Text = "打开摄像头";
            this.button1.UseVisualStyleBackColor = true;
            this.button1.Click += new System.EventHandler(this.button1_Click);
            // 
            // cameraSelector
            // 
            this.cameraSelector.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Bottom | System.Windows.Forms.AnchorStyles.Left)));
            this.cameraSelector.FormattingEnabled = true;
            this.cameraSelector.Location = new System.Drawing.Point(1, 411);
            this.cameraSelector.Name = "cameraSelector";
            this.cameraSelector.Size = new System.Drawing.Size(121, 20);
            this.cameraSelector.TabIndex = 3;
            // 
            // textBox1
            // 
            this.textBox1.Anchor = ((System.Windows.Forms.AnchorStyles)(((System.Windows.Forms.AnchorStyles.Bottom | System.Windows.Forms.AnchorStyles.Left) 
            | System.Windows.Forms.AnchorStyles.Right)));
            this.textBox1.Location = new System.Drawing.Point(1, 437);
            this.textBox1.Multiline = true;
            this.textBox1.Name = "textBox1";
            this.textBox1.Size = new System.Drawing.Size(798, 77);
            this.textBox1.TabIndex = 4;
            // 
            // button2
            // 
            this.button2.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Bottom | System.Windows.Forms.AnchorStyles.Left)));
            this.button2.Location = new System.Drawing.Point(236, 408);
            this.button2.Margin = new System.Windows.Forms.Padding(2);
            this.button2.Name = "button2";
            this.button2.Size = new System.Drawing.Size(75, 23);
            this.button2.TabIndex = 6;
            this.button2.Text = "打开图像";
            this.button2.UseVisualStyleBackColor = true;
            this.button2.Click += new System.EventHandler(this.button2_Click);
            // 
            // button3
            // 
            this.button3.Anchor = ((System.Windows.Forms.AnchorStyles)((System.Windows.Forms.AnchorStyles.Bottom | System.Windows.Forms.AnchorStyles.Left)));
            this.button3.Location = new System.Drawing.Point(309, 408);
            this.button3.Margin = new System.Windows.Forms.Padding(2);
            this.button3.Name = "button3";
            this.button3.Size = new System.Drawing.Size(75, 23);
            this.button3.TabIndex = 5;
            this.button3.Text = "检测图像";
            this.button3.UseVisualStyleBackColor = true;
            this.button3.Click += new System.EventHandler(this.button3_Click);
            // 
            // FormYoloV11
            // 
            this.AutoScaleDimensions = new System.Drawing.SizeF(6F, 12F);
            this.AutoScaleMode = System.Windows.Forms.AutoScaleMode.Font;
            this.ClientSize = new System.Drawing.Size(800, 511);
            this.Controls.Add(this.button2);
            this.Controls.Add(this.button3);
            this.Controls.Add(this.textBox1);
            this.Controls.Add(this.cameraSelector);
            this.Controls.Add(this.button1);
            this.Controls.Add(this.pictureBox2);
            this.Controls.Add(this.pictureBox1);
            this.Name = "FormYoloV11";
            this.Text = "FormYoloV11";
            this.FormClosing += new System.Windows.Forms.FormClosingEventHandler(this.FormYoloV11_FormClosing);
            this.Load += new System.EventHandler(this.FormYoloV11_Load);
            ((System.ComponentModel.ISupportInitialize)(this.pictureBox1)).EndInit();
            ((System.ComponentModel.ISupportInitialize)(this.pictureBox2)).EndInit();
            this.ResumeLayout(false);
            this.PerformLayout();

        }

        #endregion

        private System.Windows.Forms.PictureBox pictureBox1;   // left pane: source image / raw camera frame
        private System.Windows.Forms.PictureBox pictureBox2;   // right pane: annotated detection result
        private System.Windows.Forms.Button button1;           // "打开摄像头" (open camera)
        private System.Windows.Forms.ComboBox cameraSelector;  // list of available camera devices
        private System.Windows.Forms.TextBox textBox1;         // log / timing output
        private System.Windows.Forms.Button button2;           // "打开图像" (open image)
        private System.Windows.Forms.Button button3;           // "检测图像" (detect image)
         
    }
}

参考文献

onnx模型部署(一) ONNXRuntime_caffe_luoganttcc-开放原子开发者工作坊

 pytorch gpu推理、onnxruntime gpu推理、tensorrt gpu推理比较,及安装教程,有详细代码解释-CSDN博客

 深度学习模型部署——基于Onnx Runtime的深度学习模型CPU与GPU部署(C++实现)-CSDN博客

GitHub - ultralytics/ultralytics: Ultralytics YOLO11 🚀 

 深度学习模型部署——基于Onnx Runtime的深度学习模型CPU与GPU部署(C++实现)-CSDN博客文章浏览阅读7.4k次,点赞37次,收藏133次。以上就是在win 10下使用Onnx Runtime用CPU与GPU来对onnx模型进行推理部署的对比,可以明显的看出来,使用GPU之后的推理速度,但在正式的大型项目中,在win下使用GPU部署模型是不建议,一般都会选择Linux,那样对GPU的利用率会高出不少,毕竟蚊腿肉也是肉。_onnx runtimehttps://blog.csdn.net/matt45m/article/details/139029398?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522B896ED99-0117-4362-B102-8EB067C5AF52%2522%252C%2522scm%2522%253A%252220140713.130102334..%2522%257D&request_id=B896ED99-0117-4362-B102-8EB067C5AF52&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2~all~top_click~default-2-139029398-null-null.142^v100^pc_search_result_base1&utm_term=onnxruntime&spm=1018.2226.3001.4187

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:/a/915970.html

如若内容造成侵权/违法违规/事实不符,请联系我们进行投诉反馈qq邮箱809451989@qq.com,一经查实,立即删除!

相关文章

【多语言】每种语言打印helloworld,编译为exe会占多大空间

文章目录 背景c语言 53KBc 53KBgo 1.8Mdart 4.6Mpython未测试nodejs未测试rust未测试java未测试cmd || bash || powershell 未测试other 背景 各个版本的helloworld&#xff0c;纯属闲的, 环境如下: - win10 - mingw: gcc8.1.0 - go1.21 - dart3.5.4c语言 53KB gcc main.c -…

前端搭建低代码平台,微前端如何选型?

目录 背景 一、微前端是什么&#xff1f; 二、三大特性 三、现有微前端解决方案 1、iframe 2、Web Components 3、ESM 4、EMP 5、Fronts 6、无界&#xff08;文档&#xff09; 7、qiankun 四、我们选择的方案 引入qiankun并使用&#xff08;src外层作为主应用&#xff09; 主应…

CVE-2024-2961漏洞的简单学习

简单介绍 PHP利用glibc iconv()中的一个缓冲区溢出漏洞&#xff0c;实现将文件读取提升为任意命令执行漏洞 在php读取文件的时候可以使用 php://filter伪协议利用 iconv 函数, 从而可以利用该漏洞进行 RCE 漏洞的利用场景 PHP的所有标准文件读取操作都受到了影响&#xff1…

InternVL 多模态模型部署微调实践

目录 0 什么是MLLM 1 开发机创建与使用 2 LMDeploy部署 2.1 环境配置 2.2 LMDeploy基本用法介绍 2.3 网页应用部署体验 3 XTuner微调实践 3.1 环境配置 3.2.配置文件参数解读 3.3 开始微调 4.体验模型美食鉴赏能力 0 什么是MLLM 多模态大语言模型 ( Multimodal Larg…

干货分享之Python爬虫与代理

嗨伙伴们&#xff0c;今天是干货分享哦&#xff0c;可千万不要错过。今天小蝌蚪教大家使用phthon时学会巧妙借用代理ip来更好地完成任务。 让我们先了解一下为什么说咱们要用爬虫代理ip呢&#xff0c;那是因为很多网站为了防止有人过度爬取数据&#xff0c;对自身资源造成损害…

鸿蒙学习生态应用开发能力全景图-赋能套件(1)

文章目录 赋能套件鸿蒙生态应用开发能力全景图 赋能套件 鸿蒙生态白皮书: 全面阐释了鸿蒙生态下应用开发核心理念、关键能力以及创新体验,旨在帮助开发者快速、准确、全面的了解鸿蒙开发套件给开发者提供的能力全景和未来的愿景。 视频课程: 基于真实的开发场景,提供向导式…

netcat工具安装和使用

netcat是一个功能强大的网络实用工具&#xff0c;可以从命令⾏跨⽹络读取和写⼊数据。 netcat是为Nmap项⽬编写的&#xff0c;是⽬前分散的Netcat版本系列的经典。 它旨在成为可靠的后端⼯具&#xff0c;可⽴即为其他应⽤程序和⽤户提供⽹络连接。 一&#xff0c;下载安装 1&a…

【PHP】ThinkPHP基础

下载composer ComposerA Dependency Manager for PHPhttps://getcomposer.org/ 安装composer 查看composer是否安装 composer composer --version 安装 ThinkPHP6 如果你是第一次安装的话&#xff0c;首次安装咱们需要打开控制台&#xff1a; 进入后再通过命令,在命令行下面&a…

【HarmonyOS】应用实现读取剪切板内容(安全控件和自读取)

【HarmonyOS】应用实现读取粘贴板内容(安全控件和自读取) 前言 三方应用 读取系统剪切板是比较常见的功能。可以实现功能入口的快捷激活跳转&#xff0c;以及用户粘贴操作的简化&#xff0c;增强用户的体验感。 但是在用户日渐注重隐私的今天&#xff0c;系统对于剪切板权限的…

飞牛云fnOS本地部署WordPress个人网站并一键发布公网远程访问

文章目录 前言1. Docker下载源设置2. Docker下载WordPress3. Docker部署Mysql数据库4. WordPress 参数设置5. 飞牛云安装Cpolar工具6. 固定Cpolar公网地址7. 修改WordPress配置文件8. 公网域名访问WordPress 前言 本文旨在详细介绍如何在飞牛云NAS上利用Docker部署WordPress&a…

解析安卓镜像包和提取DTB文件的操作日志

概述 想查看一下安卓的镜像包里都存了什么内容 步骤 使用RKDevTool_v3.15对RK3528_DC_HK1_RBOX_K8_Multi_WIFI_13_20230915.2153.img解包 路径: 高级(Advancing) > 固件(firmware) > 解包(unpacking)得到\Output\Android\Image boot.imguboot.imgsuper.img 处理boot.…

LeetCode 热题100(八)【二叉树】(3)

目录 8.11二叉树展开为链表&#xff08;中等&#xff09; 8.12从前序与中序遍历序列构造二叉树&#xff08;中等&#xff09; 8.13路径总和III&#xff08;中等&#xff09; 8.14二叉树的最近公共祖先&#xff08;中等&#xff09; 8.15二叉树中的最大路径和&#xff08;困…

FPGA实现PCIE3.0视频采集转SDI输出,基于XDMA+GS2971架构,提供工程源码和技术支持

目录 1、前言工程概述免责声明 2、相关方案推荐我已有的PCIE方案本博已有的 SDI 编解码方案本博客方案的PCIE2.0版本 3、PCIE基础知识扫描4、工程详细设计方案工程设计原理框图电脑端视频QT上位机XDMA配置及使用XDMA中断模块FDMA图像缓存Native视频时序生成RGB转BT1120SDI转HDM…

纽约大学:指导LLM提出澄清性问题

&#x1f4d6;标题&#xff1a;Modeling Future Conversation Turns to Teach LLMs to Ask Clarifying Questions &#x1f310;来源&#xff1a;arXiv, 2410.13788 &#x1f31f;摘要 &#x1f538;大型语言模型&#xff08;LLM&#xff09;必须经常对高度模糊的用户请求做出…

STM32F1学习——I2C通信

一、I2C通信一带多 在学习通信的时候&#xff0c;我们常会听到串口通信。但串口通信只限定两个设备之间&#xff0c;如果有多个设备&#xff0c;通信的两个设备就要连接上&#xff0c;接线复杂。所以有了总线式通信&#xff0c;在一条总线上可以连接多个设备&#xff0c;这些根…

当你想要conda安装遇到UnavailableInvalidChannel: HTTP 404 NOT FOUND for channel的问题

想要装个虚拟环境&#xff0c;结果遇到404。 看了第一个GitHub帖子中的一句话 UnavailableInvalidChannel: The channel is not accessible or is invalid. Navigator not launching. Issue #9473 conda/conda GitHub 想说那我就把这个not found的channel删掉吧&#xff…

Jmeter中的前置处理器(一)

前置处理器 1--JSR223 PreProcessor 功能特点 自定义数据处理&#xff1a;使用脚本语言处理请求数据&#xff0c;实现高度定制化的数据处理和生成。动态数据生成&#xff1a;在请求发送前生成动态数据&#xff0c;如随机数、时间戳等。变量设置&#xff1a;设置和修改 JMeter…

2023年高校大数据挑战赛A题中文文本纠错求解全过程文档及程序

2023年高校大数据挑战赛 A题 中文文本纠错 原题再现&#xff1a; 中文文本纠错的任务主要是针对中文文本中出现的错误进行检测和纠正&#xff0c;属于人工智能自然语言处理的研究子方向。中文文本纠错通常使用的场景有政务公文、裁判文书、新闻出版等&#xff0c;中文文本纠错…

使用CNN进行验证码识别:深度学习与图像预处理教程

验证码&#xff08;CAPTCHA&#xff09;广泛用于区分人类和自动化程序&#xff08;如机器人&#xff09;&#xff0c;通常由扭曲的字母、数字或符号组成。为了实现验证码的自动识别&#xff0c;深度学习尤其是卷积神经网络&#xff08;CNN&#xff09;非常有效。本文将带你一起…

基于 Python Django 的二手房间可视化系统分析

博主介绍&#xff1a;✌程序员徐师兄、7年大厂程序员经历。全网粉丝12w、csdn博客专家、掘金/华为云/阿里云/InfoQ等平台优质作者、专注于Java技术领域和毕业项目实战✌ &#x1f345;文末获取源码联系&#x1f345; &#x1f447;&#x1f3fb; 精彩专栏推荐订阅&#x1f447;…