Exporting a YOLOv3 model from OpenMMLab and running inference with onnxruntime and TensorRT

Exporting the ONNX file

Export directly with the following script:

import torch
from mmdet.apis import init_detector


config_file = './configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
checkpoint_file = 'yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth'
model = init_detector(config_file, checkpoint_file, device='cpu')  # or device='cuda:0'
torch.onnx.export(model, (torch.zeros(1, 3, 416, 416),), "yolov3.onnx", opset_version=11)

The exported ONNX structure is shown in the graph visualization (omitted here).
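The exported model can be inspected quickly with onnxruntime (a minimal sketch, assuming the yolov3.onnx file saved above):

import onnxruntime

session = onnxruntime.InferenceSession('yolov3.onnx', providers=['CPUExecutionProvider'])
for node in session.get_outputs():
    print(node.name, node.shape)  # one entry per detection head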
The outputs are the raw prediction maps of the three detection heads, one per scale. To merge the detection results into a single tensor, modify the script as follows:

import torch
from itertools import repeat
from mmdet.apis import init_detector


config_file = './configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
checkpoint_file = 'yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth'

class YOLOV3(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.model = init_detector(config_file, checkpoint_file, device='cpu')
        self.class_num = 80
        # YOLOv3 COCO anchors, grouped per level for strides 32, 16 and 8
        self.base_sizes = [[(116, 90), (156, 198), (373, 326)],
                           [(30, 61), (62, 45), (59, 119)],
                           [(10, 13), (16, 30), (33, 23)]]
        self.stride = [32, 16, 8]
        self.strides = [tuple(repeat(x, 2)) for x in self.stride]
        self.centers = [(x[0] / 2., x[1] / 2.) for x in self.strides]
        self.base_anchors=self.gen_base_anchors()
        
    def gen_base_anchors(self):
        multi_level_base_anchors = []
        for i, base_sizes_per_level in enumerate(self.base_sizes):
            center = self.centers[i]
            x_center, y_center = center
            base_anchors = []
            for base_size in base_sizes_per_level:
                w, h = base_size
                base_anchor = torch.Tensor([x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, y_center + 0.5 * h])
                base_anchors.append(base_anchor)
            base_anchors = torch.stack(base_anchors, dim=0)
            multi_level_base_anchors.append(base_anchors)
        return multi_level_base_anchors
             
    def _meshgrid(self, x, y):
        xx = x.repeat(y.shape[0])
        yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
        return xx, yy

    def grid_priors(self, featmap_sizes):
        multi_level_anchors = []
        for i in range(len(featmap_sizes)):
            base_anchors = self.base_anchors[i]
            feat_h, feat_w = featmap_sizes[i]
            stride_w, stride_h = self.strides[i]
            shift_x = torch.arange(0, feat_w) * stride_w
            shift_y = torch.arange(0, feat_h) * stride_h
            shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
            shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
            anchors = base_anchors[None, :, :] + shifts[:, None, :]
            anchors = anchors.view(-1, 4)           
            multi_level_anchors.append(anchors)
        return multi_level_anchors
    
    def decode(self, bboxes, pred_bboxes, stride):
        # YOLOv3 decoding: move the anchor center by the (sigmoid-activated) xy offset
        # scaled by the stride, and scale the anchor size by exp of the wh prediction
        xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (pred_bboxes[..., :2] - 0.5) * stride
        whs = (bboxes[..., 2:] - bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()  # half width/height
        decoded_bboxes = torch.stack((xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] - whs[..., 1],
                                      xy_centers[..., 0] + whs[..., 0], xy_centers[..., 1] + whs[..., 1]), dim=-1)  # (x1, y1, x2, y2)
        return decoded_bboxes
        
    def forward(self, x):
        x = self.model.backbone(x)
        x = self.model.neck(x)
        pred_maps = self.model.bbox_head(x)
        
        flatten_preds = []
        flatten_strides = []
        for pred, stride in zip(pred_maps[0], self.stride):
            pred = pred.permute(0, 2, 3, 1).reshape(1, -1, 5+self.class_num)
            pred[..., :2] = pred[..., :2].sigmoid()
            flatten_preds.append(pred)
            flatten_strides.append(pred.new_tensor(stride).expand(pred.size(1)))
            
        flatten_preds = torch.cat(flatten_preds, dim=1)
        flatten_bbox_preds = flatten_preds[..., :4]  
        flatten_objectness = flatten_preds[..., 4].sigmoid()
        flatten_preds[..., 4] = flatten_objectness
        flatten_cls_scores = flatten_preds[..., 5:].sigmoid()
        flatten_preds[..., 5:] = flatten_cls_scores

        featmap_sizes = [pred_map.shape[-2:] for pred_map in pred_maps[0]]
        mlvl_anchors = self.grid_priors(featmap_sizes)
        flatten_anchors = torch.cat(mlvl_anchors)
        flatten_strides = torch.cat(flatten_strides)
        
        flatten_bboxes = self.decode(flatten_anchors, flatten_bbox_preds, flatten_strides.unsqueeze(-1))
        flatten_preds[..., :4] = flatten_bboxes
        
        return flatten_preds
    
    
model = YOLOV3().eval()
input = torch.zeros(1, 3, 416, 416, device='cpu')
torch.onnx.export(model, input, "yolov3.onnx", opset_version=11)

The exported ONNX model now has a single merged output (graph visualization omitted).
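As a sanity check, the ONNX output can be compared against the PyTorch forward pass (a sketch that continues the export script above; the tolerances are arbitrary):

import numpy as np
import onnxruntime

with torch.no_grad():
    torch_out = model(input).numpy()

session = onnxruntime.InferenceSession('yolov3.onnx', providers=['CPUExecutionProvider'])
onnx_out = session.run(None, {session.get_inputs()[0].name: input.numpy()})[0]

# (1, 10647, 85): 10647 = (13*13 + 26*26 + 52*52) * 3 anchors, 85 = 4 box + 1 objectness + 80 classes
print(onnx_out.shape)
np.testing.assert_allclose(torch_out, onnx_out, rtol=1e-3, atol=1e-5)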
If mmdeploy is installed, the ONNX model can also be exported with the following script.

from mmdeploy.apis import torch2onnx
from mmdeploy.backend.sdk.export_info import export2SDK


img = 'bus.jpg'
work_dir = './work_dir/onnx/yolov3'
save_file = './end2end.onnx'
deploy_cfg = 'mmdeploy/configs/mmdet/detection/detection_onnxruntime_dynamic.py'
model_cfg = 'mmdetection/configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
model_checkpoint = 'checkpoints/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth'
device = 'cpu'

# 1. convert model to onnx
torch2onnx(img, work_dir, save_file, deploy_cfg, model_cfg, model_checkpoint, device)

# 2. extract pipeline info for sdk use (dump-info)
export2SDK(deploy_cfg, model_cfg, work_dir, pth=model_checkpoint, device=device)

The structure of this ONNX model is shown in the graph visualization (omitted here).
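Unlike the manual export, the mmdeploy graph embeds the post-processing (score thresholding and NMS), so it produces two outputs, typically named dets (boxes plus scores) and labels. A quick inspection (a sketch, assuming the work_dir path used above):

import onnxruntime

session = onnxruntime.InferenceSession('work_dir/onnx/yolov3/end2end.onnx', providers=['CPUExecutionProvider'])
for node in session.get_outputs():
    print(node.name, node.shape)  # expected: dets (1, num_dets, 5) and labels (1, num_dets)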

Inference with onnxruntime

Running onnxruntime inference on the manually exported ONNX model:

import cv2
import numpy as np
import onnxruntime


CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
        'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
        'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
        'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
        'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
        'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
        'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
        'hair drier', 'toothbrush']  # the 80 COCO classes
use_letterbox = True
input_shape = (416, 416)      


def nms(dets, thresh):
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    areas = (y2 - y1 + 1) * (x2 - x1 + 1)
    scores = dets[:, 4]
    keep = []
    index = scores.argsort()[::-1] 

    while index.size > 0:
        i = index[0]
        keep.append(i)
        x11 = np.maximum(x1[i], x1[index[1:]]) 
        y11 = np.maximum(y1[i], y1[index[1:]])
        x22 = np.minimum(x2[i], x2[index[1:]])
        y22 = np.minimum(y2[i], y2[index[1:]])
        w = np.maximum(0, x22 - x11 + 1)                              
        h = np.maximum(0, y22 - y11 + 1) 
        overlaps = w * h
        ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)
        idx = np.where(ious <= thresh)[0]
        index = index[idx + 1]
    return keep


def filter_box(org_box, conf_thres, iou_thres):  # filter boxes by objectness, then run per-class NMS
    org_box = np.squeeze(org_box)
    conf = org_box[..., 4] > conf_thres
    box = org_box[conf]

    cls_cinf = box[..., 5:]
    cls = []
    for i in range(len(cls_cinf)):
        cls.append(int(np.argmax(cls_cinf[i])))
    all_cls = list(set(cls))    
     
    output = []
    for i in range(len(all_cls)):
        curr_cls = all_cls[i]
        curr_cls_box = []
        curr_out_box = []
        for j in range(len(cls)):
            if cls[j] == curr_cls:
                box[j][4] *= cls_cinf[j][curr_cls]
                box[j][5] = curr_cls
                curr_cls_box.append(box[j][:6])
        curr_cls_box = np.array(curr_cls_box)
        curr_out_box = nms(curr_cls_box, iou_thres)
        for k in curr_out_box:
            output.append(curr_cls_box[k])
    output = np.array(output)  # (num_boxes, 6): x1, y1, x2, y2, score, class
    return output


def letterbox(im, new_shape=(416, 416), color=(114, 114, 114)):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    
    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))    
    dw, dh = (new_shape[1] - new_unpad[0])/2, (new_shape[0] - new_unpad[1])/2  # wh padding 
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im


def scale_boxes(input_shape, boxes, shape):
    # Rescale boxes (xyxy) from input_shape to shape
    gain = min(input_shape[0] / shape[0], input_shape[1] / shape[1])  # gain  = old / new
    pad = (input_shape[1] - shape[1] * gain) / 2, (input_shape[0] - shape[0] * gain) / 2  # wh padding

    boxes[..., [0, 2]] -= pad[0]  # x padding
    boxes[..., [1, 3]] -= pad[1]  # y padding
    boxes[..., :4] /= gain
    boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
    boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
    return boxes


def draw(image, box_data):
    box_data = scale_boxes(input_shape, box_data, image.shape)
    boxes = box_data[...,:4].astype(np.int32) 
    scores = box_data[...,4]
    classes = box_data[...,5].astype(np.int32)
   
    for box, score, cl in zip(boxes, scores, classes):
        x1, y1, x2, y2 = box
        print('class: {}, score: {}, coordinate: [{}, {}, {}, {}]'.format(CLASSES[cl], score, x1, y1, x2, y2))
        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 1)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)


if __name__=="__main__":
    img = cv2.imread('bus.jpg')
    if use_letterbox:
        input = letterbox(img, input_shape)
    else:
        input = cv2.resize(img, input_shape)
    input = input[:, :, ::-1].transpose(2, 0, 1).astype(dtype=np.float32)  # BGR to RGB, HWC to CHW
    input[0, :] = (input[0, :] - 123.675) / 58.395  # per-channel mean/std from the model config
    input[1, :] = (input[1, :] - 116.28) / 57.12
    input[2, :] = (input[2, :] - 103.53) / 57.375
    input = np.expand_dims(input, axis=0)
    
    onnx_session = onnxruntime.InferenceSession('yolov3.onnx', providers=['CPUExecutionProvider'])
        
    input_name=[]
    for node in onnx_session.get_inputs():
        input_name.append(node.name)

    output_name=[]
    for node in onnx_session.get_outputs():
        output_name.append(node.name)

    input_feed={}
    for name in input_name:
        input_feed[name] = input
        
    pred = onnx_session.run(None, input_feed)
    
    outbox = filter_box(pred, 0.5, 0.5)
    draw(img, outbox)
    cv2.imwrite('res.jpg', img)

Running onnxruntime inference on the ONNX model exported by mmdeploy:

import cv2
import numpy as np
import onnxruntime


CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
        'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
        'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
        'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
        'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
        'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
        'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
        'hair drier', 'toothbrush']  # the 80 COCO classes
use_letterbox = True
input_shape = (416, 416)      


def filter_box(org_box, conf_thres):  # drop boxes scoring below conf_thres (NMS already runs inside the model)
    flag = org_box[0][..., 4] > conf_thres
    box = org_box[0][flag]
    cls = org_box[1][flag].reshape(-1, 1)
    output = np.concatenate((box, cls), axis=1)
    return output


def letterbox(im, new_shape=(416, 416), color=(114, 114, 114)):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    
    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))    
    dw, dh = (new_shape[1] - new_unpad[0])/2, (new_shape[0] - new_unpad[1])/2  # wh padding 
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im


def scale_boxes(input_shape, boxes, shape):
    # Rescale boxes (xyxy) from input_shape to shape
    gain = min(input_shape[0] / shape[0], input_shape[1] / shape[1])  # gain  = old / new
    pad = (input_shape[1] - shape[1] * gain) / 2, (input_shape[0] - shape[0] * gain) / 2  # wh padding

    boxes[..., [0, 2]] -= pad[0]  # x padding
    boxes[..., [1, 3]] -= pad[1]  # y padding
    boxes[..., :4] /= gain
    boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
    boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
    return boxes


def draw(image, box_data):
    box_data = scale_boxes(input_shape, box_data, image.shape)
    boxes = box_data[...,:4].astype(np.int32) 
    scores = box_data[...,4]
    classes = box_data[...,5].astype(np.int32)
   
    for box, score, cl in zip(boxes, scores, classes):
        x1, y1, x2, y2 = box
        print('class: {}, score: {}, coordinate: [{}, {}, {}, {}]'.format(CLASSES[cl], score, x1, y1, x2, y2))
        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 1)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)


if __name__=="__main__":
    img = cv2.imread('bus.jpg')
    if use_letterbox:
        input = letterbox(img, input_shape)
    else:
        input = cv2.resize(img, input_shape)
    input = input[:, :, ::-1].transpose(2, 0, 1).astype(dtype=np.float32)  # BGR to RGB, HWC to CHW
    input[0, :] = (input[0, :] - 123.675) / 58.395  # per-channel mean/std from the model config
    input[1, :] = (input[1, :] - 116.28) / 57.12
    input[2, :] = (input[2, :] - 103.53) / 57.375
    input = np.expand_dims(input, axis=0)
    
    onnx_session = onnxruntime.InferenceSession('../work_dir/onnx/yolov3/end2end.onnx', providers=['CPUExecutionProvider'])
        
    input_name=[]
    for node in onnx_session.get_inputs():
        input_name.append(node.name)

    output_name=[]
    for node in onnx_session.get_outputs():
        output_name.append(node.name)

    input_feed={}
    for name in input_name:
        input_feed[name] = input
        
    pred = onnx_session.run(None, input_feed)
    
    outbox = filter_box(pred, 0.5)
    draw(img, outbox)
    cv2.imwrite('res.jpg', img)

Inference directly through the mmdeploy API:

from mmdeploy.apis import inference_model


model_cfg = 'mmdetection/configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
deploy_cfg = 'mmdeploy/configs/mmdet/detection/detection_onnxruntime_dynamic.py'
img = 'bus.jpg'
backend_files = ['work_dir/onnx/yolov3/end2end.onnx']
device = 'cpu'

result = inference_model(model_cfg, deploy_cfg, backend_files, img, device)
print(result)

Or:

from mmdeploy_runtime import Detector
import cv2


# read the image
img = cv2.imread('bus.jpg')

# create the detector
detector = Detector(model_path='work_dir/onnx/yolov3', device_name='cpu')

# run inference
bboxes, labels, _ = detector(img)
# filter the results by score threshold and draw them on the original image
for bbox, label_id in zip(bboxes, labels):
    [left, top, right, bottom], score = bbox[0:4].astype(int), bbox[4]
    if score < 0.3:
        continue
    cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0))
cv2.imwrite('output_detection.png', img)

Exporting the engine file

Here the ONNX file is converted with trtexec; the version used is TensorRT-8.2.1.8.

./trtexec.exe --onnx=yolov3.onnx --saveEngine=yolov3.engine --workspace=20480
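Alternatively, the engine can be built programmatically (a minimal sketch against the TensorRT 8.2 Python API; note that max_workspace_size is deprecated in later releases):

import tensorrt as trt

logger = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(logger)
network = builder.create_network(1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
parser = trt.OnnxParser(network, logger)
with open('yolov3.onnx', 'rb') as f:
    if not parser.parse(f.read()):
        for i in range(parser.num_errors):
            print(parser.get_error(i))
        raise RuntimeError('failed to parse the ONNX file')

config = builder.create_builder_config()
config.max_workspace_size = 20 << 30  # 20 GiB, matching --workspace=20480 (MiB)
serialized = builder.build_serialized_network(network, config)
with open('yolov3.engine', 'wb') as f:
    f.write(serialized)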

Inference with TensorRT

Running TensorRT inference on the manually exported model:

import cv2
import numpy as np
import tensorrt as trt
import pycuda.autoinit 
import pycuda.driver as cuda  


CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
        'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
        'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
        'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
        'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
        'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
        'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
        'hair drier', 'toothbrush']  # the 80 COCO classes
use_letterbox = True
input_shape = (416, 416)      


def nms(dets, thresh):
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    areas = (y2 - y1 + 1) * (x2 - x1 + 1)
    scores = dets[:, 4]
    keep = []
    index = scores.argsort()[::-1] 

    while index.size > 0:
        i = index[0]
        keep.append(i)
        x11 = np.maximum(x1[i], x1[index[1:]]) 
        y11 = np.maximum(y1[i], y1[index[1:]])
        x22 = np.minimum(x2[i], x2[index[1:]])
        y22 = np.minimum(y2[i], y2[index[1:]])
        w = np.maximum(0, x22 - x11 + 1)                              
        h = np.maximum(0, y22 - y11 + 1) 
        overlaps = w * h
        ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)
        idx = np.where(ious <= thresh)[0]
        index = index[idx + 1]
    return keep


def filter_box(org_box, conf_thres, iou_thres):  # filter boxes by objectness, then run per-class NMS
    org_box = np.squeeze(org_box)
    conf = org_box[..., 4] > conf_thres
    box = org_box[conf]

    cls_cinf = box[..., 5:]
    cls = []
    for i in range(len(cls_cinf)):
        cls.append(int(np.argmax(cls_cinf[i])))
    all_cls = list(set(cls))    
     
    output = []
    for i in range(len(all_cls)):
        curr_cls = all_cls[i]
        curr_cls_box = []
        curr_out_box = []
        for j in range(len(cls)):
            if cls[j] == curr_cls:
                box[j][4] *= cls_cinf[j][curr_cls]
                box[j][5] = curr_cls
                curr_cls_box.append(box[j][:6])
        curr_cls_box = np.array(curr_cls_box)
        curr_out_box = nms(curr_cls_box, iou_thres)
        for k in curr_out_box:
            output.append(curr_cls_box[k])
    output = np.array(output)  # (num_boxes, 6): x1, y1, x2, y2, score, class
    return output


def letterbox(im, new_shape=(416, 416), color=(114, 114, 114)):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    
    # Compute padding
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))    
    dw, dh = (new_shape[1] - new_unpad[0])/2, (new_shape[0] - new_unpad[1])/2  # wh padding 
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    
    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im


def scale_boxes(input_shape, boxes, shape):
    # Rescale boxes (xyxy) from input_shape to shape
    gain = min(input_shape[0] / shape[0], input_shape[1] / shape[1])  # gain  = old / new
    pad = (input_shape[1] - shape[1] * gain) / 2, (input_shape[0] - shape[0] * gain) / 2  # wh padding

    boxes[..., [0, 2]] -= pad[0]  # x padding
    boxes[..., [1, 3]] -= pad[1]  # y padding
    boxes[..., :4] /= gain
    boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1])  # x1, x2
    boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0])  # y1, y2
    return boxes


def draw(image, box_data):
    box_data = scale_boxes(input_shape, box_data, image.shape)
    boxes = box_data[...,:4].astype(np.int32) 
    scores = box_data[...,4]
    classes = box_data[...,5].astype(np.int32)
   
    for box, score, cl in zip(boxes, scores, classes):
        x1, y1, x2, y2 = box
        print('class: {}, score: {}, coordinate: [{}, {}, {}, {}]'.format(CLASSES[cl], score, x1, y1, x2, y2))
        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 1)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)


if __name__=="__main__":
    logger = trt.Logger(trt.Logger.WARNING)
    with open("yolov3.engine", "rb") as f, trt.Runtime(logger) as runtime:
        engine = runtime.deserialize_cuda_engine(f.read())
    context = engine.create_execution_context()
    h_input = cuda.pagelocked_empty(trt.volume(context.get_binding_shape(0)), dtype=np.float32)
    h_output = cuda.pagelocked_empty(trt.volume(context.get_binding_shape(1)), dtype=np.float32)
    d_input = cuda.mem_alloc(h_input.nbytes)
    d_output = cuda.mem_alloc(h_output.nbytes)
    stream = cuda.Stream()
    
    img = cv2.imread('bus.jpg')
    if use_letterbox:
        input = letterbox(img, input_shape)
    else:
        input = cv2.resize(img, input_shape)
    input = input[:, :, ::-1].transpose(2, 0, 1).astype(dtype=np.float32)  # BGR to RGB, HWC to CHW
    input[0, :] = (input[0, :] - 123.675) / 58.395  # per-channel mean/std from the model config
    input[1, :] = (input[1, :] - 116.28) / 57.12
    input[2, :] = (input[2, :] - 103.53) / 57.375
    input = np.expand_dims(input, axis=0) 
    
    np.copyto(h_input, input.ravel())

    cuda.memcpy_htod_async(d_input, h_input, stream)
    context.execute_async_v2(bindings=[int(d_input), int(d_output)], stream_handle=stream.handle)
    cuda.memcpy_dtoh_async(h_output, d_output, stream)
    stream.synchronize()
    # (13*13 + 26*26 + 52*52) * 3 anchors = 10647 predictions of 85 values each
    pred = h_output.reshape(1, 10647, 85)
    outbox = filter_box(pred, 0.5, 0.5)
    draw(img, outbox)
    cv2.imwrite('res.jpg', img)

Inference through the mmdeploy API:

from mmdeploy.apis import inference_model


model_cfg = 'mmdetection/configs/yolo/yolov3_mobilenetv2_8xb24-ms-416-300e_coco.py'
deploy_cfg = 'mmdeploy/configs/mmdet/detection/detection_tensorrt_static-320x320.py'
img = 'bus.jpg'
backend_files = ['work_dir/trt/yolov3/end2end.engine']
device = 'cuda'

result = inference_model(model_cfg, deploy_cfg, backend_files, img, device)
print(result)

Or:

from mmdeploy_runtime import Detector
import cv2


# read the image
img = cv2.imread('bus.jpg')

# create the detector
detector = Detector(model_path='work_dir/trt/yolov3', device_name='cuda')

# run inference
bboxes, labels, _ = detector(img)
# filter the results by score threshold and draw them on the original image
for bbox, label_id in zip(bboxes, labels):
    [left, top, right, bottom], score = bbox[0:4].astype(int), bbox[4]
    if score < 0.3:
        continue
    cv2.rectangle(img, (left, top), (right, bottom), (0, 255, 0))
cv2.imwrite('output_detection.png', img)
