【Training a Dataset with Detectron2】

Training your own dataset with Detectron2

  • 1. Download the official balloon dataset
  • 2. Convert to the Detectron2 format
  • 3. Training
  • 4. Testing and evaluation

1. Download the official balloon dataset

import os
import wget
from zipfile import ZipFile


def progress_bar(current, total, width=80):
    # Simple download-progress callback for wget; rewrite the same line on each update.
    progress = current / total
    bar = '#' * int(progress * width)
    percentage = round(progress * 100, 2)
    print(f'\r[{bar:<{width}}] {percentage}%', end='', flush=True)


save_path = 'balloon_dataset.zip'
if not os.path.exists(save_path):  # skip the download if the archive is already present
    url = 'https://github.com/matterport/Mask_RCNN/releases/download/v2.1/balloon_dataset.zip'
    try:
        wget.download(url, save_path, bar=progress_bar)
    except Exception as e:
        print(f'An error occurred: {e}')

extract_path = 'balloon_dataset'
with ZipFile(save_path, "r") as zf:  # avoid shadowing the built-in zip()
    zf.printdir()
    zf.extractall(extract_path)
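
After extraction you should have balloon_dataset/balloon/train and balloon_dataset/balloon/val, each holding the images plus one via_region_data.json annotation file. A quick optional check of that layout (a minimal sketch; paths follow the extract_path used above):

import os

for split in ("train", "val"):
    split_dir = os.path.join("balloon_dataset", "balloon", split)
    files = os.listdir(split_dir)
    print(split, len(files), "files;",
          "annotations present" if "via_region_data.json" in files else "annotations missing")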

2. Convert to the Detectron2 format

# ----------------------------------------------------------------------------
# Convert to the Detectron2 data format
# if your dataset is in COCO format, this cell can be replaced by the following three lines:
# from detectron2.data.datasets import register_coco_instances
# register_coco_instances("my_dataset_train", {}, "json_annotation_train.json", "path/to/image/dir")
# register_coco_instances("my_dataset_val", {}, "json_annotation_val.json", "path/to/image/dir")
import cv2
import os
import json
import numpy as np
import random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import BoxMode


def get_balloon_dicts(img_dir):
    json_file = os.path.join(img_dir, "via_region_data.json")
    with open(json_file) as f:
        imgs_anns = json.load(f)

    dataset_dicts = []
    for idx, v in enumerate(imgs_anns.values()):
        record = {}

        filename = os.path.join(img_dir, v["filename"])
        height, width = cv2.imread(filename).shape[:2]

        record["file_name"] = filename
        record["image_id"] = idx
        record["height"] = height
        record["width"] = width

        annos = v["regions"]
        objs = []
        for _, anno in annos.items():
            assert not anno["region_attributes"]
            anno = anno["shape_attributes"]
            px = anno["all_points_x"]
            py = anno["all_points_y"]
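            # VIA stores the polygon as separate x/y lists; pair them at pixel centers,
            # then flatten to [x1, y1, x2, y2, ...] as expected by Detectron2.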
            poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
            poly = [p for x in poly for p in x]

            obj = {
                "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts

# All image annotations in the balloon dataset are stored in a single JSON file per split, so train and val are registered separately here; you could also save them as individual JSON files yourself.
for d in ["train", "val"]:
    DatasetCatalog.register("balloon_" + d, lambda d=d: get_balloon_dicts("balloon_dataset/balloon/" + d))
    MetadataCatalog.get("balloon_" + d).set(thing_classes=["balloon"])
balloon_metadata = MetadataCatalog.get("balloon_train")
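
# Optional sanity check (not in the original): once registered, a dataset can be
# fetched back by name through the catalogs, which is also how Detectron2's data
# loaders will access it later.
print(len(DatasetCatalog.get("balloon_train")), "training images;",
      "classes:", MetadataCatalog.get("balloon_train").thing_classes)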

dataset_dicts = get_balloon_dicts("balloon_dataset/balloon/train")
for d in random.sample(dataset_dicts, 3):
    img = cv2.imread(d["file_name"])
    visualizer = Visualizer(img[:, :, ::-1], metadata=balloon_metadata, scale=0.5)
    out = visualizer.draw_dataset_dict(d)
    cv2.imshow('img', out.get_image()[:, :, ::-1])
    cv2.waitKey(0)
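
# Optional alternative (not in the original): cv2.imshow needs a display, so on a
# headless server you may prefer to write the ground-truth visualizations to disk.
# The output directory name here is just an assumption.
os.makedirs("vis_gt", exist_ok=True)
for i, d in enumerate(random.sample(dataset_dicts, 3)):
    img = cv2.imread(d["file_name"])
    vis = Visualizer(img[:, :, ::-1], metadata=balloon_metadata, scale=0.5)
    cv2.imwrite(os.path.join("vis_gt", f"gt_{i}.jpg"),
                vis.draw_dataset_dict(d).get_image()[:, :, ::-1])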

# ----------------------------------------------------------------------------

3. Training

# ----------------------------------------------------------------------------
# Convert to the Detectron2 data format
# if your dataset is in COCO format, this cell can be replaced by the following three lines:
# from detectron2.data.datasets import register_coco_instances
# register_coco_instances("my_dataset_train", {}, "json_annotation_train.json", "path/to/image/dir")
# register_coco_instances("my_dataset_val", {}, "json_annotation_val.json", "path/to/image/dir")
import cv2
import os
import json
import numpy as np
import random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import BoxMode


def get_balloon_dicts(img_dir):
    json_file = os.path.join(img_dir, "via_region_data.json")
    with open(json_file) as f:
        imgs_anns = json.load(f)

    dataset_dicts = []
    for idx, v in enumerate(imgs_anns.values()):
        record = {}

        filename = os.path.join(img_dir, v["filename"])
        height, width = cv2.imread(filename).shape[:2]

        record["file_name"] = filename
        record["image_id"] = idx
        record["height"] = height
        record["width"] = width

        annos = v["regions"]
        objs = []
        for _, anno in annos.items():
            assert not anno["region_attributes"]
            anno = anno["shape_attributes"]
            px = anno["all_points_x"]
            py = anno["all_points_y"]
            poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
            poly = [p for x in poly for p in x]

            obj = {
                "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts

# All image annotations in the balloon dataset are stored in a single JSON file per split, so train and val are registered separately here; you could also save them as individual JSON files yourself.
for d in ["train", "val"]:
    DatasetCatalog.register("balloon_" + d, lambda d=d: get_balloon_dicts("balloon_dataset/balloon/" + d))
    MetadataCatalog.get("balloon_" + d).set(thing_classes=["balloon"])
balloon_metadata = MetadataCatalog.get("balloon_train")

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("balloon_train",)
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 0
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")  # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2  # This is the real "batch size" commonly known to deep learning people
cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
cfg.SOLVER.MAX_ITER = 300    # 300 iterations seems good enough for this toy dataset; you will need to train longer for a practical dataset
cfg.SOLVER.STEPS = []        # do not decay learning rate
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128   # The "RoIHead batch size". 128 is faster, and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (balloon). (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)
# NOTE: this config means the number of classes, but some popular unofficial tutorials incorrectly use num_classes+1 here.
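
# Optional addition (not part of the original setup): the config above uses
# Detectron2's default device, which is a CUDA GPU. On a machine without one,
# switch to CPU before building the trainer; training will be much slower.
import torch
if not torch.cuda.is_available():
    cfg.MODEL.DEVICE = "cpu"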

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
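
During training, DefaultTrainer writes checkpoints and a metrics.json log (one JSON object per line) to cfg.OUTPUT_DIR, which defaults to ./output; the final weights used in the next section are saved there as model_final.pth. A minimal sketch for inspecting the last few logged entries afterwards (field names such as total_loss are what recent Detectron2 versions log and may vary):

import json
import os

metrics_path = os.path.join("./output", "metrics.json")
with open(metrics_path) as f:
    metrics = [json.loads(line) for line in f if line.strip()]
for m in metrics[-5:]:
    # Each entry holds the smoothed losses for one logging step.
    print(m.get("iteration"), m.get("total_loss"))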

4. Testing and evaluation

# ----------------------------------------------------------------------------
# Convert to the Detectron2 data format
# if your dataset is in COCO format, this cell can be replaced by the following three lines:
# from detectron2.data.datasets import register_coco_instances
# register_coco_instances("my_dataset_train", {}, "json_annotation_train.json", "path/to/image/dir")
# register_coco_instances("my_dataset_val", {}, "json_annotation_val.json", "path/to/image/dir")
import cv2
import os
import json
import numpy as np
import random
# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import BoxMode
from detectron2.utils.visualizer import ColorMode

from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from multiprocessing import freeze_support

def get_balloon_dicts(img_dir):
    json_file = os.path.join(img_dir, "via_region_data.json")
    with open(json_file) as f:
        imgs_anns = json.load(f)

    dataset_dicts = []
    for idx, v in enumerate(imgs_anns.values()):
        record = {}

        filename = os.path.join(img_dir, v["filename"])
        height, width = cv2.imread(filename).shape[:2]

        record["file_name"] = filename
        record["image_id"] = idx
        record["height"] = height
        record["width"] = width

        annos = v["regions"]
        objs = []
        for _, anno in annos.items():
            assert not anno["region_attributes"]
            anno = anno["shape_attributes"]
            px = anno["all_points_x"]
            py = anno["all_points_y"]
            poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
            poly = [p for x in poly for p in x]

            obj = {
                "bbox": [np.min(px), np.min(py), np.max(px), np.max(py)],
                "bbox_mode": BoxMode.XYXY_ABS,
                "segmentation": [poly],
                "category_id": 0,
            }
            objs.append(obj)
        record["annotations"] = objs
        dataset_dicts.append(record)
    return dataset_dicts


if __name__ == '__main__':
    freeze_support()
    # Inference should use the config with parameters that are used in training
    # cfg now already contains everything we've set previously. We changed it a little bit for inference:
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")  # path to the model we just trained
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set a custom testing threshold (after merge_from_file so it is not overwritten)
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # The "RoIHead batch size". 128 is faster, and good enough for this toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (balloon). (see https://detectron2.readthedocs.io/tutorials/datasets.html#update-the-config-for-new-datasets)

    predictor = DefaultPredictor(cfg)

    # All image annotations in the balloon dataset are stored in a single JSON file per split, so train and val are registered separately here; you could also save them as individual JSON files yourself.
    for d in ["train", "val"]:
        DatasetCatalog.register("balloon_" + d, lambda d=d: get_balloon_dicts("balloon_dataset/balloon/" + d))
        MetadataCatalog.get("balloon_" + d).set(thing_classes=["balloon"])
    balloon_metadata = MetadataCatalog.get("balloon_train")

    dataset_dicts = get_balloon_dicts("balloon_dataset/balloon/val")
    for d in random.sample(dataset_dicts, 10):
        im = cv2.imread(d["file_name"])
        outputs = predictor(
            im)  # format is documented at https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
        v = Visualizer(im[:, :, ::-1],
                       metadata=balloon_metadata,
                       scale=0.5,
                       instance_mode=ColorMode.IMAGE
                       # remove the colors of unsegmented pixels. This option is only available for segmentation models
                       )
        out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        cv2.imshow('result', out.get_image()[:, :, ::-1])
        cv2.waitKey(0)  # wait for a key press so each prediction window is actually displayed

    evaluator = COCOEvaluator("balloon_val", output_dir="./output")
    val_loader = build_detection_test_loader(cfg, "balloon_val")
    print(inference_on_dataset(predictor.model, val_loader, evaluator))
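
If you also want the evaluation metrics on disk rather than only printed, capture the dict returned by inference_on_dataset (for this model it contains "bbox" and "segm" entries) instead of passing it straight to print. A minimal sketch, with the output file name being an assumption:

results = inference_on_dataset(predictor.model, val_loader, evaluator)
print(results)
with open(os.path.join("./output", "balloon_val_results.json"), "w") as f:
    json.dump(results, f, indent=2)  # AP numbers per task, as reported above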
