Yahboom microros car - native Ubuntu support series: 11 - Finger control and gesture recognition

The recognition framework is the same as before: MediaPipe Hands.

The background knowledge is not repeated here; see the earlier post in this series: Yahboom microros car - native Ubuntu support series: 10 - Paintbrush (CSDN blog).

Finger control

Create a new file 10_HandCtrl.py under src/yahboom_esp32_mediapipe/yahboom_esp32_mediapipe/ with the following code:

#!/usr/bin/env python3
# encoding: utf-8
import math
import time
import cv2 as cv
import numpy as np
import mediapipe as mp
import rclpy
from rclpy.node import Node
from cv_bridge import CvBridge
from sensor_msgs.msg import Image, CompressedImage

from rclpy.time import Time
import datetime

volPer = value = index = 0
effect = ["color", "thresh", "blur", "hue", "enhance"]
volBar = 400
class handDetector:
    def __init__(self, mode=False, maxHands=2, detectorCon=0.5, trackCon=0.5):
        self.tipIds = [4, 8, 12, 16, 20]
        self.mpHand = mp.solutions.hands
        self.mpDraw = mp.solutions.drawing_utils
        self.hands = self.mpHand.Hands(  # initialize the hand-detection model
            static_image_mode=mode,
            max_num_hands=maxHands,
            min_detection_confidence=detectorCon,
            min_tracking_confidence=trackCon
        )
        self.lmDrawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 0, 255), thickness=-1, circle_radius=15)
        self.drawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 255, 0), thickness=10, circle_radius=10)
    # distance between two points
    def get_dist(self, point1, point2):
        x1, y1 = point1
        x2, y2 = point2
        return abs(math.sqrt(math.pow(abs(y1 - y2), 2) + math.pow(abs(x1 - x2), 2)))
    # angle at pt2 formed by the segments to pt1 and pt3
    def calc_angle(self, pt1, pt2, pt3):
        point1 = self.lmList[pt1][1], self.lmList[pt1][2]
        point2 = self.lmList[pt2][1], self.lmList[pt2][2]
        point3 = self.lmList[pt3][1], self.lmList[pt3][2]
        a = self.get_dist(point1, point2)
        b = self.get_dist(point2, point3)
        c = self.get_dist(point1, point3)
        try:  # law of cosines
            radian = math.acos((math.pow(a, 2) + math.pow(b, 2) - math.pow(c, 2)) / (2 * a * b))
            angle = radian / math.pi * 180  # radians to degrees
        except (ValueError, ZeroDivisionError):  # degenerate triangle (overlapping points)
            angle = 0
        return abs(angle)

    def findHands(self, frame, draw=True):
        img = np.zeros(frame.shape, np.uint8)
        img_RGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)  # MediaPipe expects RGB input
        self.results = self.hands.process(img_RGB)  # run hand detection
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:  # draw the detected landmarks
                if draw: self.mpDraw.draw_landmarks(img, handLms, self.mpHand.HAND_CONNECTIONS)
        return img

    def findPosition(self, frame, draw=True):
        self.lmList = []
        if self.results.multi_hand_landmarks:
            for id, lm in enumerate(self.results.multi_hand_landmarks[0].landmark):
                # print(id,lm)
                h, w, c = frame.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                # print(id, lm.x, lm.y, lm.z)
                self.lmList.append([id, cx, cy])  # record the landmark index and pixel coordinates
                if draw: cv.circle(frame, (cx, cy), 15, (0, 0, 255), cv.FILLED)
        return self.lmList

    def frame_combine(self, frame, src):  # stitch two images side by side
        if len(frame.shape) == 3:
            frameH, frameW = frame.shape[:2]
            srcH, srcW = src.shape[:2]
            dst = np.zeros((max(frameH, srcH), frameW + srcW, 3), np.uint8)
            dst[:, :frameW] = frame[:, :]
            dst[:, frameW:] = src[:, :]
        else:
            src = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
            frameH, frameW = frame.shape[:2]
            imgH, imgW = src.shape[:2]
            dst = np.zeros((frameH, frameW + imgW), np.uint8)
            dst[:, :frameW] = frame[:, :]
            dst[:, frameW:] = src[:, :]
        return dst

class MY_Picture(Node):
    def __init__(self, name):
        super().__init__(name)
        self.bridge = CvBridge()
        self.sub_img = self.create_subscription(
            CompressedImage, '/espRos/esp32camera', self.handleTopic, 1)  # image stream from the ESP32 camera

        self.hand_detector = handDetector()
        self.volPer = self.value = self.index = 0
        self.effect = ["color", "thresh", "blur", "hue", "enhance"]
        self.volBar = 400
        self.last_stamp = None
        self.new_seconds = 0
        self.fps_seconds = 1

    def handleTopic(self, msg):
        self.last_stamp = msg.header.stamp  
        if self.last_stamp:
            total_secs = Time(nanoseconds=self.last_stamp.nanosec, seconds=self.last_stamp.sec).nanoseconds
            delta = datetime.timedelta(seconds=total_secs * 1e-9)
            seconds = delta.total_seconds()*100

            if self.new_seconds != 0:
                self.fps_seconds = seconds - self.new_seconds

            self.new_seconds = seconds  # keep this value for the next frame
        start = time.time()
        frame = self.bridge.compressed_imgmsg_to_cv2(msg)
        frame = cv.resize(frame, (640, 480))

        action = cv.waitKey(1) & 0xFF

        img  = self.hand_detector.findHands(frame)
        lmList = self.hand_detector.findPosition(frame, draw=False)
        if len(lmList) != 0:
            angle = self.hand_detector.calc_angle(4, 0, 8)  # angle between thumb tip (4), wrist (0) and index tip (8)
            x1, y1 = lmList[4][1], lmList[4][2]
            x2, y2 = lmList[8][1], lmList[8][2]
            cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
            cv.circle(img, (x1, y1), 15, (255, 0, 255), cv.FILLED)
            cv.circle(img, (x2, y2), 15, (255, 0, 255), cv.FILLED)
            cv.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
            cv.circle(img, (cx, cy), 15, (255, 0, 255), cv.FILLED)
            if angle <= 10: cv.circle(img, (cx, cy), 15, (0, 255, 0), cv.FILLED)
            self.volBar = np.interp(angle, [0, 70], [400, 150])
            self.volPer = np.interp(angle, [0, 70], [0, 100])
            self.value = np.interp(angle, [0, 70], [0, 255])
            # print("angle: {},self.value: {}".format(angle, self.value))
            print(f'mode:{self.effect[self.index]}')
        # Threshold binarization: pixels above value become 255, the rest become 0
        if self.effect[self.index]=="thresh":
            gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            frame = cv.threshold(gray, self.value, 255, cv.THRESH_BINARY)[1]
        # Gaussian blur with a 21x21 kernel; the standard deviation is mapped from value
        elif self.effect[self.index]=="blur":
            frame = cv.GaussianBlur(frame, (21, 21), np.interp(self.value, [0, 255], [0, 11]))
        # Hue shift: convert BGR to HSV, offset the hue channel, then convert back
        elif self.effect[self.index]=="hue":
            frame = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
            frame[:, :, 0] += int(self.value)
            frame = cv.cvtColor(frame, cv.COLOR_HSV2BGR)
        # Contrast adjustment with CLAHE
        elif self.effect[self.index]=="enhance":
            enh_val = self.value / 40
            clahe = cv.createCLAHE(clipLimit=enh_val, tileGridSize=(8, 8))
            lab = cv.cvtColor(frame, cv.COLOR_BGR2LAB)
            lab[:, :, 0] = clahe.apply(lab[:, :, 0])
            frame = cv.cvtColor(lab, cv.COLOR_LAB2BGR)
        if action == ord('f'):
            self.index += 1
            if self.index >= len(self.effect): self.index = 0
        
        end = time.time()
        fps = 1 / ((end - start)+self.fps_seconds)
        text = "FPS : " + str(int(fps))
        cv.putText(frame, text, (20, 30), cv.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 1)
        cv.rectangle(img, (50, 150), (85, 400), (255, 0, 0), 3)
        cv.rectangle(img, (50, int(self.volBar)), (85, 400), (0, 255, 0), cv.FILLED)
        cv.putText(img, f'{int(self.volPer)}%', (40, 450), cv.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 3)
        dst = self.hand_detector.frame_combine(frame, img)
        cv.imshow('dst', dst)



def main():
    print("start it")
    rclpy.init()
    esp_img = MY_Picture("My_Picture")
    try:
        rclpy.spin(esp_img)
    except KeyboardInterrupt:
        pass
    finally:
        esp_img.destroy_node()
        rclpy.shutdown()

After subscribing to the image published by the ESP32, the node runs MediaPipe hand detection, records the landmark pixel coordinates, and computes the angle 4-0-8 (thumb tip, wrist, index fingertip). Unlike the previous posts, this node also adds a set of OpenCV output effects: "color", "thresh", "blur", "hue", "enhance". The default is color; the others are threshold binarization, Gaussian blur, hue shifting, and CLAHE contrast enhancement. Press the f key to cycle through them.
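To make the mapping concrete: the 4-0-8 angle is clamped to roughly 0-70 degrees and linearly interpolated onto three output ranges with np.interp. A minimal standalone sketch of just that mapping (the 45-degree sample angle is only an illustration):

import numpy as np

angle = 45.0  # example thumb-wrist-index angle in degrees
volBar = np.interp(angle, [0, 70], [400, 150])  # top y of the green bar: 400 (empty) to 150 (full)
volPer = np.interp(angle, [0, 70], [0, 100])    # percentage printed under the bar
value  = np.interp(angle, [0, 70], [0, 255])    # parameter fed into the selected image effect
print(int(volBar), int(volPer), int(value))     # 239 64 163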

Build, then run:

ros2 run yahboom_esp32_mediapipe HandCtrl
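If ros2 run cannot find the HandCtrl executable, it has to be registered as a console script in the package's setup.py and the workspace rebuilt. A sketch under the assumption that the entry maps straight to the new file (the actual names in Yahboom's setup.py may differ):

colcon build --packages-select yahboom_esp32_mediapipe
source install/setup.bash

# setup.py excerpt (assumed) - maps the executable names to each script's main()
entry_points={
    'console_scripts': [
        'HandCtrl = yahboom_esp32_mediapipe.10_HandCtrl:main',
        'GestureRecognition = yahboom_esp32_mediapipe.11_GestureRecognition:main',
    ],
},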

The result looks like this:

Gesture recognition

Create a new file 11_GestureRecognition.py under src/yahboom_esp32_mediapipe/yahboom_esp32_mediapipe/ with the following code:

#!/usr/bin/env python3
# encoding: utf-8
import math
import time
import cv2 as cv
import numpy as np
import mediapipe as mp
import rclpy
from rclpy.node import Node
from cv_bridge import CvBridge
from sensor_msgs.msg import Image, CompressedImage

from rclpy.time import Time
import datetime

class handDetector:
    def __init__(self, mode=False, maxHands=2, detectorCon=0.5, trackCon=0.5):
        self.tipIds = [4, 8, 12, 16, 20]
        self.mpHand = mp.solutions.hands
        self.mpDraw = mp.solutions.drawing_utils
        self.hands = self.mpHand.Hands(
            static_image_mode=mode,
            max_num_hands=maxHands,
            min_detection_confidence=detectorCon,
            min_tracking_confidence=trackCon
        )
        self.lmList = []
        self.lmDrawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 0, 255), thickness=-1, circle_radius=6)
        self.drawSpec = mp.solutions.drawing_utils.DrawingSpec(color=(0, 255, 0), thickness=2, circle_radius=2)

    def get_dist(self, point1, point2):
        x1, y1 = point1
        x2, y2 = point2
        return abs(math.sqrt(math.pow(abs(y1 - y2), 2) + math.pow(abs(x1 - x2), 2)))

    def calc_angle(self, pt1, pt2, pt3):
        point1 = self.lmList[pt1][1], self.lmList[pt1][2]
        point2 = self.lmList[pt2][1], self.lmList[pt2][2]
        point3 = self.lmList[pt3][1], self.lmList[pt3][2]
        a = self.get_dist(point1, point2)
        b = self.get_dist(point2, point3)
        c = self.get_dist(point1, point3)
        try:
            radian = math.acos((math.pow(a, 2) + math.pow(b, 2) - math.pow(c, 2)) / (2 * a * b))
            angle = radian / math.pi * 180
        except (ValueError, ZeroDivisionError):
            angle = 0
        return abs(angle)


    def findHands(self, frame, draw=True):
        self.lmList = []
        img = np.zeros(frame.shape, np.uint8)
        img_RGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)
        self.results = self.hands.process(img_RGB)
        if self.results.multi_hand_landmarks:
            for i in range(len(self.results.multi_hand_landmarks)):
                if draw: self.mpDraw.draw_landmarks(frame, self.results.multi_hand_landmarks[i], self.mpHand.HAND_CONNECTIONS, self.lmDrawSpec, self.drawSpec)
                self.mpDraw.draw_landmarks(img, self.results.multi_hand_landmarks[i], self.mpHand.HAND_CONNECTIONS, self.lmDrawSpec, self.drawSpec)
                for id, lm in enumerate(self.results.multi_hand_landmarks[i].landmark):
                    h, w, c = frame.shape
                    cx, cy = int(lm.x * w), int(lm.y * h)
                    self.lmList.append([id, cx, cy])
        return frame, img

    def frame_combine(self, frame, src):
        if len(frame.shape) == 3:
            frameH, frameW = frame.shape[:2]
            srcH, srcW = src.shape[:2]
            dst = np.zeros((max(frameH, srcH), frameW + srcW, 3), np.uint8)
            dst[:, :frameW] = frame[:, :]
            dst[:, frameW:] = src[:, :]
        else:
            src = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
            frameH, frameW = frame.shape[:2]
            imgH, imgW = src.shape[:2]
            dst = np.zeros((frameH, frameW + imgW), np.uint8)
            dst[:, :frameW] = frame[:, :]
            dst[:, frameW:] = src[:, :]
        return dst

    def fingersUp(self):
        fingers=[]
        # Thumb
        if (self.calc_angle(self.tipIds[0],
                            self.tipIds[0] - 1,
                            self.tipIds[0] - 2) > 150.0) and (
                self.calc_angle(
                    self.tipIds[0] - 1,
                    self.tipIds[0] - 2,
                    self.tipIds[0] - 3) > 150.0): fingers.append(1)
        else:
            fingers.append(0)
        # other four fingers: a tip above the joint two landmarks down means raised
        for id in range(1, 5):
            if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        return fingers

    def get_gesture(self):
        gesture = ""
        fingers = self.fingersUp()
        if self.lmList[self.tipIds[0]][2] > self.lmList[self.tipIds[1]][2] and \
                self.lmList[self.tipIds[0]][2] > self.lmList[self.tipIds[2]][2] and \
                self.lmList[self.tipIds[0]][2] > self.lmList[self.tipIds[3]][2] and \
                self.lmList[self.tipIds[0]][2] > self.lmList[self.tipIds[4]][2] : gesture = "Thumb_down"

        elif self.lmList[self.tipIds[0]][2] < self.lmList[self.tipIds[1]][2] and \
                self.lmList[self.tipIds[0]][2] < self.lmList[self.tipIds[2]][2] and \
                self.lmList[self.tipIds[0]][2] < self.lmList[self.tipIds[3]][2] and \
                self.lmList[self.tipIds[0]][2] < self.lmList[self.tipIds[4]][2] and \
                self.calc_angle(self.tipIds[1] - 1, self.tipIds[1] - 2, self.tipIds[1] - 3) < 150.0 : gesture = "Thumb_up"
        if fingers.count(1) == 3 or fingers.count(1) == 4:
            if fingers[0] == 1 and (
                    self.get_dist(self.lmList[4][1:], self.lmList[8][1:])<self.get_dist(self.lmList[4][1:], self.lmList[5][1:])
            ): gesture = "OK"
            elif fingers[2] == fingers[3] == 0: gesture = "Rock"
            elif fingers.count(1) == 3: gesture = "Three"
            else: gesture = "Four"
        elif fingers.count(1) == 0: gesture = "Zero"
        elif fingers.count(1) == 1: gesture = "One"
        elif fingers.count(1) == 2:
            if fingers[0] == 1 and fingers[4] == 1: gesture = "Six"
            elif fingers[0] == 1 and self.calc_angle(4, 5, 8) > 90: gesture = "Eight"
            elif fingers[0] == fingers[1] == 1 and self.get_dist(self.lmList[4][1:], self.lmList[8][1:]) < 50: gesture = "Heart_single"
            else: gesture = "Two"
        elif fingers.count(1)==5:gesture = "Five"
        if self.get_dist(self.lmList[4][1:], self.lmList[8][1:]) < 60 and \
                self.get_dist(self.lmList[4][1:], self.lmList[12][1:]) < 60 and \
                self.get_dist(self.lmList[4][1:], self.lmList[16][1:]) < 60 and \
                self.get_dist(self.lmList[4][1:], self.lmList[20][1:]) < 60 : gesture = "Seven"
        if self.lmList[self.tipIds[0]][2] < self.lmList[self.tipIds[1]][2] and \
                self.lmList[self.tipIds[0]][2] < self.lmList[self.tipIds[2]][2] and \
                self.lmList[self.tipIds[0]][2] < self.lmList[self.tipIds[3]][2] and \
                self.lmList[self.tipIds[0]][2] < self.lmList[self.tipIds[4]][2] and \
                self.calc_angle(self.tipIds[1] - 1, self.tipIds[1] - 2, self.tipIds[1] - 3) > 150.0 : gesture = "Eight"
        return gesture



class MY_Picture(Node):
    def __init__(self, name):
        super().__init__(name)
        self.bridge = CvBridge()
        self.sub_img = self.create_subscription(
            CompressedImage, '/espRos/esp32camera', self.handleTopic, 1)  # image stream from the ESP32 camera

        self.hand_detector = handDetector(detectorCon=0.75)
        self.last_stamp = None
        self.new_seconds = 0
        self.fps_seconds = 1

    def handleTopic(self, msg):
        self.last_stamp = msg.header.stamp  
        if self.last_stamp:
            total_secs = Time(nanoseconds=self.last_stamp.nanosec, seconds=self.last_stamp.sec).nanoseconds
            delta = datetime.timedelta(seconds=total_secs * 1e-9)
            seconds = delta.total_seconds()*100

            if self.new_seconds != 0:
                self.fps_seconds = seconds - self.new_seconds

            self.new_seconds = seconds  # keep this value for the next frame
        start = time.time()
        frame = self.bridge.compressed_imgmsg_to_cv2(msg)
        frame = cv.resize(frame, (640, 480))

        cv.waitKey(1) 
        frame, img = self.hand_detector.findHands(frame, draw=False)

        if len(self.hand_detector.lmList) != 0:
            totalFingers = self.hand_detector.get_gesture()
            cv.rectangle(frame, (0, 430), (230, 480), (0, 255, 0), cv.FILLED)
            cv.putText(frame, str(totalFingers), (10, 470), cv.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
        
        end = time.time()
        fps = 1 / ((end - start)+self.fps_seconds)
        text = "FPS : " + str(int(fps))
        cv.putText(frame, text, (20, 30), cv.FONT_HERSHEY_SIMPLEX, 0.9, (0, 0, 255), 1)
        dist = self.hand_detector.frame_combine(frame, img)
        cv.imshow('dist', dist)


'''
Recognized gestures:
Zero One Two Three Four Five Six Seven Eight
Ok: OK sign
Rock: rock sign
Thumb_up: thumbs up
Thumb_down: thumbs down
Heart_single: one-handed finger heart
'''


def main():
    print("start it")
    rclpy.init()
    esp_img = MY_Picture("My_Picture")
    try:
        rclpy.spin(esp_img)
    except KeyboardInterrupt:
        pass
    finally:
        esp_img.destroy_node()
        rclpy.shutdown()

There are plenty of versions of this example online; the differences are mostly in the gesture classification, while the hand model in front of it is the same.

The meaning of a gesture is decided from the joint angles you expect and from which combination of fingers is extended. Some gestures are not recognized very reliably, but most of them work.
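As a quick standalone illustration of the fingers-up rule from fingersUp() (synthetic landmark list, not real camera data): a finger counts as raised when its tip's y coordinate is smaller, i.e. higher in the image, than that of the joint two landmarks below it.

# Hypothetical check of the rule used for the four non-thumb fingers.
# lmList entries are [id, x, y]; the fingertips are landmarks 8, 12, 16, 20.
lmList = [[i, 0, 0] for i in range(21)]
lmList[8][2], lmList[6][2] = 100, 200    # index finger: tip above the joint -> raised
lmList[12][2], lmList[10][2] = 300, 200  # middle finger: tip below the joint -> folded

tipIds = [4, 8, 12, 16, 20]
fingers = [1 if lmList[t][2] < lmList[t - 2][2] else 0 for t in tipIds[1:]]
print(fingers)  # [1, 0, 0, 0] -> only the index finger is raised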

Build, then run:

ros2 run yahboom_esp32_mediapipe GestureRecognition
