Introduction
I'm a machine-learning beginner. By searching with a large language model plus Baidu, I stumbled my way to a first working version. It does lock onto heads to some extent, but detection isn't very fast and there is no friend-or-foe discrimination, so the results are far from ideal. Still, it's good enough to have some fun in 4399 mini-games!
Approach
1. Use yolov5 to detect the screen in real time, capturing only the central part of the frame; this reduces how many characters have to be recognized and speeds up detection.
2. Filter the detections, keeping only the person boxes.
3. Take the coordinates of the first box and compute a point near the top-center of the box, treating it as a rough head position (see the coordinate sketch after this list).
4. Move the mouse to that point.
5. Simulate a left mouse click to fire.
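Before the full script, here is a minimal sketch of the coordinate math behind steps 3 and 4, using the same crop-origin names (start_x, start_y) that appear in the script below:

def aim_point(x1, y1, x2, y2, start_x, start_y):
    # Box center in crop coordinates
    center_x = (x1 + x2) / 2
    center_y = (y1 + y2) / 2
    # A quarter of the box height above center serves as a rough head position
    head_offset = (y2 - y1) / 4
    # Shift by the crop origin to get absolute screen coordinates
    return int(start_x + center_x), int(start_y + center_y - head_offset)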
Code
1. First, clone the yolov5 project from GitHub:
git clone https://github.com/ultralytics/yolov5.git
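Besides the packages in yolov5's own requirements.txt, the script below needs a few extras (assuming a plain pip environment; mediapipe is only required because of a leftover import in the script):
pip install -r requirements.txt
pip install pyautogui pydirectinput mediapipe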
2. Add a test.py to the project root:
import time
import cv2
import mediapipe as mp
import pyautogui
import pydirectinput
import numpy as np
import tkinter as tk
import torch
import warnings
warnings.filterwarnings("ignore", category=FutureWarning, module="torch.cuda.amp.autocast")
# Get the screen size via a throwaway Tk window
root = tk.Tk()
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
root.destroy()
# Only capture a 500x500 region at the center of the screen
crop_width = 500
crop_height = 500
start_x = (screen_width - crop_width) // 2
start_y = (screen_height - crop_height) // 2
# Load the pretrained model from the local clone
model = torch.hub.load('./', 'custom', path='yolov5s.pt', source='local')
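# source='local' loads the model code from the repo clone in the current
# directory, so run this script from the yolov5 root. Alternatively, torch.hub
# can fetch the repo over the network instead of using a local clone:
# model = torch.hub.load('ultralytics/yolov5', 'yolov5s')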
def readScreen():
    # Initialize the MediaPipe pose detector (leftover from an earlier attempt;
    # the pose.process() call below is commented out, so it is effectively unused)
    mp_pose = mp.solutions.pose
    pose = mp_pose.Pose(static_image_mode=False, min_detection_confidence=0.5, min_tracking_confidence=0.5)
    while True:
        # Use pyautogui to capture only the central crop region of the screen
        screenshot = pyautogui.screenshot(region=(start_x, start_y, crop_width, crop_height))
        # Convert the screenshot to an OpenCV-style BGR numpy array
        frame = cv2.cvtColor(np.array(screenshot), cv2.COLOR_RGB2BGR)
        # Desired size of the (optional) preview window, as a fraction of the
        # screen size; adjust the ratios as needed
        display_width = int(screen_width * 0.4)
        display_height = int(screen_height * 0.4)
        # Scale factor that fits the frame into the preview while keeping its aspect ratio
        scale_width = display_width / frame.shape[1]
        scale_height = display_height / frame.shape[0]
        scale = min(scale_width, scale_height)
        # Resized copy, only used by the commented-out preview/drawing code below
        resized_frame = cv2.resize(frame, (int(frame.shape[1] * scale), int(frame.shape[0] * scale)))
        # Define a 3x3 sharpening kernel to sharpen the crop before inference
        kernel = np.array([[-1, -1, -1],
                           [-1, 9, -1],
                           [-1, -1, -1]])
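        # The kernel weights sum to 1 (9 - 8), so overall brightness is preserved
        # while edges are boosted; the assumption (untested) is that crisper edges
        # make small, distant player models easier for the detector to pick up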
        sharpened_frame = cv2.filter2D(frame, -1, kernel)
        #results = pose.process(sharpened_frame)
        # Run yolov5 inference on the sharpened crop
        results = model(sharpened_frame)
        # Parse the results: each row is [x1, y1, x2, y2, confidence, class]
        detections = results.xyxy[0].cpu().numpy()
        # Aim at the first 'person' detection
        for detection in detections:
            x1, y1, x2, y2, confidence, cls = detection
            class_name = model.names[int(cls)]
            if class_name == 'person':
                # Box center in crop coordinates (the model ran on the full-size
                # 500x500 crop, so no rescaling is needed for aiming)
                center_x = (x1 + x2) / 2
                center_y = (y1 + y2) / 2
                # Aim a quarter of the box height above the center, roughly the head
                offset_y = (y2 - y1) / 4
                # Shift by the crop origin to get absolute screen coordinates
                screen_center_x = start_x + center_x
                screen_center_y = start_y + center_y - offset_y
                try:
                    pydirectinput.moveTo(int(screen_center_x), int(screen_center_y + 10))
                    click_left_button()
                except pyautogui.FailSafeException:
                    print("Mouse moved into the fail-safe corner; skipping this shot.")
                except Exception as e:
                    print(f"Unexpected error while moving the mouse: {e}")
                # Optional: draw the detection on the scaled preview frame
                #sx1, sy1, sx2, sy2 = (int(v * scale) for v in (x1, y1, x2, y2))
                #cv2.rectangle(resized_frame, (sx1, sy1), (sx2, sy2), (0, 255, 0), 2)
                #cv2.putText(resized_frame, f"{class_name}: {confidence:.2f}", (sx1, sy1 - 10),
                #            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
                break
        #show(resized_frame)
        # cv2.waitKey only receives key presses while an OpenCV window (such as
        # the preview above) is open and focused, so ESC only works with it shown
        if cv2.waitKey(1) & 0xFF == 27:  # press ESC to quit
            break
    cv2.destroyAllWindows()
    pose.close()
def click_left_button():
    # Fire: press and release the left mouse button with a short hold in between
    print("clicking")
    pydirectinput.mouseDown()
    time.sleep(0.05)
    pydirectinput.mouseUp()
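# pydirectinput re-implements pyautogui's mouse/keyboard API on top of the
# Windows SendInput mechanism; many games ignore pyautogui's synthetic events
# but do accept these, which is presumably why it handles the aiming and clicks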
def show(frame):
    # Create a resizable preview window and keep it on top of the game
    cv2.namedWindow('Pose Recognition Result', cv2.WINDOW_NORMAL)
    cv2.setWindowProperty('Pose Recognition Result', cv2.WND_PROP_TOPMOST, 1)
    cv2.imshow('Pose Recognition Result', frame)
if __name__ == '__main__':
    readScreen()
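To try it, run python test.py from the yolov5 repo root (so the source='local' model load can find the repo code), with the game window visible at the center of the screen.

One small, optional improvement, sketched here rather than taken from the script above (the 0.5 threshold is a guess; tune it in-game): skip low-confidence boxes so stray detections don't yank the mouse around.

def first_confident_person(detections, names, min_conf=0.5):
    # Return the first 'person' box with confidence >= min_conf, else None
    for x1, y1, x2, y2, confidence, cls in detections:
        if names[int(cls)] == 'person' and confidence >= min_conf:
            return x1, y1, x2, y2
    return None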
Results
Postscript
For testing I used 火线精英, a shooter among the 4399 mini-games. It has to be opened in the 360 browser with the Flash plugin installed, and the opponents were basically bots. With those conditions met, you can have a bit of fun with it, hahaha.