A standalone version that can farm stably

Ray
2025-10-29 10:28:38 +08:00
commit 8294cab51b
21 changed files with 897 additions and 0 deletions

8
.idea/.gitignore generated vendored Normal file

@@ -0,0 +1,8 @@
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml

8
.idea/huojv.iml generated Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="jdk" jdkName="dnf" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>


@@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>

7
.idea/misc.xml generated Normal file

@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="Black">
<option name="sdkName" value="D:\CONDA\anaconda3" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="dnf" project-jdk-type="Python SDK" />
</project>

8
.idea/modules.xml generated Normal file

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/huojv.iml" filepath="$PROJECT_DIR$/.idea/huojv.iml" />
</modules>
</component>
</project>

BIN
best.pt Normal file

Binary file not shown.

BIN
best0.pt Normal file

Binary file not shown.

300
main.py Normal file

@@ -0,0 +1,300 @@
import cv2
from utils.get_image import get_image
from utils.mouse import mouse_gui
from ultralytics import YOLO
import time
import serial
import ch9329Comm
import random
import math
from utils import shizi

model = YOLO(r"best.pt").to('cuda')
model0 = YOLO(r"best0.pt").to('cuda')
keyboard = ch9329Comm.keyboard.DataComm()
mouse = ch9329Comm.mouse.DataComm(1920, 1080)

kong_detections = {
    'center': None,
    'next': None,
    'npc1': None,
    'npc2': None,
    'npc3': None,
    'npc4': None,
    'boss': None,
    'daojv': [],
    'gw': [],
    'zhaozi': None
}
left = 0
top = 30
k = 0            # direction index used when circling
panduan = False  # whether we are currently inside a map
boss_pd = False  # whether the boss stage has been reached
rw = (632, 378)  # on-screen position of the player character
def yolo_shibie(im_PIL, detections, model):
    results = model(im_PIL)  # run object detection
    for result in results:
        for i in range(len(result.boxes.xyxy)):
            left, top, right, bottom = result.boxes.xyxy[i]
            scalar_tensor = result.boxes.cls[i]
            value = scalar_tensor.item()
            label = result.names[int(value)]
            if label == 'center' or label == 'next' or label == 'boss' or label == 'zhaozi':
                player_x = int(left + (right - left) / 2)
                player_y = int(top + (bottom - top) / 2) + 30
                RW = [player_x, player_y]
                detections[label] = RW
            elif label == 'daojv' or label == 'gw':
                player_x = int(left + (right - left) / 2)
                player_y = int(top + (bottom - top) / 2) + 30
                RW = [player_x, player_y]
                detections[label].append(RW)
            elif label == 'npc1' or label == 'npc2' or label == 'npc3' or label == 'npc4':
                player_x = int(left + (right - left) / 2)
                player_y = int(bottom) + 30
                RW = [player_x, player_y]
                detections[label] = RW
    return detections
def sq(p1, p2):
    """Euclidean distance between two points."""
    return math.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)


def process_points(points):
    """Pick a representative point: the single point, the midpoint of two,
    or (for 3+ points) the point with the smallest total distance to three
    random samples - a cheap medoid-style pick that damps outliers."""
    if not points:
        return None  # empty list
    n = len(points)
    if n == 1:
        # only one point: return it directly
        return points[0]
    elif n == 2:
        # two points: take the midpoint
        x = (points[0][0] + points[1][0]) / 2
        y = (points[0][1] + points[1][1]) / 2
        return [x, y]
    else:
        # sample 3 random points
        sample_points = random.sample(points, 3)
        # for each point, compute its total distance to those 3 samples
        min_sum = float('inf')
        best_point = None
        for p in points:
            dist_sum = sum(sq(p, sp) for sp in sample_points)
            if dist_sum < min_sum:
                min_sum = dist_sum
                best_point = p
        return best_point
def move_randomly(rw, k):
    """Walk one step in a rotating direction (right, up, left, down) for a random duration."""
    k = k % 4
    suiji_t = float(random.randint(10, 13) / 10)
    if k == 0:
        keyboard.send_data("66")
        time.sleep(suiji_t)
        keyboard.release()  # release the key
    elif k == 1:
        keyboard.send_data("88")
        time.sleep(suiji_t)
        keyboard.release()  # release the key
    elif k == 2:
        keyboard.send_data("44")
        time.sleep(suiji_t)
        keyboard.release()  # release the key
    elif k == 3:
        keyboard.send_data("22")
        time.sleep(suiji_t)
        keyboard.release()  # release the key
    return k + 1


def move_to(rw, mb):
    """Move from rw toward mb by holding direction keys; v is the assumed walking speed in pixels per second."""
    v = 470
    if rw[0] >= mb[0]:
        keyboard.send_data("44")
        time.sleep(float(abs(rw[0] - mb[0]) / v))
        keyboard.release()  # release the key
    else:
        keyboard.send_data("66")
        time.sleep(float(abs(rw[0] - mb[0]) / v))
        keyboard.release()  # release the key
    if rw[1] >= mb[1]:
        keyboard.send_data("88")
        time.sleep(float(abs(rw[1] - mb[1]) / v))
        keyboard.release()  # release the key
    else:
        keyboard.send_data("22")
        time.sleep(float(abs(rw[1] - mb[1]) / v))
        keyboard.release()
i = 0
while True:
    detections = {
        'center': None,
        'next': None,
        'npc1': None,
        'npc2': None,
        'npc3': None,
        'npc4': None,
        'boss': None,
        'daojv': [],
        'gw': [],
        'zhaozi': None
    }
    im_opencv = get_image.get_frame()  # [RGB, PIL]
    detections = yolo_shibie(im_opencv[1], detections, model)
    if shizi.tuwai(im_opencv[0]):  # outside the map: town navigation / map-entry logic
        im_opencv = get_image.get_frame()  # [RGB, PIL]
        detections = yolo_shibie(im_opencv[1], detections, model0)
        print('currently in town')
        if detections['npc1'] is not None and sq(rw, detections['npc1']) > 80:
            print("approaching npc1")
            print(sq(rw, detections['npc1']))
            move_to(rw, detections['npc1'])
            continue
        elif detections['npc1'] is not None and sq(rw, detections['npc1']) <= 80:
            print("next to npc1, walking up")
            print(sq(rw, detections['npc1']))
            mb = (detections['npc1'][0], detections['npc1'][1] - 1010)
            move_to(rw, mb)
            continue
        elif detections['npc3'] is not None and detections['npc4'] is None:
            print("next to npc3, walking right")
            mb = (rw[0], detections['npc3'][1] - 50)
            move_to(rw, mb)
            mb = (rw[0] + 700, rw[1])
            move_to(rw, mb)
            continue
        elif detections['npc4'] is not None:
            if sq(detections['npc4'], rw) < 50:
                print("very close to npc4, entering directly")
                keyboard.send_data("DD")
                time.sleep(0.15)
                keyboard.release()
                time.sleep(1)
                im_opencv = get_image.get_frame()  # [RGB, PIL]
                if shizi.daoying(im_opencv[0]):
                    mouse_gui.send_data_absolute(rw[0], rw[1] - 110, may=1)
                    time.sleep(1)
                continue
            else:
                print("npc4 is a bit far, clicking to enter")
                move_to(rw, detections['npc4'])
                time.sleep(1)
                im_opencv = get_image.get_frame()  # [RGB, PIL]
                if shizi.daoying(im_opencv[0]):
                    mouse_gui.send_data_absolute(rw[0], rw[1] - 110, may=1)
                    time.sleep(1)
                continue
    elif shizi.tiaozhan(im_opencv[0]):  # "start challenge" screen detected
        print('entering tower 4')
        mouse_gui.send_data_absolute(left + 1100, top + 600, may=1)
        time.sleep(0.3)
        mouse_gui.send_data_absolute(left + 433, top + 455, may=1)
        panduan = True
        continue
    elif shizi.jieshu(im_opencv[0]):  # "end challenge" screen detected
        print('ending the challenge')
        mouse_gui.send_data_absolute(left + 542, top + 644, may=1)
        time.sleep(0.8)
        mouse_gui.send_data_absolute(left + 706, top + 454, may=1)
        continue
    elif panduan:  # inside a map
        print("inside the map")
        if shizi.shuzi(im_opencv[0]):
            boss_pd = True
            print("reached the boss stage")
        if shizi.fuhuo(im_opencv[0]):
            print('clicking revive')
            mouse_gui.send_data_absolute(left + 536, top + 627, may=1)
            mouse_gui.send_data_absolute(rw[0], rw[1], may=0)
            continue
        if detections['zhaozi'] is not None:
            move_to(rw, detections['zhaozi'])
            continue
        if len(detections['daojv']) != 0:
            move_to(rw, process_points(detections['daojv']))
            for i in range(3 + len(detections['daojv'])):
                keyboard.send_data("AA")
                time.sleep(0.15)
                keyboard.release()
            continue
        if shizi.tuichu(im_opencv[0]) and detections['next'] is None and len(detections['daojv']) == 0 and len(detections['gw']) == 0 and boss_pd:
            print("detected that the challenge can be exited!!!")
            for i in range(3):
                time.sleep(0.5)
                im_opencv = get_image.get_frame()  # [RGB, PIL]
                detections = {
                    'center': None,
                    'next': None,
                    'npc1': None,
                    'npc2': None,
                    'npc3': None,
                    'npc4': None,
                    'boss': None,
                    'daojv': [],
                    'gw': [],
                    'zhaozi': None
                }
                detections = yolo_shibie(im_opencv[1], detections, model)
                if detections['next'] is not None or len(detections['daojv']) != 0 or len(detections['gw']) != 0 or detections['boss'] is not None:
                    break
            else:
                mouse_gui.send_data_absolute(left + 640, top + 40, may=1)  # click the exit button
                panduan = False  # we have left the challenge
                boss_pd = False
                time.sleep(2.0)
                continue
        if detections['center'] is None and detections['next'] is None and (len(detections['gw']) != 0 or detections["boss"] is not None):  # no center point detected, but there are monsters
            print("no center point detected, but there are monsters")
            if len(detections['gw']) != 0:
                move_to(rw, detections['gw'][0])
                time.sleep(0.26)
            elif detections['boss'] is not None:  # follow the boss
                boss_suiji1 = random.randint(-30, 30)
                boss_suiji2 = random.randint(-30, 30)
                detections['boss'] = (detections['boss'][0] + boss_suiji1, detections['boss'][1] + boss_suiji2)
                move_to(rw, detections['boss'])
                time.sleep(0.7)
            continue
        elif (detections['center'] is not None and detections['next'] is None and len(detections['gw']) != 0) or (boss_pd == True and detections['center'] is not None and detections['next'] is None):  # center point detected, but monsters remain
            if detections['center'][0] >= rw[0] and detections['center'][1] < rw[1]:  # 3
                mb = (rw[0] + 100, rw[1])
            elif detections['center'][0] <= rw[0] and detections['center'][1] < rw[1]:  # 4
                mb = (rw[0], rw[1] - 100)
            elif detections['center'][0] <= rw[0] and detections['center'][1] > rw[1]:  # 1
                mb = (rw[0] - 100, rw[1])
            elif detections['center'][0] >= rw[0] and detections['center'][1] > rw[1]:  # 2
                mb = (rw[0], rw[1] + 100)
            move_to(rw, mb)
            time.sleep(0.25)
            continue
        elif boss_pd == True and detections['center'] is None and detections['next'] is None:  # the boss has appeared but there is no center point
            k = move_randomly(rw, k)
            continue
        elif detections['next'] is not None:
            print('entering the next floor')
            panduan = True
            move_to(rw, detections['next'])
            for i in range(2):
                keyboard.send_data("DD")
                time.sleep(0.15)
                keyboard.release()
            continue
        else:
            k = move_randomly(rw, k)
            continue
    elif shizi.daoying(im_opencv[0]):
        mouse_gui.send_data_absolute(rw[0], rw[1] - 110, may=1)
        time.sleep(1)
        continue
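The empty `detections` template above is rebuilt verbatim three times in main.py (and once more in yolo_test.py). A small factory along the lines sketched below could replace the repetition; `new_detections` is an assumed helper name, not something defined in this commit.

def new_detections():
    """Return a fresh detection table with the labels yolo_shibie() fills in."""
    return {
        'center': None, 'next': None,
        'npc1': None, 'npc2': None, 'npc3': None, 'npc4': None,
        'boss': None, 'zhaozi': None,
        'daojv': [], 'gw': []
    }

# Usage inside the main loop (replacing the inline dict literal):
# detections = yolo_shibie(im_opencv[1], new_detections(), model)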

43
test.py Normal file

@@ -0,0 +1,43 @@
from utils.get_image import get_image
import cv2
import time

# Use the module-level camera instance
camera = get_image
# Small delay to make sure the camera has finished initialising
time.sleep(1.0)

frame_count = 0
start_time = time.time()

while True:
    # Grab a frame
    frame_data = camera.get_frame()
    # Check whether a frame was actually returned
    if frame_data is None:
        print("⚠️ Could not get a frame from the camera, retrying...")
        time.sleep(0.1)  # short delay before retrying
        continue
    # Extract the OpenCV-style image (RGB)
    image_rgb = frame_data[0]
    # Convert to BGR (OpenCV's default channel order)
    image_bgr = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2BGR)
    # Save the image
    cv2.imwrite('test.jpg', image_bgr)
    # Report the frame rate
    frame_count += 1
    elapsed = time.time() - start_time
    if elapsed > 1.0:  # update the FPS reading once per second
        fps = frame_count / elapsed
        print(f"FPS: {fps:.1f}")
        frame_count = 0
        start_time = time.time()
    # Short delay to avoid pegging the CPU
    time.sleep(0.01)

BIN
utils/10.21_820.jpg Normal file

Binary file not shown.

Size: 309 KiB

115
utils/WindowsAPI.py Normal file

@@ -0,0 +1,115 @@
import numpy, cv2
import win32gui, win32api, win32con, win32ui
import time
from PIL import Image


def get_window_position(window_title):
    """
    Get the top-left desktop coordinates of the given window.
    :param window_title: window title (substring match)
    :return: (x, y) tuple, or (None, None) if not found
    """
    # Find the target window handle
    target_hwnd = None

    def enum_windows_callback(hwnd, _):
        nonlocal target_hwnd
        if win32gui.IsWindowVisible(hwnd):
            title = win32gui.GetWindowText(hwnd)
            if window_title.lower() in title.lower():
                # Make sure this is a top-level window (skip child windows)
                if win32gui.GetParent(hwnd) == 0:
                    target_hwnd = hwnd
                    return False  # stop enumerating
        return True  # keep enumerating

    # Enumerate all windows
    win32gui.EnumWindows(enum_windows_callback, None)
    # Return the window position
    if target_hwnd:
        rect = win32gui.GetWindowRect(target_hwnd)
        return rect[0], rect[1]
    return None, None


class WindowsAPI():
    def __init__(self, hwnd=None, region=None):
        # Use the handle passed in, if any; otherwise leave it as None
        self.hWnd = hwnd
        self.region = region  # region is (left, top, right, bottom)

    def setRegion(self, region):
        """Set the screenshot region."""
        self.region = region

    def getDesktopImg(self):
        if not self.region:
            print("Please provide a valid screenshot region")
            return None
        left, top, right, bottom = self.region
        width = right - left
        height = bottom - top
        # Get a device context for the whole desktop
        hWndDC = win32gui.GetWindowDC(win32gui.GetDesktopWindow())
        # Create a device context object from the handle
        mfcDC = win32ui.CreateDCFromHandle(hWndDC)
        # Create a memory device context
        saveDC = mfcDC.CreateCompatibleDC()
        # Create a bitmap object
        saveBitMap = win32ui.CreateBitmap()
        # Allocate storage for it
        saveBitMap.CreateCompatibleBitmap(mfcDC, width, height)
        # Select the bitmap into the memory device context
        saveDC.SelectObject(saveBitMap)
        # Copy the requested region
        saveDC.BitBlt((0, 0), (width, height), mfcDC, (left, top), win32con.SRCCOPY)
        # Read the bitmap bits
        signedIntsArray = saveBitMap.GetBitmapBits(True)
        im_opencv = numpy.frombuffer(signedIntsArray, dtype='uint8')
        im_opencv.shape = (height, width, 4)
        # Release resources
        win32gui.DeleteObject(saveBitMap.GetHandle())
        saveDC.DeleteDC()
        mfcDC.DeleteDC()
        win32gui.ReleaseDC(win32gui.GetDesktopWindow(), hWndDC)
        im_opencv = cv2.cvtColor(im_opencv, cv2.COLOR_BGR2RGB)  # swap channels: BGRA capture -> RGB
        # im_opencv = im_opencv[40:-1, 2:]
        im_PIL = Image.fromarray(im_opencv)  # wrap as a PIL image
        return [im_opencv, im_PIL]

    def showDesktopImg(self):
        imgs = self.getDesktopImg()
        if imgs is None:
            print("Could not capture the screenshot")
            return
        im_opencv = imgs[0]  # take the OpenCV image
        cv2.imshow("Desktop Screenshot", im_opencv)
        cv2.waitKey(0)
        cv2.destroyAllWindows()


# window_title = "Torchlight:Infinite"
# left, top = get_window_position(window_title)
#
# if left is None or top is None:
#     print(f"Error: no window whose title contains '{window_title}' was found")
#     exit(1)
#
# print(f"Found window '{window_title}' at: ({left}, {top})")
# Set the screenshot region (left x, top y, right x, bottom y)
# width, height = 1282, 761
custom_region = (0, 30, 1280, 30 + 720)
winApi = WindowsAPI(region=custom_region)
print(winApi.region)
# winApi.showDesktopImg()
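A minimal usage sketch for `WindowsAPI`, assuming the same 1280x720 region as above; it grabs one desktop frame and writes it to disk (the `desktop.jpg` filename is arbitrary):

import cv2
from utils.WindowsAPI import WindowsAPI

api = WindowsAPI(region=(0, 30, 1280, 30 + 720))
frame = api.getDesktopImg()                          # [RGB ndarray, PIL.Image] or None
if frame is not None:
    bgr = cv2.cvtColor(frame[0], cv2.COLOR_RGB2BGR)  # cv2.imwrite expects BGR
    cv2.imwrite("desktop.jpg", bgr)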

102
utils/caiji.py Normal file

@@ -0,0 +1,102 @@
import cv2
from PIL import Image
import numpy as np
import time
import os


class CaptureCard:
    def __init__(self, device_index=0, width=1920, height=1080, save_dir="screenshots"):
        """
        Initialise the capture card (or webcam).
        :param device_index: device index (usually 0/1/2)
        :param width: capture width
        :param height: capture height
        :param save_dir: directory for saved screenshots
        """
        self.device_index = device_index
        self.width = width
        self.height = height
        self.cap = None
        self.region = None
        self.save_dir = save_dir
        os.makedirs(save_dir, exist_ok=True)

    def open(self):
        """Open the capture card."""
        self.cap = cv2.VideoCapture(self.device_index, cv2.CAP_DSHOW)
        if not self.cap.isOpened():
            self.cap = cv2.VideoCapture(self.device_index)
        if not self.cap.isOpened():
            raise RuntimeError(f"Could not open capture device index={self.device_index}")
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
        print(f"Capture card opened: {self.width}x{self.height}")

    def close(self):
        """Close the capture card."""
        if self.cap:
            self.cap.release()
            self.cap = None
            print("Capture card closed.")
        cv2.destroyAllWindows()

    def getDesktopImg(self):
        """Grab one frame from the capture card."""
        if self.cap is None:
            self.open()
        ret, frame = self.cap.read()
        if not ret:
            print("Could not read a frame from the capture card")
            return None
        im_opencv = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        if self.region:
            left, top, right, bottom = self.region
            im_opencv = im_opencv[top:bottom, left:right]
        im_PIL = Image.fromarray(im_opencv)
        return [im_opencv, im_PIL]

    def preview(self):
        """Live preview plus an automatic screenshot every 5 seconds."""
        if self.cap is None:
            self.open()
        print("Press 'q' to quit the live preview")
        last_capture_time = time.time()
        screenshot_count = 0
        while True:
            ret, frame = self.cap.read()
            if not ret:
                print("Could not read a video frame")
                break
            if self.region:
                left, top, right, bottom = self.region
                frame = frame[top:bottom, left:right]
            # Show the video
            cv2.imshow("CaptureCard Preview", frame)
            # Automatic screenshot every 5 seconds
            now = time.time()
            if now - last_capture_time >= 5:
                screenshot_count += 1
                filename = os.path.join(self.save_dir, f"screenshot_{screenshot_count}.jpg")
                cv2.imwrite(filename, frame)
                print(f"[screenshot] saved: {filename}")
                last_capture_time = now
            # Quit on 'q'
            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):
                break
        self.close()


if __name__ == "__main__":
    card = CaptureCard(device_index=0, width=1920, height=1080)
    card.preview()

73
utils/get_image.py Normal file

@@ -0,0 +1,73 @@
import time
from PIL import Image
import cv2

# class GetImage:
#     def __init__(self, cam_index=0, width=1920, height=1080):
#         self.cap = cv2.VideoCapture(cam_index, cv2.CAP_DSHOW)
#
#         if not self.cap.isOpened():
#             raise RuntimeError(f"Could not open camera {cam_index}")
#         self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
#         self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
#         print(f"✅ Camera {cam_index} opened, resolution {width}x{height}")
#     def get_frame(self):
#         ret, im_opencv = self.cap.read()
#         im_opencv = cv2.cvtColor(im_opencv, cv2.COLOR_BGR2RGB)  # convert BGR to RGB
#         im_opencv = im_opencv[30:30+720, 0:1280]  # crop to the game area
#         im_PIL = Image.fromarray(im_opencv)  # wrap as a PIL image
#
#         return [im_opencv, im_PIL]
#     def release(self):
#         self.cap.release()
#         cv2.destroyAllWindows()
#         print("🔚 Camera released")
#     def __del__(self):
#         # In case release() was never called manually
#         if hasattr(self, "cap") and self.cap.isOpened():
#             self.release()
#
# get_image = GetImage()
#
# if __name__ == '__main__':
#     while True:
#         if cv2.waitKey(1) & 0xFF == ord('q'):
#             break
#         a = get_image.get_frame()
#         cv2.imshow('image', a[0])
#         print(a[0].shape)
#
#
#     cv2.destroyAllWindows()

import threading


class GetImage:
    def __init__(self, cam_index=0, width=1920, height=1080):
        self.cap = cv2.VideoCapture(cam_index, cv2.CAP_DSHOW)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.frame = None
        self.running = True
        threading.Thread(target=self.update, daemon=True).start()

    def update(self):
        # Background thread: keep only the most recent frame
        while self.running:
            ret, frame = self.cap.read()
            if ret:
                self.frame = frame

    def get_frame(self):
        if self.frame is None:
            return None
        im_opencv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
        im_opencv = im_opencv[30:30 + 720, 0:1280]  # crop to the game area
        im_PIL = Image.fromarray(im_opencv)
        return [im_opencv, im_PIL]

    def release(self):
        self.running = False
        time.sleep(0.2)
        self.cap.release()
        cv2.destroyAllWindows()


get_image = GetImage()
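A small polling sketch for the background-capture singleton above: `get_frame()` returns None until the capture thread has produced its first frame, so callers retry, and `release()` is best wrapped in a `finally` (the `frame.jpg` filename is arbitrary):

import time
import cv2
from utils.get_image import get_image

try:
    while True:
        frame = get_image.get_frame()   # [RGB ndarray, PIL.Image] or None
        if frame is None:               # capture thread not ready yet
            time.sleep(0.05)
            continue
        cv2.imwrite("frame.jpg", cv2.cvtColor(frame[0], cv2.COLOR_RGB2BGR))
        break
finally:
    get_image.release()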

57
utils/mouse.py Normal file

@@ -0,0 +1,57 @@
import random
import time
import ch9329Comm
import serial

serial.ser = serial.Serial('COM6', 9600)  # open the serial port used by ch9329Comm
mouse = ch9329Comm.mouse.DataComm(1920, 1080)


def bezier_point(t, p0, p1, p2, p3):
    """Evaluate a point on a cubic Bezier curve."""
    x = (1-t)**3 * p0[0] + 3*(1-t)**2*t*p1[0] + 3*(1-t)*t**2*p2[0] + t**3*p3[0]
    y = (1-t)**3 * p0[1] + 3*(1-t)**2*t*p1[1] + 3*(1-t)*t**2*p2[1] + t**3*p3[1]
    return (x, y)


def move_mouse_bezier(mouse, start, end, duration=1, steps=120):
    """
    Simulate a mouse move along a Bezier curve (safe version).
    """
    x1, y1 = start
    x2, y2 = end
    # Control points (slightly randomised)
    ctrl1 = (x1 + (x2 - x1) * random.uniform(0.2, 0.4) + random.randint(-20, 20),
             y1 + (y2 - y1) * random.uniform(0.1, 0.4) + random.randint(-20, 20))
    ctrl2 = (x1 + (x2 - x1) * random.uniform(0.6, 0.8) + random.randint(-20, 20),
             y1 + (y2 - y1) * random.uniform(0.6, 0.9) + random.randint(-20, 20))
    # Generate the trajectory
    points = [bezier_point(t, (x1, y1), ctrl1, ctrl2, (x2, y2)) for t in [i/steps for i in range(steps+1)]]
    delay = duration / steps
    for (x, y) in points:
        # Clamp the coordinates so they never go negative or off-screen
        x_safe = max(0, min(1919, int(x)))
        y_safe = max(0, min(1079, int(y)))
        mouse.send_data_absolute(x_safe, y_safe)
        time.sleep(delay * random.uniform(0.6, 1.0))
    # Final step: make sure we end exactly on the target
    x2_safe = max(0, min(1919, int(x2)))
    y2_safe = max(0, min(1079, int(y2)))
    mouse.send_data_absolute(x2_safe, y2_safe)


class Mouse_guiji():
    def __init__(self):
        self.point = (0, 0)

    def send_data_absolute(self, x, y, may=0):
        move_mouse_bezier(mouse, self.point, (x, y), duration=1, steps=120)
        if may == 1:    # left click
            mouse.click()
        elif may == 2:  # right click
            mouse.click1()
        self.point = (x, y)


mouse_gui = Mouse_guiji()
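A short usage sketch for `Mouse_guiji` above: `may=0` (the default) only glides along the Bezier path, `may=1` left-clicks at the target, `may=2` right-clicks. It assumes the CH9329 adapter really is on COM6 as configured at the top of the file.

from utils.mouse import mouse_gui

mouse_gui.send_data_absolute(640, 360)           # glide to screen centre, no click
mouse_gui.send_data_absolute(1100, 630, may=1)   # glide there and left-click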

115
utils/shizi.py Normal file

@@ -0,0 +1,115 @@
import ddddocr
import cv2

ocr = ddddocr.DdddOcr()


def fuhuo(image):
    # "revive at record point" button
    image = image[603:641, 460:577]
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Encode the cropped image as PNG
    _, img_encoded = cv2.imencode('.png', image)
    # Convert the encoded image to bytes
    img_bytes = img_encoded.tobytes()
    result = ocr.classification(img_bytes)
    print(result)
    if result == "记录点复活" or result == "记灵点复活":  # "revive at record point" (second form tolerates an OCR misread)
        return True
    else:
        return False


def jieshu(image):
    # "end challenge" button
    image = image[623:623 + 41, 472:472 + 167]
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Encode the cropped image as PNG
    _, img_encoded = cv2.imencode('.png', image)
    # Convert the encoded image to bytes
    img_bytes = img_encoded.tobytes()
    result = ocr.classification(img_bytes)
    if result == "结束挑战":  # "end challenge"
        return True
    else:
        return False


def tiaozhan(image):
    # "start challenge" button
    image = image[576:614, 1023:1138]
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Encode the cropped image as PNG
    _, img_encoded = cv2.imencode('.png', image)
    # Convert the encoded image to bytes
    img_bytes = img_encoded.tobytes()
    result = ocr.classification(img_bytes)
    if result == "开启挑战":  # "start challenge"
        return True
    else:
        return False


def tuichu(image):
    # "exit challenge" label
    image = image[24:58, 569:669]
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Encode the cropped image as PNG
    _, img_encoded = cv2.imencode('.png', image)
    # Convert the encoded image to bytes
    img_bytes = img_encoded.tobytes()
    result = ocr.classification(img_bytes)
    print(result)
    if result[1:] == "出挑战" or result[0:2] == "退出":  # partial matches of "退出挑战" ("exit challenge")
        return True
    else:
        return False


def tuwai(image):
    # "tap" indicator that only shows outside a map (in town)
    image = image[59:93, 1226:1275]
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Encode the cropped image as PNG
    _, img_encoded = cv2.imencode('.png', image)
    # Convert the encoded image to bytes
    img_bytes = img_encoded.tobytes()
    result = ocr.classification(img_bytes)
    print(result)
    if result == "tap" or result == 'tqp' or result == 'top':  # tolerate common OCR misreads of "tap"
        return True
    else:
        return False


def daoying(image):
    # "倒影" (reflection) prompt
    image = image[260:289, 570:628]
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Encode the cropped image as PNG
    _, img_encoded = cv2.imencode('.png', image)
    # Convert the encoded image to bytes
    img_bytes = img_encoded.tobytes()
    result = ocr.classification(img_bytes)
    print(result)
    if result == "倒影" or result == "到影":  # "reflection" (second form tolerates an OCR misread)
        return True
    else:
        return False


def shuzi(image):
    # number shown on screen, used to detect the boss stage ("40")
    image = image[50:91, 610:666]
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # Encode the cropped image as PNG
    _, img_encoded = cv2.imencode('.png', image)
    # Convert the encoded image to bytes
    img_bytes = img_encoded.tobytes()
    result = ocr.classification(img_bytes)
    print(result)
    if result == "40":
        return True
    return False


def test(image):
    # Encode the full image as PNG
    _, img_encoded = cv2.imencode('.png', image)
    # Convert the encoded image to bytes
    img_bytes = img_encoded.tobytes()
    result = ocr.classification(img_bytes)
    print(result)


if __name__ == '__main__':
    image = cv2.imread('10.21_820.jpg')
    shuzi(image)
    test(image)
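Every check above repeats the same crop, RGB-to-BGR, PNG-encode, OCR sequence. A shared helper along the lines below (hypothetical `ocr_region`, not part of this commit) would reduce each check to a single comparison; it reuses the module-level `ocr` instance defined at the top of the file.

def ocr_region(image, top, bottom, left, right):
    """Crop an RGB frame, encode it as PNG and run ddddocr on the bytes."""
    crop = cv2.cvtColor(image[top:bottom, left:right], cv2.COLOR_RGB2BGR)
    ok, buf = cv2.imencode('.png', crop)
    return ocr.classification(buf.tobytes()) if ok else ""

# e.g. jieshu() could then be written as:
# def jieshu(image):
#     return ocr_region(image, 623, 664, 472, 639) == "结束挑战"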

BIN
utils/城镇.jpg Normal file

Binary file not shown.

Size: 1.1 KiB

BIN
utils/开启挑战.jpg Normal file

Binary file not shown.

Size: 3.4 KiB

BIN
utils/结束挑战.jpg Normal file

Binary file not shown.

Size: 3.2 KiB

BIN
utils/记录点复活.jpg Normal file

Binary file not shown.

Size: 3.2 KiB

BIN
utils/退出挑战.jpg Normal file

Binary file not shown.

Size: 3.5 KiB

55
yolo_test.py Normal file

@@ -0,0 +1,55 @@
import cv2
from utils.get_image import get_image
from ultralytics import YOLO

model = YOLO(r"best0.pt").to('cuda')


def yolo_shibie(im_PIL, detections):
    results = model(im_PIL)
    result = results[0]
    # ✅ Get the frame with the detection boxes drawn on it
    frame_with_boxes = result.plot()
    # ✅ Display it live with OpenCV
    cv2.imshow("YOLO live detection", frame_with_boxes)
    # Quit on ESC or Q
    if cv2.waitKey(1) & 0xFF in [27, ord('q')]:
        return None
    # ✅ Extract the detections
    for i in range(len(result.boxes.xyxy)):
        left, top, right, bottom = result.boxes.xyxy[i]
        cls_id = int(result.boxes.cls[i])
        label = result.names[cls_id]
        if label in ['center', 'next', 'npc1', 'npc2', 'npc3', 'npc4', 'boss', 'zhaozi']:
            player_x = int(left + (right - left) / 2) + 3
            player_y = int(top + (bottom - top) / 2) + 40
            detections[label] = [player_x, player_y]
        elif label in ['daojv', 'gw']:
            player_x = int(left + (right - left) / 2) + 3
            player_y = int(top + (bottom - top) / 2) + 40
            detections[label].append([player_x, player_y])
    return detections


while True:
    detections = {
        'center': None, 'next': None,
        'npc1': None, 'npc2': None, 'npc3': None, 'npc4': None,
        'boss': None, 'zhaozi': None,
        'daojv': [], 'gw': []
    }
    im_opencv = get_image.get_frame()  # [RGB, PIL]
    detections = yolo_shibie(im_opencv[1], detections)
    if detections is None:  # user quit
        break
    print(detections)

cv2.destroyAllWindows()