def redline_thread():
    """Red-line detection thread.

    Loops until the module-level ``redline_stop`` flag is set.  Each pass:
    grabs a frame from camera channel 1, polls the learn-button (rising edge
    + debounce) to (re)learn the color threshold, then either runs the
    multi-section line-following scan and intersection classification, or —
    before any threshold has been learned — shows a prompt overlay.
    """
    global sensor, redline_osd_img, threshold, line_following_enabled, button_last_state, last_press_time
    while not redline_stop:
        # Grab an RGB565 frame from camera channel 1.
        frame = sensor.snapshot(chn=CAM_CHN_ID_1)

        # --- button handling: rising edge + debounce ---
        key_state = key.value()
        now = time.ticks_ms()
        rising_edge = (key_state == 1 and button_last_state == 0)
        if rising_edge and (now - last_press_time > debounce_delay):
            # Sample the learning ROI to obtain a new color threshold,
            # then arm the line-following logic.
            threshold = learn_color_threshold(sensor)
            line_following_enabled = True
            last_press_time = now
        button_last_state = key_state

        if threshold and line_following_enabled:
            # --- line following: scan LINE_SECTION_COUNT horizontal sections ---
            w, h = frame.width(), frame.height()
            start_y = int(h * START_Y_RATIO)
            end_y = int(h * END_Y_RATIO)
            section_w = w // LINE_SECTION_COUNT

            # One '0'/'1' flag per section: '1' means the line color was seen there.
            flags = ['0'] * LINE_SECTION_COUNT
            for idx in range(LINE_SECTION_COUNT):
                # ROI for this vertical strip of the scan band.
                roi = (idx * section_w, start_y, section_w, end_y - start_y)
                frame.draw_rectangle(roi, color=(0, 255, 0), thickness=2)
                blobs = frame.find_blobs([threshold], roi=roi,
                                         pixels_threshold=BLOB_PIXELS_THRESHOLD,
                                         area_threshold=BLOB_AREA_THRESHOLD, merge=True)
                if blobs:
                    # Mark the center of the biggest blob and flag this section.
                    biggest = max(blobs, key=lambda b: b.pixels())
                    frame.draw_circle(biggest.cx(), biggest.cy(), 10,
                                      color=(0, 255, 0), thickness=2)
                    flags[idx] = '1'

            # Intersection heuristics: the center section (index 3) plus the
            # outer sections on either side decide the turn direction.
            center_hit = '1' in flags[3:4]
            left_turn = center_hit and (flags[0] == '1' or flags[1] == '1')
            right_turn = center_hit and (flags[6] == '1' or flags[7] == '1')

            # Classify the intersection from the (left, right) pair.
            intersection_type = {
                (True, True): "T字路口",
                (True, False): "左转路口",
                (False, True): "右转路口",
            }.get((left_turn, right_turn))

            print_status(frame, flags, intersection_type)
        else:
            # No threshold learned yet: highlight the learning region and
            # prompt the user to press the button.
            frame.draw_rectangle(LEARNING_ROI, color=(255, 0, 0), thickness=2)
            frame.draw_string(10, 10, "请按下按键学习颜色阈值", color=(255, 0, 0), scale=2)

        # Push the annotated frame to the OSD layer.
        redline_osd_img.draw_image(frame, 0, 0)
        Display.show_image(redline_osd_img)
        time.sleep_ms(20)  # throttle the loop
# ---------- AI thread ----------
def ai_thread():
    """AI object-detection thread.

    Runs a YOLOv8 detector on frames from camera channel 2 until the
    module-level ``det_stop`` flag is set.  For every detection it sends a
    5-byte frame over the UART:

        0xA3 0xB3 <position> <label-code> 0xC3

    where ``position`` is 0x01 when the box center is in the left half of
    the image and 0x02 in the right half, and ``label-code`` is the digit
    label "1".."8" encoded as 0x01..0x08 (0x00 for anything else).
    """
    global sensor, det_osd_img, det_stop
    kmodel_path = "/sdcard/best.kmodel"
    labels = ["1", "2", "3", "4", "5", "6", "7", "8"]
    model_input_size = [224, 224]
    display_mode = "lcd"
    confidence_threshold = 0.7
    nms_threshold = 0.2
    # Initialize the detector (display/preprocess sizes come from module globals).
    yolo = YOLOv8(
        task_type="detect",
        mode="video",
        kmodel_path=kmodel_path,
        labels=labels,
        rgb888p_size=rgb888p_size,
        model_input_size=model_input_size,
        display_size=display_size,
        conf_thresh=confidence_threshold,
        nms_thresh=nms_threshold,
        max_boxes_num=4,
        debug_mode=0
    )
    yolo.config_preprocess()
    # Label -> wire code lookup; replaces a 16-line if/elif chain.
    # Unknown labels fall back to 0x00, matching the old default.
    label_codes = {"1": 0x01, "2": 0x02, "3": 0x03, "4": 0x04,
                   "5": 0x05, "6": 0x06, "7": 0x07, "8": 0x08}
    # Image vertical midline is loop-invariant — compute it once.
    midpoint = rgb888p_size[0] // 2
    try:
        while not det_stop:
            # Grab one RGBP888 frame and hand it to the model as ulab.numpy.
            img_rgbp888 = sensor.snapshot(CAM_CHN_ID_2)
            img = img_rgbp888.to_numpy_ref()
            res = yolo.run(img)
            for detection in res:
                # detection layout: [x1, y1, x2, y2, conf, class_id]
                x1, x2 = detection[0], detection[2]
                class_id = int(detection[5])
                label = labels[class_id]
                # Horizontal center decides the left/right position byte.
                center_x = int((x1 + x2) / 2)
                position_byte = 0x02 if center_x > midpoint else 0x01
                Dat_intersection = label_codes.get(label, 0x00)
                # Frame: header, position flag, digit label, trailer.
                FH = bytearray([
                    0xA3, 0xB3,         # header
                    position_byte,      # 0x01 left, 0x02 right
                    Dat_intersection,   # digit label code
                    0xC3                # trailer
                ])
                print(FH)
                uart.write(FH)
            # Overlay boxes/labels on the OSD layer.
            yolo.draw_result(res, det_osd_img)
            gc.collect()
    except Exception as e:
        print("AI线程异常:", e)
    finally:
        # Always release model resources, even on error.
        yolo.deinit()
