波特率设置非115200的值,断电保存开机MIPI屏幕无显示

Viewed 28

重现步骤

from libs.AIBase import AIBase
from libs.AI2D import Ai2d
import os
import ujson
from media.media import *
from media.sensor import *
from time import *
import nncase_runtime as nn
import ulab.numpy as np
import time
import utime
import image
import random
import gc
import sys
import aidemo
from machine import Pin
from machine import UART
from machine import FPIOA
import time
from media.display import * #导入display模块,使用display相关接口

# Pin mux: route physical pins 48/49 to UART4 TXD/RXD.
fpioa = FPIOA()

fpioa.set_function(48,FPIOA.UART4_TXD)
fpioa.set_function(49,FPIOA.UART4_RXD)

# UART4 talks to the WB2-01F Wi-Fi module via AT commands.
# NOTE(review): the reported bug is that baud rates other than 115200 work
# when launched from the IDE but fail on standalone boot — verify the
# boot-time UART clock configuration before changing this value.
uart4 = UART(UART.UART4, baudrate=115200, bits=UART.EIGHTBITS, parity=UART.PARITY_NONE, stop=UART.STOPBITS_ONE)

# Wi-Fi credentials — presumably consumed by WIFI_INIT() later in the
# script (WIFI_INIT itself is not defined in this paste; confirm).
WIFI_NAME = "CMCC-h14a"
WIFI_PASSWORD = "p276297u"

# Rate-limit state for image uploads over the Wi-Fi module.
last_send_time = 0
send_interval = 20 # cooldown between sends, in seconds

# Detection counters: total confirmed "person present" events, and the
# current run of consecutive frames in which a person was detected.
human_count = 0
consecutive_human_frames = 0

class ScopedTiming:
    """Context manager that measures the wall-clock time of a block.

    On exit, prints "<info> took X.XX ms" when profiling is enabled.

    Usage:
        with ScopedTiming("stage name", debug_mode > 0):
            ...
    """

    # NOTE(review): the pasted source showed "def init" — markdown rendering
    # stripped the double underscores; restored to __init__ here. The class
    # body indentation (lost in the paste) is also restored.
    def __init__(self, info="", enable_profile=True):
        self.info = info                    # label printed with the timing
        self.enable_profile = enable_profile  # False disables all measurement

    def __enter__(self):
        if self.enable_profile:
            self.start_time = time.time_ns()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.enable_profile:
            elapsed_time = time.time_ns() - self.start_time
            print(f"{self.info} took {elapsed_time / 1000000:.2f} ms")

class PipeLine:
    """Camera -> AI -> display pipeline helper for CanMV K230 boards.

    Wires one sensor to three channels:
      * chn0 -> video output (YUV420 semiplanar, bound to LAYER_VIDEO1)
      * chn1 -> snapshot channel (480x320 RGB888 planar, e.g. for upload)
      * chn2 -> AI inference channel (RGB888 planar, rgb888p_size)

    NOTE(review): the pasted source showed "def init" — markdown rendering
    stripped the double underscores; restored to __init__ here, along with
    the class-body indentation lost in the paste.
    """

    def __init__(self, rgb888p_size=None, display_size=None,
                 display_mode="lcd", debug_mode=0, osd_layer_num=1):
        # Avoid mutable default arguments; preserve the original defaults.
        if rgb888p_size is None:
            rgb888p_size = [224, 224]
        if display_size is None:
            display_size = [1920, 1080]
        # Resolution of frames handed to the AI (width aligned up to 16).
        self.rgb888p_size = [ALIGN_UP(rgb888p_size[0], 16), rgb888p_size[1]]
        # Video-output (VO) resolution (width aligned up to 16).
        self.display_size = [ALIGN_UP(display_size[0], 16), display_size[1]]
        # Display mode: "lcd" or "hdmi".
        self.display_mode = display_mode
        # Sensor object (created in create() unless supplied by the caller).
        self.sensor = None
        # OSD overlay Image object for drawing results.
        self.osd_img = None
        self.debug_mode = debug_mode
        self.osd_layer_num = osd_layer_num

    def create(self, sensor=None, hmirror=None, vflip=None, fps=60):
        """Initialize the sensor, display binding and media buffers.

        NOTE(review): sensor.run() is deliberately NOT called here (the
        author starts the sensor only after Wi-Fi init to avoid screen
        flicker at boot) — callers must call sensor.run() themselves.
        """
        with ScopedTiming("init PipeLine", self.debug_mode > 0):
            os.exitpoint(os.EXITPOINT_ENABLE)
            nn.shrink_memory_pool()
            # Some boards only support 30 fps; pick by board name.
            brd = os.uname()[-1]
            if brd in ("k230d_canmv_bpi_zero", "k230_canmv_lckfb",
                       "k230d_canmv_atk_dnk230d"):
                self.sensor = Sensor(fps=30) if sensor is None else sensor
            else:
                self.sensor = Sensor(fps=fps) if sensor is None else sensor
            self.sensor.reset()
            if hmirror is not None and (hmirror == True or hmirror == False):
                self.sensor.set_hmirror(hmirror)
            if vflip is not None and (vflip == True or vflip == False):
                self.sensor.set_vflip(vflip)
            # Channel 0: straight to the display (VO), YUV420 format.
            self.sensor.set_framesize(w = self.display_size[0], h = self.display_size[1])
            self.sensor.set_pixformat(PIXEL_FORMAT_YUV_SEMIPLANAR_420)
            # Channel 1: snapshots for upload, RGB888 planar.
            self.sensor.set_framesize(w = 480, h = 320, chn=CAM_CHN_ID_1)
            self.sensor.set_pixformat(PIXEL_FORMAT_RGB_888_PLANAR, chn=CAM_CHN_ID_1)
            # Channel 2: AI input, RGB888 planar at rgb888p_size.
            self.sensor.set_framesize(w = self.rgb888p_size[0], h = self.rgb888p_size[1], chn=CAM_CHN_ID_2)
            self.sensor.set_pixformat(PIXEL_FORMAT_RGB_888_PLANAR, chn=CAM_CHN_ID_2)

            # OSD overlay image for drawing detection results.
            self.osd_img = image.Image(self.display_size[0], self.display_size[1], image.ARGB8888)

            # Bind the sensor's channel 0 directly to the video layer.
            sensor_bind_info = self.sensor.bind_info(x = 0, y = 0, chn = CAM_CHN_ID_0)
            Display.bind_layer(**sensor_bind_info, layer = Display.LAYER_VIDEO1)

            # Initialize the display controller.
            if self.display_mode == "hdmi":
                # LT9611 HDMI bridge, defaults to 1920x1080.
                Display.init(Display.LT9611, osd_num=self.osd_layer_num, to_ide = True)
            else:
                # ST7701 MIPI LCD, defaults to 480x800.
                Display.init(Display.ST7701, width=self.display_size[0], height=self.display_size[1], osd_num=self.osd_layer_num, to_ide=False)

            # Cap the bound channel's frame rate so the producer does not
            # outrun the display.
            display_fps = Display.fps()
            self.sensor._set_chn_fps(chn = CAM_CHN_ID_0, fps = display_fps)

            # Media buffer initialization.
            MediaManager.init()
            # Sensor start intentionally deferred to the caller:
            # self.sensor.run()

    def get_frame(self):
        """Return one AI-channel frame as a ulab numpy array (by reference)."""
        with ScopedTiming("get a frame", self.debug_mode > 0):
            frame = self.sensor.snapshot(chn=CAM_CHN_ID_2)
            input_np = frame.to_numpy_ref()
            return input_np

    def get_snapshot(self):
        """Return one snapshot-channel frame as a ulab numpy array (by reference)."""
        with ScopedTiming("get a frame", self.debug_mode > 0):
            frame = self.sensor.snapshot(chn=CAM_CHN_ID_1)
            input_np = frame.to_numpy_ref()
            return input_np

    def show_image(self):
        """Push the OSD overlay image to the display's OSD3 layer."""
        with ScopedTiming("show result", self.debug_mode > 0):
            Display.show_image(self.osd_img, 0, 0, Display.LAYER_OSD3)

    def destroy(self):
        """Stop the sensor, deinit the display and release media buffers."""
        with ScopedTiming("deinit PipeLine", self.debug_mode > 0):
            os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
            # Stop the sensor first so no producer feeds the display.
            self.sensor.stop()
            # Deinit the LCD/HDMI output.
            Display.deinit()
            time.sleep_ms(50)
            # Release media buffers last.
            MediaManager.deinit()

# NOTE(review): the pasted source showed 'if name=="main":' — markdown
# rendering stripped the dunder underscores; restored here, along with the
# body indentation lost in the paste.
if __name__ == "__main__":

    # Display mode, default "lcd".
    display_mode = "lcd"
    display_size = [640, 480]
    # Model path on the SD card.
    kmodel_path = "/sdcard/examples/kmodel/yolov8n_320.kmodel"
    labels = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
    # Detection parameters.
    confidence_threshold = 0.2
    nms_threshold = 0.2
    max_boxes_num = 50
    rgb888p_size = [320, 320]

    # Build the camera object and the pipeline.
    sensor = Sensor(id = 1, width=1280, height=960)
    pl = PipeLine(rgb888p_size=rgb888p_size, display_size=display_size, display_mode=display_mode)
    pl.create(sensor=sensor)

    # Blank the screen while Wi-Fi initializes.
    # NOTE(review): WIFI_INIT is not defined in this paste — presumably it
    # prints progress on the display; confirm against the full script.
    img_wifi = image.Image(640, 480, image.RGB888)
    img_wifi.draw_rectangle(0, 0, 640, 480, color=(0, 0, 0), thickness=2, fill=True)
    WIFI_INIT()
    img_wifi.draw_rectangle(0, 0, 640, 480, color=(0, 0, 0), thickness=2, fill=True)
    time.sleep_ms(100)

    # Sensor start was deferred (see PipeLine.create) to avoid boot-time
    # screen flicker; start it now that Wi-Fi init is done.
    sensor.run()
    ob_det = ObjectDetectionApp(kmodel_path,labels=labels,model_input_size=[320,320],max_boxes_num=max_boxes_num,confidence_threshold=confidence_threshold,nms_threshold=nms_threshold,rgb888p_size=rgb888p_size,display_size=display_size,debug_mode=0)
    ob_det.config_preprocess()

    try:
        while True:
            os.exitpoint()
            with ScopedTiming("total", 1):
                # Grab the current AI-channel frame and run inference.
                img = pl.get_frame()
                res = ob_det.run(img)

                # Each detection is [x1, y1, x2, y2, prob, label];
                # label 0 is "person" in the COCO label set above.
                human_detected_in_frame = False
                for detection in res:
                    if detection[5] == 0:
                        human_detected_in_frame = True
                        break  # one person is enough for this frame

                # Track how many consecutive frames contained a person.
                if human_detected_in_frame:
                    consecutive_human_frames += 1
                else:
                    consecutive_human_frames = 0

                # 10 consecutive person-frames count as one confirmed event.
                if consecutive_human_frames >= 10:
                    human_count += 1
                    consecutive_human_frames = 0  # restart the streak
                    print(human_count)

                    current_time = time.time()

                    # Rate-limit uploads: only send if the cooldown elapsed.
                    if current_time - last_send_time >= send_interval:
                        # Grab a snapshot and flatten it to raw bytes.
                        img_upload = pl.get_snapshot()
                        img_flat = img_upload.flatten()
                        raw_bytes = bytes(img_flat)

                        # Open a TCP socket on the Wi-Fi module
                        # (server address redacted in this report).
                        if not send_at_command(uart4, f"AT+SOCKET=4,X.X.X.X,9000,0,1", "OK", 5000):
                            print("SOCKET连接失败")
                        # Send the image in chunks over the AT link.
                        success = send_image_by_chunks_safe(
                            uart4,
                            conn_id=1,
                            img_bytes=raw_bytes,
                            chunk_size=1023,
                            chunks_per_send=64,   # chunks per AT send block
                            chunk_delay_ms=5,     # inter-chunk delay, protects the UART
                            block_delay_ms=500,   # inter-block delay, protects the module
                            max_retries=10        # retries to improve success rate
                        )
                        if success:
                            print("图像发送成功")
                        else:
                            print("图像发送失败")
                        last_send_time = current_time
                        # Tear the connection down after the transfer.
                        send_at_command(uart4, "AT+SOCKETAUTOTT=0", "OK", 2000, delay_ms=1000)
                        send_at_command(uart4, "AT+SOCKETDEL=1", "OK", 2000, delay_ms=1000)

                # Draw the detection boxes onto the OSD and display them.
                ob_det.draw_result(pl, res)
                pl.show_image()
                gc.collect()

    except Exception as e:
        sys.print_exception(e)

    finally:
        ob_det.deinit()
        pl.destroy()

期待结果和实际结果
写入main.py文件到CanMV后,上电开机应当和连上IDE运行一样,会在MIPI屏幕上显示WIFI初始化的一些信息,然后进入YOLO。但当K230D和WIFI模块的波特率同步设置为非115200的值,例如1152000,1000000,4000000等值时,只有在IDE点击绿色运行按钮时可以在屏幕上正常显示信息,然后启动YOLO识别;但是一旦脱机直接上电运行屏幕却无任何显示。值得注意的是:波特率的设置都是同步设置完毕并在IDE上运行确认所有功能正常的。当K230D和WIFI模块的波特率同步恢复默认设置为115200后,脱机上电的WIFI初始化信息又可以正常显示在MIPI屏幕上。

软硬件版本信息
正点原子K230D BOX,WIFI模块安信可WB2-01F,固件版本CanMV-K230D_ATK_DNK230D_micropython_v1.2-0-g9e4d9ca_nncase_v2.9.0.img。

错误日志
暂无

尝试解决过程
如实际结果所描述

补充材料
PipeLine函数为了机器启动屏幕不闪烁,我将sensor.run()语句注释掉了,并在WIFI初始化后启动。

1 Answers

你好,可以在uart0上接一个串口模块查看一下开机启动以及运行脚本输出得日志信息吗