问题描述
运行代码一直显示sensor没有定义,run等函数不存在等问题,是不是深度学习之后无法使用OpenCV库里面的内容
复现步骤
import os, gc, time
from libs.PlatTasks import DetectionApp
from libs.PipeLine import PipeLine
from libs.Utils import *
from media.sensor import *
from media.display import *
from media.media import *
from time import ticks_ms
from machine import FPIOA
from machine import Pin
from machine import PWM
from machine import Timer
sensor = None
sensor.run()
clock = time.clock()
save_folder = "/data/data/images/"
# Set display mode: options are 'hdmi', 'lcd', 'lt9611', 'st7701', 'hx8399'
# 'hdmi' defaults to 'lt9611' (1920x1080); 'lcd' defaults to 'st7701' (800x480)
display_mode = "lt9611"
# Define the input size for the RGB888P video frames
rgb888p_size = [1280, 720]
# Set root directory path for model and config
root_path = "/sdcard/mp_deployment_source/"
# Load deployment configuration
deploy_conf = read_json(root_path + "/deploy_config.json")
kmodel_path = root_path + deploy_conf["kmodel_path"] # KModel path
labels = deploy_conf["categories"] # Label list
confidence_threshold = deploy_conf["confidence_threshold"] # Confidence threshold
nms_threshold = deploy_conf["nms_threshold"] # NMS threshold
model_input_size = deploy_conf["img_size"] # Model input size
nms_option = deploy_conf["nms_option"] # NMS strategy
model_type = deploy_conf["model_type"] # Detection model type
anchors = []
if model_type == "AnchorBaseDet":
anchors = deploy_conf["anchors"][0] + deploy_conf["anchors"][1] + deploy_conf["anchors"][2]
# Inference configuration
inference_mode = "video" # Inference mode: 'video'
debug_mode = 0 # Debug mode flag
# Create and initialize the video/display pipeline
pl = PipeLine(rgb888p_size=rgb888p_size, display_mode=display_mode)
pl.create()
display_size = pl.get_display_size()
# Initialize object detection application
det_app = DetectionApp(inference_mode,kmodel_path,labels,model_input_size,anchors,model_type,confidence_threshold,nms_threshold,rgb888p_size,display_size,debug_mode=debug_mode)
# Configure preprocessing for the model
det_app.config_preprocess()
# Main loop: capture, run inference, display results
while True:
with ScopedTiming("total", 1):
clock.tick()
os.exitpoint()
img = sensor.snapshot(chn=CAM_CHN_ID_0)
img = pl.get_frame() # Capture current frame
res = det_app.run(img) # Run inference
det_app.draw_result(pl.osd_img, res) # Draw detection results
if res['scores'] == []:
a = 0
print(a)
else:
if res["scores"][0] > 0.8:
if res["idx"][0] == 0:
os.mkdir(save_folder+"1")
print("will collect {} class in {} s".format(class_lst[class_id], 3-i))
time.sleep_ms(1000)
pl.show_image() # Show result on display
gc.collect() # Run garbage collection
# Cleanup: These lines will only run if the loop is interrupted (e.g., by an IDE break or external interruption)
det_app.deinit() # De-initialize detection app
pl.destroy() # Destroy pipeline instance
硬件板卡
庐山派
软件版本
CanMV_K230_LCKFB_micropython_v1.3-0-g8dd764f_nncase_v2.9.0.img