import argparse
import os
import sys
import shutil
import time
from pathlib import Path
import platform

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_DIR)
print(sys.path)

import cv2
import torch
import numpy as np
import torchvision.transforms as transforms
from lib.dataset import LoadImages, LoadStreams
from tqdm import tqdm
import onnxruntime as ort
from common_utils import *

# ImageNet normalization applied to every calibration frame
normalize = transforms.Normalize(
    mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
transform = transforms.Compose([
    transforms.ToTensor(),
    normalize,
])


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--source', type=str, default='./calib-imgs',
                        help='source file/folder, use numbers for input devices e.g. 0 for /dev/video0')
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('-c', '--compile', action='store_true', help='run in model compilation mode')
    parser.add_argument('--model', type=str, default='weights/yolopx_optimized.onnx', help='model path')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--debug', type=int, default=0, help='debug info level: 0, 1, 2, 3')
    parser.add_argument('--log', type=int, default=2,
                        help='log severity level. 0: Verbose, 1: Info, 2: Warning, 3: Error, 4: Fatal')
    parser.add_argument('--cal-frames', type=int, default=20,
                        help='number of frames/images to be used for calibration')
    parser.add_argument('--cal-iter', type=int, default=50, help='number of iterations for calibration')
    parser.add_argument('--tensor-bits', type=int, default=8, help='number of bits for TIDL tensor and weights')
    parser.add_argument('--arti-folder', type=str, default='./custom-artifacts/artifacts',
                        help='where to save the compiled artifacts')
    opt = parser.parse_args()
    return opt


def compile(opt):
    if platform.machine() == "aarch64":
        print(
            "Compilation of models is only supported on x86 machines.\n"
            "Please compile on a PC and copy the artifacts for running on TIDL devices."
        )
        sys.exit(-1)

    model_path = opt.model
    c7x_firmware_version = "11_00_08_00"  # firmware version for the C7x DSP
    # Build TIDL compilation options on top of the defaults from common_utils
    compile_options = {}
    compile_options.update(optional_options)

    so = ort.SessionOptions()
    so.log_severity_level = opt.log

    compile_options['artifacts_folder'] = opt.arti_folder
    compile_options['tidl_tools_path'] = os.environ.get("TIDL_TOOLS_PATH")
    # compile_options['advanced_options:c7x_firmware_version'] = c7x_firmware_version
    # compile_options['deny_list'] = "ScatterND"
    compile_options['debug_level'] = opt.debug
    compile_options['tensor_bits'] = opt.tensor_bits
    compile_options['advanced_options:calibration_frames'] = opt.cal_frames
    compile_options['advanced_options:calibration_iterations'] = opt.cal_iter
    print(f"compile_options: {compile_options}")

    calib_images = os.listdir('calib-imgs')

    import onnx

    # Start from an empty artifacts folder
    os.makedirs(compile_options["artifacts_folder"], exist_ok=True)
    for root, dirs, files in os.walk(compile_options["artifacts_folder"], topdown=False):
        for f in files:
            os.remove(os.path.join(root, f))
        for d in dirs:
            os.rmdir(os.path.join(root, d))

    EP_list = ['TIDLCompilationProvider', 'CPUExecutionProvider']

    # Shape inference is needed for offload to the C7x
    onnx.shape_inference.infer_shapes_path(model_path, model_path)

    onnx_session = ort.InferenceSession(
        model_path,
        providers=EP_list,
        provider_options=[compile_options, {}],
        sess_options=so
    )
    print(f"EP: {onnx_session.get_providers()}")
    input_name = onnx_session.get_inputs()[0].name

    # Set dataloader
    if opt.source.isnumeric():
        dataset = LoadStreams(opt.source, img_size=opt.img_size)
        bs = len(dataset)  # batch size
    else:
        dataset = LoadImages(opt.source, img_size=opt.img_size)
        bs = 1  # batch size

    # Run inference over the calibration frames to drive TIDL calibration
    vid_path, vid_writer = None, None
    img = torch.zeros((1, 3, opt.img_size, opt.img_size))  # init img
    for i, (path, img, img_det, vid_cap, shapes) in tqdm(enumerate(dataset), total=len(dataset)):
        print(f"input image dimension: {img.shape}")

        # Transform image - normalize, then add the batch dimension
        img = transform(img)
        img = np.expand_dims(img, axis=0)
        print(f"image dimension, type: {img.shape}, {type(img)}")

        # Run the model
        det_out, s8, s16, s32, da_seg_out, ll_seg_out = onnx_session.run(None, {input_name: img})


if __name__ == '__main__':
    opt = get_args()
    compile(opt)
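# Usage sketch (assumptions: the script filename and the TIDL tools path below are
# illustrative, not taken from the source; flag values mirror the argparse defaults
# above, and compilation must run on an x86 host):
#
#   export TIDL_TOOLS_PATH=/path/to/tidl_tools
#   python3 compile_model.py --model weights/yolopx_optimized.onnx \
#       --source ./calib-imgs --cal-frames 20 --cal-iter 50 --tensor-bits 8 \
#       --arti-folder ./custom-artifacts/artifacts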