License Plate Recognition with LPRNet


This is v1.0 of a license plate recognition project with an updated dataset. Plates are rectified before recognition, which lowers the task difficulty; 40 epochs of training reach 98.4% accuracy on the validation set. The model is decoupled from the batch dimension, so inference accuracy is unaffected by batch size, and the project can be paired with a plate detection project. It covers the full training and inference pipeline, ONNX export with model checking and inference, and dataset construction.


End-to-end license plate recognition training with LPRNet

This project includes

  1. The full training and inference pipeline
  2. Model export to ONNX, plus ONNX model checking and inference

Dataset construction

In [1]
# Unzip the dataset
!unzip -o -q -d /home/aistudio/data /home/aistudio/data/data17968/CCPD2019.zip
In [4]
"""
CCPD数据集的图片名称即是label:
0152-4_14-224&551_398&624-388&610_224&624_234&565_398&551-0_0_30_27_31_9_31-97-108.jpg
        ^      ^       ^       ^       ^       ^       ^            ^         ^   ^
        |  框左上角  框右下角  右下角点 左下角点 左上角点  右上角点      车牌号码    亮度  模糊度
水平/垂直倾角
亮度数值越大,车牌越亮;模糊度数值越小,车牌越模糊。
"""import cv2import osimport numpy as npfrom tqdm.notebook import tqdm 

# Reference: https://blog.csdn.net/qq_36516958/article/details/114274778
from PIL import Image

# Rectify the image via a perspective transform from its 4 corner points
def four_point_transform(image, pts):
    rect = pts.astype('float32')
    br_x, br_y, bl_x, bl_y, tl_x, tl_y, tr_x, tr_y = rect
    widthA = np.sqrt(((br_x - bl_x) ** 2) + ((br_y - bl_y) ** 2))
    widthB = np.sqrt(((tr_x - tl_x) ** 2) + ((tr_y - tl_y) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    heightA = np.sqrt(((tr_x - br_x) ** 2) + ((tr_y - br_y) ** 2))
    heightB = np.sqrt(((tl_x - bl_x) ** 2) + ((tl_y - bl_y) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    rect = np.array([[tl_x, tl_y], [tr_x, tr_y], [br_x, br_y], [bl_x, bl_y]], dtype='float32')
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped

# CCPD contains duplicated plates, presumably shot at different angles or blur levels
path = r'data/ccpd_base'  # change to your own plate image directory
provinces = ["皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "O"]
alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
             'X', 'Y', 'Z', 'O']
ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X',
       'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']

save_path = 'rec_images/data/'
if not os.path.exists(save_path):
    os.makedirs(save_path)

num = 0
for filename in tqdm(os.listdir(path)):
    num += 1
    result = ""
    _, _, box, points, plate, brightness, blurriness = filename.split('-')
    list_plate = plate.split('_')  # read the plate number
    result += provinces[int(list_plate[0])]
    result += alphabets[int(list_plate[1])]
    result += ads[int(list_plate[2])] + ads[int(list_plate[3])] + ads[int(list_plate[4])] + ads[int(list_plate[5])] + ads[int(list_plate[6])]
    # New-energy plate requirement; delete this check if you are not using new-energy plates
    # if result[2] != 'D' and result[2] != 'F' \
    #         and result[-1] != 'D' and result[-1] != 'F':
    #     print(filename)
    #     print("Error label, Please check!")
    #     assert 0, "Error label ^~^!!!"
    # print(result)

    img_path = os.path.join(path, filename)
    assert os.path.exists(img_path), "image file {} does not exist.".format(img_path)
    img = cv2.imread(img_path)

    br, bl, tl, tr = points.split('_')
    br_x, br_y = [float(i) for i in br.split('&')]
    bl_x, bl_y = [float(i) for i in bl.split('&')]
    tl_x, tl_y = [float(i) for i in tl.split('&')]
    tr_x, tr_y = [float(i) for i in tr.split('&')]
    landmarks = np.array([br_x, br_y, bl_x, bl_y, tl_x, tl_y, tr_x, tr_y], dtype='float32')

    img = four_point_transform(img, landmarks)
    img = cv2.resize(img, (94, 24))
    cv2.imencode('.jpg', img)[1].tofile(os.path.join(save_path, r"{}.jpg".format(result)))
  0%|          | 0/95774 [00:00<?, ?it/s]

Dataset split

In [5]
import os
import random

image_dir = "rec_images/data"
train_file = 'rec_images/train.txt'
eval_file = 'rec_images/valid.txt'
dataset_list = os.listdir(image_dir)

train_num = 0
valid_num = 0
for img_name in dataset_list:
    if '.jpg' not in img_name:
        print(img_name)
        continue
    probo = random.randint(1, 100)
    if probo <= 80:  # train
        with open(train_file, 'a') as f_train:
            f_train.write(img_name + '\n')
        train_num += 1
    else:  # valid
        with open(eval_file, 'a') as f_eval:
            f_eval.write(img_name + '\n')
        valid_num += 1
print(f'train: {train_num}, val:{valid_num}')
.ipynb_checkpoints
train: 62959, val:15937

Dataloader

Data loading

In [1]
import os
from paddle.io import Dataset
from PIL import Image
import numpy as np

CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
         '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
         '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
         '新',
         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
         'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z', 'I', 'O', '-'
         ]

CHARS_DICT = {char: i for i, char in enumerate(CHARS)}

class LprnetDataloader(Dataset):
    def __init__(self, target_path, label_text, transforms=None):
        super().__init__()
        self.transforms = transforms
        self.target_path = target_path
        with open(label_text) as f:
            self.data = f.readlines()

    def __getitem__(self, index):
        img_name = self.data[index].strip()
        img_path = os.path.join(self.target_path, img_name)
        data = Image.open(img_path)

        label = []
        img_label = img_name.split('.', 1)[0]
        for c in img_label:
            label.append(CHARS_DICT[c])
        if len(label) == 8:  # 8 characters means a new-energy plate
            if not self.check(label):
                print(img_name)
                assert 0, "Error label ^~^!!!"

        if self.transforms is not None:
            data = self.transforms(data)

        data = np.array(data, dtype=np.float32)
        np_label = np.array(label, dtype=np.int64)
        return data, np_label, len(np_label)

    def __len__(self):
        return len(self.data)

    def check(self, label):
        if label[2] != CHARS_DICT['D'] and label[2] != CHARS_DICT['F'] \
                and label[-1] != CHARS_DICT['D'] and label[-1] != CHARS_DICT['F']:
            print("Error label, Please check!")
            return False
        else:
            return True

Batch collation

Labels of different lengths are padded to a common size, the maximum label length in the batch, giving shape (batch_size, max_label_length).

In [2]
import paddle
import numpy as np

def collate_fn(batch):
    # Images are already a uniform size; only the labels need padding
    batch_size = len(batch)
    # Find the longest label
    batch_temp = sorted(batch, key=lambda sample: len(sample[1]), reverse=True)
    max_label_length = len(batch_temp[0][1])
    # Create a zero tensor sized to the maximum length
    labels = np.zeros((batch_size, max_label_length), dtype='int64')
    label_lens = []
    img_list = []
    for x in range(batch_size):
        sample = batch[x]
        tensor = sample[0]
        target = sample[1]
        label_length = sample[2]
        img_list.append(tensor)
        # Copy each label into the zero tensor, which implements the padding
        labels[x, :label_length] = target[:]
        label_lens.append(len(target))
    label_lens = paddle.to_tensor(label_lens, dtype='int64')  # needed by CTCLoss
    imgs = paddle.to_tensor(img_list, dtype='float32')
    labels = paddle.to_tensor(labels, dtype="int32")  # CTCLoss only supports int32 labels
    return imgs, labels, label_lens

Data preprocessing

The dataset is large and already covers many conditions (weather, angle, blur), so no extra data augmentation is applied.

A simple normalization is enough: during training the data goes through ToTensor + Normalize.

import paddle.vision.transforms as T

train_transforms = T.Compose([
    T.ToTensor(data_format='CHW'),  # CHW is the output data format
    T.Normalize(
        [0.5, 0.5, 0.5],  # ToTensor has already scaled the image to 0-1
        [0.5, 0.5, 0.5],
        data_format='CHW'  # this is the input data format
    ),
])

The LPRNet network

Network architecture

In [3]
import paddle.nn as nn
import paddle


CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
         '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
         '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
         '新',
         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
         'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z', 'I', 'O', '-'
         ]

class small_basic_block(nn.Layer):
    def __init__(self, ch_in, ch_out):
        super(small_basic_block, self).__init__()
        self.block = nn.Sequential(
            nn.Conv2D(ch_in, ch_out // 4, kernel_size=1),
            nn.ReLU(),
            nn.Conv2D(ch_out // 4, ch_out // 4, kernel_size=(3, 1), padding=(1, 0)),
            nn.ReLU(),
            nn.Conv2D(ch_out // 4, ch_out // 4, kernel_size=(1, 3), padding=(0, 1)),
            nn.ReLU(),
            nn.Conv2D(ch_out // 4, ch_out, kernel_size=1),
        )

    def forward(self, x):
        return self.block(x)

class maxpool_3d(nn.Layer):
    def __init__(self, kernel_size, stride):
        super(maxpool_3d, self).__init__()
        assert (len(kernel_size) == 3 and len(stride) == 3)
        kernel_size2d1 = kernel_size[-2:]
        stride2d1 = stride[-2:]
        kernel_size2d2 = (1, kernel_size[0])
        stride2d2 = (1, stride[0])
        self.maxpool1 = nn.MaxPool2D(kernel_size=kernel_size2d1, stride=stride2d1)
        self.maxpool2 = nn.MaxPool2D(kernel_size=kernel_size2d2, stride=stride2d2)

    def forward(self, x):
        x = self.maxpool1(x)
        x = x.transpose((0, 3, 2, 1))
        x = self.maxpool2(x)
        x = x.transpose((0, 3, 2, 1))
        return x

class LPRNet(nn.Layer):
    def __init__(self, lpr_max_len, class_num, dropout_rate):
        super(LPRNet, self).__init__()
        self.lpr_max_len = lpr_max_len
        self.class_num = class_num
        self.backbone = nn.Sequential(
            nn.Conv2D(in_channels=3, out_channels=64, kernel_size=3, stride=1),    # 0  [bs,3,24,94] -> [bs,64,22,92]
            nn.BatchNorm2D(num_features=64),                                       # 1  -> [bs,64,22,92]
            nn.ReLU(),                                                             # 2  -> [bs,64,22,92]
            maxpool_3d(kernel_size=(1, 3, 3), stride=(1, 1, 1)),                   # 3  -> [bs,64,20,90]
            small_basic_block(ch_in=64, ch_out=128),                               # 4  -> [bs,128,20,90]
            nn.BatchNorm2D(num_features=128),                                      # 5  -> [bs,128,20,90]
            nn.ReLU(),                                                             # 6  -> [bs,128,20,90]
            maxpool_3d(kernel_size=(1, 3, 3), stride=(2, 1, 2)),                   # 7  -> [bs,64,18,44]
            small_basic_block(ch_in=64, ch_out=256),                               # 8  -> [bs,256,18,44]
            nn.BatchNorm2D(num_features=256),                                      # 9  -> [bs,256,18,44]
            nn.ReLU(),                                                             # 10 -> [bs,256,18,44]
            small_basic_block(ch_in=256, ch_out=256),                              # 11 -> [bs,256,18,44]
            nn.BatchNorm2D(num_features=256),                                      # 12 -> [bs,256,18,44]
            nn.ReLU(),                                                             # 13 -> [bs,256,18,44]
            maxpool_3d(kernel_size=(1, 3, 3), stride=(4, 1, 2)),                   # 14 -> [bs,64,16,21]
            nn.Dropout(dropout_rate),                                              # 15 -> [bs,64,16,21]
            nn.Conv2D(in_channels=64, out_channels=256, kernel_size=(1, 4), stride=1),   # 16 -> [bs,256,16,18]
            nn.BatchNorm2D(num_features=256),                                            # 17 -> [bs,256,16,18]
            nn.ReLU(),                                                                   # 18 -> [bs,256,16,18]
            nn.Dropout(dropout_rate),                                                    # 19 -> [bs,256,16,18]
            nn.Conv2D(in_channels=256, out_channels=class_num, kernel_size=(13, 1), stride=1),  # class_num=68  20 -> [bs,68,4,18]
            nn.BatchNorm2D(num_features=class_num),                                             # 21 -> [bs,68,4,18]
            nn.ReLU(),                                                                          # 22 -> [bs,68,4,18]
        )
        self.container = nn.Sequential(
            nn.Conv2D(in_channels=448 + self.class_num, out_channels=self.class_num, kernel_size=(1, 1), stride=(1, 1)),
        )

    def forward(self, x):
        keep_features = list()
        for i, layer in enumerate(self.backbone.children()):
            x = layer(x)
            if i in [2, 6, 13, 22]:
                keep_features.append(x)

        global_context = list()
        # keep_features: [bs,64,22,92]  [bs,128,20,90]  [bs,256,18,44]  [bs,68,4,18]
        for i, f in enumerate(keep_features):
            if i in [0, 1]:
                # [bs,64,22,92] -> [bs,64,4,18]
                # [bs,128,20,90] -> [bs,128,4,18]
                f = nn.AvgPool2D(kernel_size=5, stride=5)(f)
            if i in [2]:
                # [bs,256,18,44] -> [bs,256,4,18]
                f = nn.AvgPool2D(kernel_size=(4, 10), stride=(4, 2))(f)

            f_pow = paddle.pow(f, 2)     # [bs,64,4,18]  square every element
            # f_mean = paddle.mean(f_pow)  # scalar mean over all elements (couples samples within a batch)
            f_mean = paddle.mean(f_pow, axis=[1, 2, 3], keepdim=True)  # per-sample mean, decoupled from the batch
            f = paddle.divide(f, f_mean)  # [bs,64,4,18]  divide every element by the mean
            global_context.append(f)

        x = paddle.concat(global_context, 1)  # [bs,516,4,18]
        x = self.container(x)  # -> [bs, 68, 4, 18]  head
        logits = paddle.mean(x, axis=2)  # -> [bs, 68, 18]  68 character classes, sequence length 18

        return logits
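
As a quick sanity check of the shape comments above, the short sketch below (an addition, not part of the original notebook) pushes a dummy batch through the network and prints the output shape:

model_check = LPRNet(lpr_max_len=18, class_num=68, dropout_rate=0.5)
x_dummy = paddle.randn([2, 3, 24, 94], dtype='float32')  # a fake batch of 2 plate images
print(model_check(x_dummy).shape)  # expected: [2, 68, 18]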

Weight initialization

In [4]
# With model.apply, every sublayer can be modified
def init_weight(model):
    for name, layer in model.named_sublayers():
        if isinstance(layer, nn.Conv2D):
            weight_attr = nn.initializer.KaimingNormal()
            bias_attr = nn.initializer.Constant(0.)
            init_bias = paddle.create_parameter(layer.bias.shape, attr=bias_attr, dtype='float32')
            init_weight = paddle.create_parameter(layer.weight.shape, attr=weight_attr, dtype='float32')
            layer.weight = init_weight
            layer.bias = init_bias
        elif isinstance(layer, nn.BatchNorm2D):
            weight_attr = nn.initializer.XavierUniform()
            bias_attr = nn.initializer.Constant(0.)
            init_bias = paddle.create_parameter(layer.bias.shape, attr=bias_attr, dtype='float32')
            init_weight = paddle.create_parameter(layer.weight.shape, attr=weight_attr, dtype='float32')
            layer.weight = init_weight
            layer.bias = init_bias

Loss function

The loss function is CTCLoss. The arguments to pass are:

  1. logits: the probability sequence, shape=[max_logit_length, batch_size, num_classes+1]

    Only float32 is supported.

  2. labels: the padded label sequence, shape=[batch_size, max_label_length]

    Only int32 is supported.

  3. input_lengths: the length of each sequence in the logits, shape=[batch_size]

    Only int64 is supported.

  4. label_lengths: the length of each label sequence, shape=[batch_size]

    Only int64 is supported.

CTCLoss documentation: https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/nn/CTCLoss_cn.html
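
As a small illustration of these four arguments (an added sketch, not from the original notebook; the shapes mirror this project's settings, with the blank index at len(CHARS) - 1 = 67):

import paddle
import paddle.nn as nn

T_len, N, C = 18, 4, 68  # logit sequence length, batch size, class count (blank included)
logits = paddle.randn([T_len, N, C], dtype='float32')      # [max_logit_length, batch_size, num_classes+1]
labels = paddle.randint(0, C - 1, [N, 7], dtype='int32')   # padded labels, [batch_size, max_label_length]
input_lengths = paddle.full([N], T_len, dtype='int64')     # [batch_size]
label_lengths = paddle.full([N], 7, dtype='int64')         # [batch_size]

loss = nn.CTCLoss(blank=C - 1)(logits, labels, input_lengths, label_lengths)
print(loss.item())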

Accuracy metric

In [5]
import numpy as np

CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
         '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
         '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
         '新',
         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
         'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z', 'I', 'O', '-'
         ]

class ACC:
    def __init__(self):
        self.Tp = 0
        self.Tn_1 = 0
        self.Tn_2 = 0
        self.acc = 0

    def batch_update(self, batch_label, label_lengths, pred):
        for i, label in enumerate(batch_label):
            length = label_lengths[i]
            label = label[:length]
            pred_i = pred[i, :, :]
            preb_label = []
            for j in range(pred_i.shape[1]):  # T
                preb_label.append(np.argmax(pred_i[:, j], axis=0))
            no_repeat_blank_label = []
            pre_c = preb_label[0]
            if pre_c != len(CHARS) - 1:  # not blank
                no_repeat_blank_label.append(pre_c)
            for c in preb_label:  # drop repeated labels and blank labels
                if (pre_c == c) or (c == len(CHARS) - 1):
                    if c == len(CHARS) - 1:
                        pre_c = c
                    continue
                no_repeat_blank_label.append(c)
                pre_c = c
            # print('no_repeat_blank_label:', no_repeat_blank_label)
            # print('gt_label:', label)
            if len(label) != len(no_repeat_blank_label):
                self.Tn_1 += 1
            elif (np.asarray(label) == np.asarray(no_repeat_blank_label)).all():
                self.Tp += 1
            else:
                self.Tn_2 += 1
        self.acc = self.Tp * 1.0 / (self.Tp + self.Tn_1 + self.Tn_2)

    def clear(self):
        self.Tp = 0
        self.Tn_1 = 0
        self.Tn_2 = 0
        self.acc = 0

print(len(CHARS))
68
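
A tiny usage sketch of the metric (added for illustration; with random inputs the accuracy is expected to be near zero):

acc_demo = ACC()
pred_demo = np.random.randn(2, len(CHARS), 18)              # [batch, num_classes, T] network output
labels_demo = np.random.randint(0, len(CHARS) - 1, (2, 7))  # padded labels, as produced by collate_fn
acc_demo.batch_update(labels_demo, [7, 7], pred_demo)
print(acc_demo.Tp, acc_demo.Tn_1, acc_demo.Tn_2, acc_demo.acc)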

Loading pretrained weights

If one training run is not enough and training continues from earlier weight parameters, the pretrained weights need to be loaded.

Load pretrained weights here if you have them.

In [6]
# Saved weight path: runs/lprnet_best.pdparams
import os

def load_pretrained(model, path=None):
    print('params loading...')
    if not (os.path.isdir(path) or os.path.exists(path + '.pdparams')):
        raise ValueError("Model pretrain path {} does not "
                         "exist.".format(path))
    param_state_dict = paddle.load(path + ".pdparams")
    model.set_dict(param_state_dict)
    print(f'load {path + ".pdparams"} success...')
    return

Training

The trained model is saved as runs/lprnet_best_2.pdparams.

The rectified images lower the task difficulty to some extent; after 40 epochs of training, the final validation accuracy is 98.4%.

In [7]
import paddle.vision.transforms as T
from paddle.io import DataLoader
import time
from statistics import mean

# Parameters
EPOCH = 40
IMGSIZE = (94, 24)
IMGDIR = 'rec_images/data'
TRAINFILE = 'rec_images/train.txt'
VALIDFILE = 'rec_images/valid.txt'
SAVEFOLDER = './runs'
DROPOUT = 0.
LEARNINGRATE = 0.001
LPRMAXLEN = 18
TRAINBATCHSIZE = 256
EVALBATCHSIZE = 256
NUMWORKERS = 2  # if the dataloader errors out, reduce this value or set it to 0
WEIGHTDECAY = 0.001

# Image preprocessing
train_transforms = T.Compose([
    T.ColorJitter(0.2, 0.2, 0.2),
    T.ToTensor(data_format='CHW'),
    T.Normalize(
        [0.5, 0.5, 0.5],  # ToTensor has already scaled the image to 0-1
        [0.5, 0.5, 0.5],
        data_format='CHW'
    ),
])
eval_transforms = T.Compose([
    T.ToTensor(data_format='CHW'),
    T.Normalize(
        [0.5, 0.5, 0.5],
        [0.5, 0.5, 0.5],
        data_format='CHW'
    ),
])

# Data loading
train_data_set = LprnetDataloader(IMGDIR, TRAINFILE, train_transforms)
eval_data_set = LprnetDataloader(IMGDIR, VALIDFILE, eval_transforms)
train_loader = DataLoader(
    train_data_set,
    batch_size=TRAINBATCHSIZE,
    shuffle=True,
    num_workers=NUMWORKERS,
    drop_last=True,
    collate_fn=collate_fn
)
eval_loader = DataLoader(
    eval_data_set,
    batch_size=EVALBATCHSIZE,
    shuffle=False,
    num_workers=NUMWORKERS,
    drop_last=False,
    collate_fn=collate_fn
)

# Loss
loss_func = nn.CTCLoss(len(CHARS)-1)

# input_length, needed for the loss computation
input_length = np.ones(shape=TRAINBATCHSIZE) * LPRMAXLEN
input_length = paddle.to_tensor(input_length, dtype='int64')

# LPRNet: initialize weights, or load pretrained ones
model = LPRNet(LPRMAXLEN, len(CHARS), DROPOUT)
model.apply(init_weight)  # initialize on the first training run

# Optimizer
def make_optimizer(base_lr, parameters=None):
    momentum = 0.9
    weight_decay = WEIGHTDECAY
    scheduler = paddle.optimizer.lr.CosineAnnealingDecay(
        learning_rate=base_lr, eta_min=0.01*base_lr, T_max=EPOCH, verbose=1)

    scheduler = paddle.optimizer.lr.LinearWarmup(  # warmup for the first run, while the weights are still unstable
        learning_rate=scheduler,
        warmup_steps=5,
        start_lr=base_lr/5,
        end_lr=base_lr,
        verbose=True)

    optimizer = paddle.optimizer.Momentum(
        learning_rate=scheduler,
        weight_decay=paddle.regularizer.L2Decay(weight_decay),
        momentum=momentum,
        parameters=parameters)
    return optimizer, scheduler

optim, scheduler = make_optimizer(LEARNINGRATE, parameters=model.parameters())

# acc
acc_train = ACC()
acc_eval = ACC()
BESTACC = 0.5

# Training loop
for epoch in range(EPOCH):

    start_time = time.localtime(time.time())
    str_time = time.strftime("%Y-%m-%d %H:%M:%S", start_time)
    print(f'{str_time} || Epoch {epoch} start:')

    model.train()
    for batch_id, batch_data in enumerate(train_loader):
        img_data, label_data, label_lens = batch_data

        predict = model(img_data)
        logits = paddle.transpose(predict, (2, 0, 1))  # for ctc loss: T x N x C

        loss = loss_func(logits, label_data, input_length, label_lens)
        acc_train.batch_update(label_data, label_lens, predict)
        if batch_id % 50 == 0:
            print(f'epoch:{epoch}, batch_id:{batch_id}, loss:{loss.item():.4f}, \
            acc:{acc_train.acc:.4f} Tp/Tn_1/Tn_2: {acc_train.Tp}/{acc_train.Tn_1}/{acc_train.Tn_2}')

        loss.backward()
        optim.step()
        optim.clear_grad()
    acc_train.clear()

    # save
    if epoch and epoch % 20 == 0:
        paddle.save(model.state_dict(), os.path.join(SAVEFOLDER, f'lprnet_{epoch}_2.pdparams'))
        paddle.save(optim.state_dict(), os.path.join(SAVEFOLDER, f'lprnet_{epoch}_2.pdopt'))
        print(f'Saved log epoch-{epoch}')

    # eval
    with paddle.no_grad():
        model.eval()
        loss_list = []
        for batch_id, batch_data in enumerate(eval_loader):
            img_data, label_data, label_lens = batch_data
            predict = model(img_data)
            logits = paddle.transpose(predict, (2, 0, 1))
            loss = loss_func(logits, label_data, input_length, label_lens)
            acc_eval.batch_update(label_data, label_lens, predict)
            loss_list.append(loss.item())
        print(f'Eval of epoch {epoch} => acc:{acc_eval.acc:.4f}, loss:{mean(loss_list):.4f}')
        # save best model
        if acc_eval.acc > BESTACC:
            paddle.save(model.state_dict(), os.path.join(SAVEFOLDER, 'lprnet_best_2.pdparams'))
            paddle.save(optim.state_dict(), os.path.join(SAVEFOLDER, 'lprnet_best_2.pdopt'))
            BESTACC = acc_eval.acc
            print(f'Saved best model of epoch{epoch}, acc {acc_eval.acc:.4f}, save path "{SAVEFOLDER}"')
        acc_eval.clear()

    # learning-rate decay step
    scheduler.step()
Epoch 0: CosineAnnealingDecay set learning rate to 0.001.
Epoch 0: LinearWarmup set learning rate to 0.0002.
2023-07-27 13:05:14 || Epoch 0 start:
W0727 13:05:14.896270 20153 gpu_resources.cc:61] Please NOTE: device: 0, GPU Compute Capability: 7.0, Driver API Version: 11.2, Runtime API Version: 11.2
W0727 13:05:14.900187 20153 gpu_resources.cc:91] device: 0, cuDNN Version: 8.2.
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/nn/layer/norm.py:712: UserWarning: When training, we now always track global mean and variance.
  "When training, we now always track global mean and variance."
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/math_op_patch.py:277: UserWarning: The dtype of left and right variables are not the same, left dtype is paddle.float32, but right dtype is paddle.int64, the right dtype will convert to paddle.float32
  .format(lhs_dtype, rhs_dtype, lhs_dtype))
epoch:0, batch_id:0, loss:71.7745,             acc:0.0000 Tp/Tn_1/Tn_2: 0/242/14
epoch:0, batch_id:50, loss:4.1406,             acc:0.0000 Tp/Tn_1/Tn_2: 0/11919/1137
epoch:0, batch_id:100, loss:2.7202,             acc:0.0000 Tp/Tn_1/Tn_2: 0/23156/2700
epoch:0, batch_id:150, loss:2.3612,             acc:0.0000 Tp/Tn_1/Tn_2: 1/34437/4218
epoch:0, batch_id:200, loss:1.9747,             acc:0.0003 Tp/Tn_1/Tn_2: 15/45563/5878
Eval of epoch 0 => acc:0.0053, loss:1.7451
Epoch 1: LinearWarmup set learning rate to 0.00036.
2023-07-27 13:07:32 || Epoch 1 start:
epoch:1, batch_id:0, loss:1.8066,             acc:0.0000 Tp/Tn_1/Tn_2: 0/208/48
epoch:1, batch_id:50, loss:1.4053,             acc:0.0102 Tp/Tn_1/Tn_2: 133/10502/2421
epoch:1, batch_id:100, loss:1.0782,             acc:0.0284 Tp/Tn_1/Tn_2: 735/19852/5269
epoch:1, batch_id:150, loss:0.8829,             acc:0.0548 Tp/Tn_1/Tn_2: 2119/28197/8340
epoch:1, batch_id:200, loss:0.6519,             acc:0.0886 Tp/Tn_1/Tn_2: 4559/35534/11363
Eval of epoch 1 => acc:0.3610, loss:0.5038
Epoch 2: LinearWarmup set learning rate to 0.0005200000000000001.
2023-07-27 13:11:21 || Epoch 2 start:
epoch:2, batch_id:0, loss:0.4789,             acc:0.3516 Tp/Tn_1/Tn_2: 90/116/50
epoch:2, batch_id:50, loss:0.3491,             acc:0.4154 Tp/Tn_1/Tn_2: 5423/5085/2548
epoch:2, batch_id:100, loss:0.2775,             acc:0.4883 Tp/Tn_1/Tn_2: 12626/8683/4547
epoch:2, batch_id:150, loss:0.2271,             acc:0.5498 Tp/Tn_1/Tn_2: 21252/11238/6166
epoch:2, batch_id:200, loss:0.1476,             acc:0.5987 Tp/Tn_1/Tn_2: 30809/13097/7550
Eval of epoch 2 => acc:0.8157, loss:0.1444
Saved best model of epoch2, acc 0.8157, save path "./runs"
Epoch 3: LinearWarmup set learning rate to 0.00068.
2023-07-27 13:17:25 || Epoch 3 start:
epoch:3, batch_id:0, loss:0.1045,             acc:0.8320 Tp/Tn_1/Tn_2: 213/28/15
epoch:3, batch_id:50, loss:0.1356,             acc:0.8227 Tp/Tn_1/Tn_2: 10741/1188/1127
epoch:3, batch_id:100, loss:0.0764,             acc:0.8366 Tp/Tn_1/Tn_2: 21631/2146/2079
epoch:3, batch_id:150, loss:0.1323,             acc:0.8478 Tp/Tn_1/Tn_2: 32772/2902/2982
epoch:3, batch_id:200, loss:0.0691,             acc:0.8577 Tp/Tn_1/Tn_2: 44132/3486/3838
Eval of epoch 3 => acc:0.9084, loss:0.0740
Saved best model of epoch3, acc 0.9084, save path "./runs"
Epoch 4: LinearWarmup set learning rate to 0.00084.
2023-07-27 13:19:41 || Epoch 4 start:
epoch:4, batch_id:0, loss:0.1121,             acc:0.8867 Tp/Tn_1/Tn_2: 227/15/14
epoch:4, batch_id:50, loss:0.0609,             acc:0.9091 Tp/Tn_1/Tn_2: 11869/419/768
epoch:4, batch_id:100, loss:0.0481,             acc:0.9139 Tp/Tn_1/Tn_2: 23629/768/1459
epoch:4, batch_id:150, loss:0.0587,             acc:0.9171 Tp/Tn_1/Tn_2: 35450/1082/2124
epoch:4, batch_id:200, loss:0.0702,             acc:0.9203 Tp/Tn_1/Tn_2: 47356/1337/2763
Eval of epoch 4 => acc:0.9371, loss:0.0509
Saved best model of epoch4, acc 0.9371, save path "./runs"
Epoch 0: CosineAnnealingDecay set learning rate to 0.001.
Epoch 5: LinearWarmup set learning rate to 0.001.
2023-07-27 13:21:59 || Epoch 5 start:
epoch:5, batch_id:0, loss:0.0351,             acc:0.9336 Tp/Tn_1/Tn_2: 239/6/11
epoch:5, batch_id:50, loss:0.0328,             acc:0.9372 Tp/Tn_1/Tn_2: 12236/249/571
epoch:5, batch_id:100, loss:0.0846,             acc:0.9387 Tp/Tn_1/Tn_2: 24270/461/1125
epoch:5, batch_id:150, loss:0.0242,             acc:0.9410 Tp/Tn_1/Tn_2: 36375/657/1624
epoch:5, batch_id:200, loss:0.0518,             acc:0.9416 Tp/Tn_1/Tn_2: 48453/859/2144
Eval of epoch 5 => acc:0.9513, loss:0.0396
Saved best model of epoch5, acc 0.9513, save path "./runs"
Epoch 1: CosineAnnealingDecay set learning rate to 0.0009984740801978985.
Epoch 6: LinearWarmup set learning rate to 0.0009984740801978985.
2023-07-27 13:24:17 || Epoch 6 start:
epoch:6, batch_id:0, loss:0.0390,             acc:0.9453 Tp/Tn_1/Tn_2: 242/4/10
epoch:6, batch_id:50, loss:0.0319,             acc:0.9527 Tp/Tn_1/Tn_2: 12438/155/463
epoch:6, batch_id:100, loss:0.0267,             acc:0.9526 Tp/Tn_1/Tn_2: 24630/296/930
epoch:6, batch_id:150, loss:0.0330,             acc:0.9526 Tp/Tn_1/Tn_2: 36825/429/1402
epoch:6, batch_id:200, loss:0.0202,             acc:0.9534 Tp/Tn_1/Tn_2: 49056/560/1840
Eval of epoch 6 => acc:0.9589, loss:0.0337
Saved best model of epoch6, acc 0.9589, save path "./runs"
Epoch 2: CosineAnnealingDecay set learning rate to 0.0009939057285945933.
Epoch 7: LinearWarmup set learning rate to 0.0009939057285945933.
2023-07-27 13:26:33 || Epoch 7 start:
epoch:7, batch_id:0, loss:0.0181,             acc:0.9766 Tp/Tn_1/Tn_2: 250/0/6
epoch:7, batch_id:50, loss:0.0189,             acc:0.9609 Tp/Tn_1/Tn_2: 12546/118/392
epoch:7, batch_id:100, loss:0.0424,             acc:0.9590 Tp/Tn_1/Tn_2: 24795/234/827
epoch:7, batch_id:150, loss:0.0381,             acc:0.9611 Tp/Tn_1/Tn_2: 37154/322/1180
epoch:7, batch_id:200, loss:0.0206,             acc:0.9615 Tp/Tn_1/Tn_2: 49475/431/1550
Eval of epoch 7 => acc:0.9644, loss:0.0298
Saved best model of epoch7, acc 0.9644, save path "./runs"
Epoch 3: CosineAnnealingDecay set learning rate to 0.00098632311059685.
Epoch 8: LinearWarmup set learning rate to 0.00098632311059685.
2023-07-27 13:28:51 || Epoch 8 start:
epoch:8, batch_id:0, loss:0.0230,             acc:0.9570 Tp/Tn_1/Tn_2: 245/0/11
epoch:8, batch_id:50, loss:0.0259,             acc:0.9634 Tp/Tn_1/Tn_2: 12578/91/387
epoch:8, batch_id:100, loss:0.0092,             acc:0.9656 Tp/Tn_1/Tn_2: 24966/177/713
epoch:8, batch_id:150, loss:0.0376,             acc:0.9657 Tp/Tn_1/Tn_2: 37332/264/1060
epoch:8, batch_id:200, loss:0.0304,             acc:0.9662 Tp/Tn_1/Tn_2: 49718/344/1394
Eval of epoch 8 => acc:0.9681, loss:0.0268
Saved best model of epoch8, acc 0.9681, save path "./runs"
Epoch 4: CosineAnnealingDecay set learning rate to 0.0009757729755661011.
Epoch 9: LinearWarmup set learning rate to 0.0009757729755661011.
2023-07-27 13:31:10 || Epoch 9 start:
epoch:9, batch_id:0, loss:0.0178,             acc:0.9531 Tp/Tn_1/Tn_2: 244/1/11
epoch:9, batch_id:50, loss:0.0270,             acc:0.9685 Tp/Tn_1/Tn_2: 12645/74/337
epoch:9, batch_id:100, loss:0.0288,             acc:0.9699 Tp/Tn_1/Tn_2: 25078/155/623
epoch:9, batch_id:150, loss:0.0193,             acc:0.9697 Tp/Tn_1/Tn_2: 37483/223/950
epoch:9, batch_id:200, loss:0.0158,             acc:0.9703 Tp/Tn_1/Tn_2: 49927/287/1242
Eval of epoch 9 => acc:0.9695, loss:0.0239
Saved best model of epoch9, acc 0.9695, save path "./runs"
Epoch 5: CosineAnnealingDecay set learning rate to 0.000962320368593087.
Epoch 10: LinearWarmup set learning rate to 0.000962320368593087.
2023-07-27 13:33:30 || Epoch 10 start:
epoch:10, batch_id:0, loss:0.0241,             acc:0.9531 Tp/Tn_1/Tn_2: 244/3/9
epoch:10, batch_id:50, loss:0.0086,             acc:0.9722 Tp/Tn_1/Tn_2: 12693/66/297
epoch:10, batch_id:100, loss:0.0416,             acc:0.9717 Tp/Tn_1/Tn_2: 25125/129/602
epoch:10, batch_id:150, loss:0.0239,             acc:0.9724 Tp/Tn_1/Tn_2: 37588/191/877
epoch:10, batch_id:200, loss:0.0162,             acc:0.9728 Tp/Tn_1/Tn_2: 50054/263/1139
Eval of epoch 10 => acc:0.9715, loss:0.0213
Saved best model of epoch10, acc 0.9715, save path "./runs"
Epoch 6: CosineAnnealingDecay set learning rate to 0.0009460482294732421.
Epoch 11: LinearWarmup set learning rate to 0.0009460482294732421.
2023-07-27 13:35:46 || Epoch 11 start:
epoch:11, batch_id:0, loss:0.0308,             acc:0.9570 Tp/Tn_1/Tn_2: 245/0/11
epoch:11, batch_id:50, loss:0.0199,             acc:0.9739 Tp/Tn_1/Tn_2: 12715/77/264
epoch:11, batch_id:100, loss:0.0190,             acc:0.9752 Tp/Tn_1/Tn_2: 25215/125/516
epoch:11, batch_id:150, loss:0.0100,             acc:0.9751 Tp/Tn_1/Tn_2: 37692/182/782
epoch:11, batch_id:200, loss:0.0172,             acc:0.9749 Tp/Tn_1/Tn_2: 50166/244/1046
Eval of epoch 11 => acc:0.9745, loss:0.0195
Saved best model of epoch11, acc 0.9745, save path "./runs"
Epoch 7: CosineAnnealingDecay set learning rate to 0.0009270568813552756.
Epoch 12: LinearWarmup set learning rate to 0.0009270568813552756.
2023-07-27 13:38:05 || Epoch 12 start:
epoch:12, batch_id:0, loss:0.0274,             acc:0.9609 Tp/Tn_1/Tn_2: 246/3/7
epoch:12, batch_id:50, loss:0.0134,             acc:0.9765 Tp/Tn_1/Tn_2: 12749/46/261
epoch:12, batch_id:100, loss:0.0206,             acc:0.9773 Tp/Tn_1/Tn_2: 25269/93/494
epoch:12, batch_id:150, loss:0.0182,             acc:0.9766 Tp/Tn_1/Tn_2: 37753/151/752
epoch:12, batch_id:200, loss:0.0233,             acc:0.9762 Tp/Tn_1/Tn_2: 50230/217/1009
Eval of epoch 12 => acc:0.9757, loss:0.0177
Saved best model of epoch12, acc 0.9757, save path "./runs"
Epoch 8: CosineAnnealingDecay set learning rate to 0.000905463412215599.
Epoch 13: LinearWarmup set learning rate to 0.000905463412215599.
2023-07-27 13:40:19 || Epoch 13 start:
epoch:13, batch_id:0, loss:0.0058,             acc:0.9922 Tp/Tn_1/Tn_2: 254/1/1
epoch:13, batch_id:50, loss:0.0078,             acc:0.9788 Tp/Tn_1/Tn_2: 12779/43/234
epoch:13, batch_id:100, loss:0.0080,             acc:0.9780 Tp/Tn_1/Tn_2: 25287/92/477
epoch:13, batch_id:150, loss:0.0355,             acc:0.9780 Tp/Tn_1/Tn_2: 37804/141/711
epoch:13, batch_id:200, loss:0.0257,             acc:0.9784 Tp/Tn_1/Tn_2: 50345/190/921
Eval of epoch 13 => acc:0.9772, loss:0.0168
Saved best model of epoch13, acc 0.9772, save path "./runs"
Epoch 9: CosineAnnealingDecay set learning rate to 0.0008814009529720154.
Epoch 14: LinearWarmup set learning rate to 0.0008814009529720154.
2023-07-27 13:42:34 || Epoch 14 start:
epoch:14, batch_id:0, loss:0.0075,             acc:0.9883 Tp/Tn_1/Tn_2: 253/0/3
epoch:14, batch_id:50, loss:0.0097,             acc:0.9809 Tp/Tn_1/Tn_2: 12807/48/201
epoch:14, batch_id:100, loss:0.0088,             acc:0.9803 Tp/Tn_1/Tn_2: 25346/96/414
epoch:14, batch_id:150, loss:0.0239,             acc:0.9795 Tp/Tn_1/Tn_2: 37862/148/646
epoch:14, batch_id:200, loss:0.0091,             acc:0.9794 Tp/Tn_1/Tn_2: 50397/194/865
Eval of epoch 14 => acc:0.9778, loss:0.0162
Saved best model of epoch14, acc 0.9778, save path "./runs"
Epoch 10: CosineAnnealingDecay set learning rate to 0.000855017856687341.
Epoch 15: LinearWarmup set learning rate to 0.000855017856687341.
2023-07-27 13:44:53 || Epoch 15 start:
epoch:15, batch_id:0, loss:0.0075,             acc:0.9844 Tp/Tn_1/Tn_2: 252/0/4
epoch:15, batch_id:50, loss:0.0075,             acc:0.9814 Tp/Tn_1/Tn_2: 12813/28/215
epoch:15, batch_id:100, loss:0.0178,             acc:0.9817 Tp/Tn_1/Tn_2: 25383/66/407
epoch:15, batch_id:150, loss:0.0193,             acc:0.9820 Tp/Tn_1/Tn_2: 37959/100/597
epoch:15, batch_id:200, loss:0.0099,             acc:0.9818 Tp/Tn_1/Tn_2: 50517/147/792
Eval of epoch 15 => acc:0.9785, loss:0.0155
Saved best model of epoch15, acc 0.9785, save path "./runs"
Epoch 11: CosineAnnealingDecay set learning rate to 0.0008264767839234411.
Epoch 16: LinearWarmup set learning rate to 0.0008264767839234411.
2023-07-27 13:47:12 || Epoch 16 start:
epoch:16, batch_id:0, loss:0.0216,             acc:0.9688 Tp/Tn_1/Tn_2: 248/0/8
epoch:16, batch_id:50, loss:0.0061,             acc:0.9833 Tp/Tn_1/Tn_2: 12838/35/183
epoch:16, batch_id:100, loss:0.0131,             acc:0.9831 Tp/Tn_1/Tn_2: 25420/70/366
epoch:16, batch_id:150, loss:0.0040,             acc:0.9834 Tp/Tn_1/Tn_2: 38013/117/526
epoch:16, batch_id:200, loss:0.0064,             acc:0.9831 Tp/Tn_1/Tn_2: 50587/160/709
Eval of epoch 16 => acc:0.9786, loss:0.0148
Saved best model of epoch16, acc 0.9786, save path "./runs"
Epoch 12: CosineAnnealingDecay set learning rate to 0.0007959536998847742.
Epoch 17: LinearWarmup set learning rate to 0.0007959536998847742.
2023-07-27 13:49:29 || Epoch 17 start:
epoch:17, batch_id:0, loss:0.0169,             acc:0.9844 Tp/Tn_1/Tn_2: 252/0/4
epoch:17, batch_id:50, loss:0.0070,             acc:0.9845 Tp/Tn_1/Tn_2: 12853/42/161
epoch:17, batch_id:100, loss:0.0067,             acc:0.9825 Tp/Tn_1/Tn_2: 25403/80/373
epoch:17, batch_id:150, loss:0.0193,             acc:0.9827 Tp/Tn_1/Tn_2: 37989/129/538
epoch:17, batch_id:200, loss:0.0085,             acc:0.9833 Tp/Tn_1/Tn_2: 50599/155/702
Eval of epoch 17 => acc:0.9797, loss:0.0144
Saved best model of epoch17, acc 0.9797, save path "./runs"
Epoch 13: CosineAnnealingDecay set learning rate to 0.0007636367895343947.
Epoch 18: LinearWarmup set learning rate to 0.0007636367895343947.
2023-07-27 13:51:45 || Epoch 18 start:
epoch:18, batch_id:0, loss:0.0047,             acc:0.9961 Tp/Tn_1/Tn_2: 255/0/1
epoch:18, batch_id:50, loss:0.0091,             acc:0.9848 Tp/Tn_1/Tn_2: 12857/32/167
epoch:18, batch_id:100, loss:0.0121,             acc:0.9844 Tp/Tn_1/Tn_2: 25452/59/345
epoch:18, batch_id:150, loss:0.0130,             acc:0.9846 Tp/Tn_1/Tn_2: 38059/90/507
epoch:18, batch_id:200, loss:0.0141,             acc:0.9846 Tp/Tn_1/Tn_2: 50663/130/663
Eval of epoch 18 => acc:0.9804, loss:0.0138
Saved best model of epoch18, acc 0.9804, save path "./runs"
Epoch 14: CosineAnnealingDecay set learning rate to 0.0007297252973710757.
Epoch 19: LinearWarmup set learning rate to 0.0007297252973710757.
2023-07-27 13:54:00 || Epoch 19 start:
epoch:19, batch_id:0, loss:0.0076,             acc:0.9844 Tp/Tn_1/Tn_2: 252/0/4
epoch:19, batch_id:50, loss:0.0050,             acc:0.9849 Tp/Tn_1/Tn_2: 12859/32/165
epoch:19, batch_id:100, loss:0.0146,             acc:0.9853 Tp/Tn_1/Tn_2: 25476/63/317
epoch:19, batch_id:150, loss:0.0035,             acc:0.9858 Tp/Tn_1/Tn_2: 38107/87/462
epoch:19, batch_id:200, loss:0.0061,             acc:0.9856 Tp/Tn_1/Tn_2: 50713/120/623
Eval of epoch 19 => acc:0.9812, loss:0.0138
Saved best model of epoch19, acc 0.9812, save path "./runs"
Epoch 15: CosineAnnealingDecay set learning rate to 0.0006944282990207195.
Epoch 20: LinearWarmup set learning rate to 0.0006944282990207195.
2023-07-27 13:56:15 || Epoch 20 start:
epoch:20, batch_id:0, loss:0.0080,             acc:0.9883 Tp/Tn_1/Tn_2: 253/0/3
epoch:20, batch_id:50, loss:0.0089,             acc:0.9860 Tp/Tn_1/Tn_2: 12873/25/158
epoch:20, batch_id:100, loss:0.0161,             acc:0.9858 Tp/Tn_1/Tn_2: 25488/55/313
epoch:20, batch_id:150, loss:0.0112,             acc:0.9864 Tp/Tn_1/Tn_2: 38132/77/447
epoch:20, batch_id:200, loss:0.0057,             acc:0.9862 Tp/Tn_1/Tn_2: 50744/107/605
Saved log epoch-20
Eval of epoch 20 => acc:0.9813, loss:0.0134
Saved best model of epoch20, acc 0.9813, save path "./runs"
Epoch 16: CosineAnnealingDecay set learning rate to 0.000657963412215599.
Epoch 21: LinearWarmup set learning rate to 0.000657963412215599.
2023-07-27 13:58:31 || Epoch 21 start:
epoch:21, batch_id:0, loss:0.0319,             acc:0.9844 Tp/Tn_1/Tn_2: 252/3/1
epoch:21, batch_id:50, loss:0.0204,             acc:0.9850 Tp/Tn_1/Tn_2: 12860/29/167
epoch:21, batch_id:100, loss:0.0085,             acc:0.9858 Tp/Tn_1/Tn_2: 25488/53/315
epoch:21, batch_id:150, loss:0.0323,             acc:0.9862 Tp/Tn_1/Tn_2: 38124/79/453
epoch:21, batch_id:200, loss:0.0075,             acc:0.9866 Tp/Tn_1/Tn_2: 50764/104/588
Eval of epoch 21 => acc:0.9824, loss:0.0128
Saved best model of epoch21, acc 0.9824, save path "./runs"
Epoch 17: CosineAnnealingDecay set learning rate to 0.0006205554551086733.
Epoch 22: LinearWarmup set learning rate to 0.0006205554551086733.
2023-07-27 14:00:46 || Epoch 22 start:
epoch:22, batch_id:0, loss:0.0116,             acc:0.9844 Tp/Tn_1/Tn_2: 252/0/4
epoch:22, batch_id:50, loss:0.0096,             acc:0.9880 Tp/Tn_1/Tn_2: 12899/23/134
epoch:22, batch_id:100, loss:0.0068,             acc:0.9870 Tp/Tn_1/Tn_2: 25521/63/272
epoch:22, batch_id:150, loss:0.0063,             acc:0.9876 Tp/Tn_1/Tn_2: 38175/84/397
epoch:22, batch_id:200, loss:0.0104,             acc:0.9878 Tp/Tn_1/Tn_2: 50826/102/528
Eval of epoch 22 => acc:0.9822, loss:0.0129
Epoch 18: CosineAnnealingDecay set learning rate to 0.0005824350601949143.
Epoch 23: LinearWarmup set learning rate to 0.0005824350601949143.
2023-07-27 14:03:02 || Epoch 23 start:
epoch:23, batch_id:0, loss:0.0049,             acc:0.9922 Tp/Tn_1/Tn_2: 254/1/1
epoch:23, batch_id:50, loss:0.0056,             acc:0.9878 Tp/Tn_1/Tn_2: 12897/21/138
epoch:23, batch_id:100, loss:0.0058,             acc:0.9876 Tp/Tn_1/Tn_2: 25535/50/271
epoch:23, batch_id:150, loss:0.0023,             acc:0.9876 Tp/Tn_1/Tn_2: 38175/77/404
epoch:23, batch_id:200, loss:0.0041,             acc:0.9878 Tp/Tn_1/Tn_2: 50827/99/530
Eval of epoch 23 => acc:0.9822, loss:0.0125
Epoch 19: CosineAnnealingDecay set learning rate to 0.0005438372523852833.
Epoch 24: LinearWarmup set learning rate to 0.0005438372523852833.
2023-07-27 14:05:22 || Epoch 24 start:
epoch:24, batch_id:0, loss:0.0029,             acc:0.9961 Tp/Tn_1/Tn_2: 255/0/1
epoch:24, batch_id:50, loss:0.0065,             acc:0.9869 Tp/Tn_1/Tn_2: 12885/28/143
epoch:24, batch_id:100, loss:0.0044,             acc:0.9877 Tp/Tn_1/Tn_2: 25537/52/267
epoch:24, batch_id:150, loss:0.0028,             acc:0.9891 Tp/Tn_1/Tn_2: 38234/65/357
epoch:24, batch_id:200, loss:0.0007,             acc:0.9887 Tp/Tn_1/Tn_2: 50875/83/498
Eval of epoch 24 => acc:0.9834, loss:0.0121
Saved best model of epoch24, acc 0.9834, save path "./runs"
Epoch 20: CosineAnnealingDecay set learning rate to 0.000505.
Epoch 25: LinearWarmup set learning rate to 0.000505.
2023-07-27 14:07:41 || Epoch 25 start:
epoch:25, batch_id:0, loss:0.0111,             acc:0.9844 Tp/Tn_1/Tn_2: 252/0/4
epoch:25, batch_id:50, loss:0.0031,             acc:0.9896 Tp/Tn_1/Tn_2: 12920/21/115
epoch:25, batch_id:100, loss:0.0046,             acc:0.9896 Tp/Tn_1/Tn_2: 25586/45/225
epoch:25, batch_id:150, loss:0.0075,             acc:0.9891 Tp/Tn_1/Tn_2: 38236/73/347
epoch:25, batch_id:200, loss:0.0018,             acc:0.9892 Tp/Tn_1/Tn_2: 50901/97/458
Eval of epoch 25 => acc:0.9830, loss:0.0120
Epoch 21: CosineAnnealingDecay set learning rate to 0.0004661627476147168.
Epoch 26: LinearWarmup set learning rate to 0.0004661627476147168.
2023-07-27 14:09:58 || Epoch 26 start:
epoch:26, batch_id:0, loss:0.0084,             acc:0.9922 Tp/Tn_1/Tn_2: 254/0/2
epoch:26, batch_id:50, loss:0.0024,             acc:0.9897 Tp/Tn_1/Tn_2: 12922/29/105
epoch:26, batch_id:100, loss:0.0021,             acc:0.9894 Tp/Tn_1/Tn_2: 25583/52/221
epoch:26, batch_id:150, loss:0.0017,             acc:0.9892 Tp/Tn_1/Tn_2: 38239/66/351
epoch:26, batch_id:200, loss:0.0042,             acc:0.9893 Tp/Tn_1/Tn_2: 50906/81/469
Eval of epoch 26 => acc:0.9829, loss:0.0120
Epoch 22: CosineAnnealingDecay set learning rate to 0.0004275649398050859.
Epoch 27: LinearWarmup set learning rate to 0.0004275649398050859.
2023-07-27 14:12:15 || Epoch 27 start:
epoch:27, batch_id:0, loss:0.0072,             acc:0.9844 Tp/Tn_1/Tn_2: 252/0/4
epoch:27, batch_id:50, loss:0.0022,             acc:0.9881 Tp/Tn_1/Tn_2: 12900/32/124
epoch:27, batch_id:100, loss:0.0169,             acc:0.9889 Tp/Tn_1/Tn_2: 25568/57/231
epoch:27, batch_id:150, loss:0.0024,             acc:0.9893 Tp/Tn_1/Tn_2: 38243/71/342
epoch:27, batch_id:200, loss:0.0031,             acc:0.9894 Tp/Tn_1/Tn_2: 50909/89/458
Eval of epoch 27 => acc:0.9833, loss:0.0119
Epoch 23: CosineAnnealingDecay set learning rate to 0.0003894445448913269.
Epoch 28: LinearWarmup set learning rate to 0.0003894445448913269.
2023-07-27 14:14:31 || Epoch 28 start:
epoch:28, batch_id:0, loss:0.0051,             acc:0.9844 Tp/Tn_1/Tn_2: 252/0/4
epoch:28, batch_id:50, loss:0.0014,             acc:0.9895 Tp/Tn_1/Tn_2: 12919/20/117
epoch:28, batch_id:100, loss:0.0064,             acc:0.9897 Tp/Tn_1/Tn_2: 25589/44/223
epoch:28, batch_id:150, loss:0.0022,             acc:0.9896 Tp/Tn_1/Tn_2: 38253/61/342
epoch:28, batch_id:200, loss:0.0049,             acc:0.9895 Tp/Tn_1/Tn_2: 50918/85/453
Eval of epoch 28 => acc:0.9833, loss:0.0117
Epoch 24: CosineAnnealingDecay set learning rate to 0.0003520365877844011.
Epoch 29: LinearWarmup set learning rate to 0.0003520365877844011.
2023-07-27 14:16:48 || Epoch 29 start:
epoch:29, batch_id:0, loss:0.0044,             acc:0.9883 Tp/Tn_1/Tn_2: 253/1/2
epoch:29, batch_id:50, loss:0.0087,             acc:0.9891 Tp/Tn_1/Tn_2: 12914/22/120
epoch:29, batch_id:100, loss:0.0026,             acc:0.9901 Tp/Tn_1/Tn_2: 25600/35/221
epoch:29, batch_id:150, loss:0.0053,             acc:0.9900 Tp/Tn_1/Tn_2: 38271/59/326
epoch:29, batch_id:200, loss:0.0129,             acc:0.9900 Tp/Tn_1/Tn_2: 50942/79/435
Eval of epoch 29 => acc:0.9838, loss:0.0116
Saved best model of epoch29, acc 0.9838, save path "./runs"
Epoch 25: CosineAnnealingDecay set learning rate to 0.0003155717009792806.
Epoch 30: LinearWarmup set learning rate to 0.0003155717009792806.
2023-07-27 14:19:02 || Epoch 30 start:
epoch:30, batch_id:0, loss:0.0021,             acc:0.9961 Tp/Tn_1/Tn_2: 255/0/1
epoch:30, batch_id:50, loss:0.0052,             acc:0.9897 Tp/Tn_1/Tn_2: 12921/20/115
epoch:30, batch_id:100, loss:0.0035,             acc:0.9902 Tp/Tn_1/Tn_2: 25602/46/208
epoch:30, batch_id:150, loss:0.0025,             acc:0.9903 Tp/Tn_1/Tn_2: 38281/73/302
epoch:30, batch_id:200, loss:0.0030,             acc:0.9902 Tp/Tn_1/Tn_2: 50954/88/414
Eval of epoch 30 => acc:0.9839, loss:0.0116
Saved best model of epoch30, acc 0.9839, save path "./runs"
Epoch 26: CosineAnnealingDecay set learning rate to 0.0002802747026289244.
Epoch 31: LinearWarmup set learning rate to 0.0002802747026289244.
2023-07-27 14:21:17 || Epoch 31 start:
epoch:31, batch_id:0, loss:0.0085,             acc:0.9844 Tp/Tn_1/Tn_2: 252/0/4
epoch:31, batch_id:50, loss:0.0040,             acc:0.9926 Tp/Tn_1/Tn_2: 12960/7/89
epoch:31, batch_id:100, loss:0.0104,             acc:0.9915 Tp/Tn_1/Tn_2: 25636/32/188
epoch:31, batch_id:150, loss:0.0017,             acc:0.9908 Tp/Tn_1/Tn_2: 38300/55/301
epoch:31, batch_id:200, loss:0.0060,             acc:0.9902 Tp/Tn_1/Tn_2: 50952/81/423
Eval of epoch 31 => acc:0.9842, loss:0.0115
Saved best model of epoch31, acc 0.9842, save path "./runs"
Epoch 27: CosineAnnealingDecay set learning rate to 0.0002463632104656054.
Epoch 32: LinearWarmup set learning rate to 0.0002463632104656054.
2023-07-27 14:23:36 || Epoch 32 start:
epoch:32, batch_id:0, loss:0.0040,             acc:0.9961 Tp/Tn_1/Tn_2: 255/0/1
epoch:32, batch_id:50, loss:0.0020,             acc:0.9917 Tp/Tn_1/Tn_2: 12947/19/90
epoch:32, batch_id:100, loss:0.0056,             acc:0.9910 Tp/Tn_1/Tn_2: 25623/36/197
epoch:32, batch_id:150, loss:0.0016,             acc:0.9906 Tp/Tn_1/Tn_2: 38291/57/308
epoch:32, batch_id:200, loss:0.0043,             acc:0.9906 Tp/Tn_1/Tn_2: 50974/80/402
Eval of epoch 32 => acc:0.9838, loss:0.0115
Epoch 28: CosineAnnealingDecay set learning rate to 0.00021404630011522585.
Epoch 33: LinearWarmup set learning rate to 0.00021404630011522585.
2023-07-27 14:25:51 || Epoch 33 start:
epoch:33, batch_id:0, loss:0.0026,             acc:0.9961 Tp/Tn_1/Tn_2: 255/0/1
epoch:33, batch_id:50, loss:0.0023,             acc:0.9904 Tp/Tn_1/Tn_2: 12931/23/102
epoch:33, batch_id:100, loss:0.0130,             acc:0.9909 Tp/Tn_1/Tn_2: 25620/41/195
epoch:33, batch_id:150, loss:0.0057,             acc:0.9908 Tp/Tn_1/Tn_2: 38302/55/299
epoch:33, batch_id:200, loss:0.0029,             acc:0.9911 Tp/Tn_1/Tn_2: 50999/74/383
Eval of epoch 33 => acc:0.9839, loss:0.0115
Epoch 29: CosineAnnealingDecay set learning rate to 0.00018352321607655915.
Epoch 34: LinearWarmup set learning rate to 0.00018352321607655915.
2023-07-27 14:28:06 || Epoch 34 start:
epoch:34, batch_id:0, loss:0.0104,             acc:0.9883 Tp/Tn_1/Tn_2: 253/1/2
epoch:34, batch_id:50, loss:0.0019,             acc:0.9918 Tp/Tn_1/Tn_2: 12949/21/86
epoch:34, batch_id:100, loss:0.0054,             acc:0.9913 Tp/Tn_1/Tn_2: 25632/40/184
epoch:34, batch_id:150, loss:0.0043,             acc:0.9911 Tp/Tn_1/Tn_2: 38312/52/292
epoch:34, batch_id:200, loss:0.0021,             acc:0.9910 Tp/Tn_1/Tn_2: 50994/75/387
Eval of epoch 34 => acc:0.9836, loss:0.0114
Epoch 30: CosineAnnealingDecay set learning rate to 0.000154982143312659.
Epoch 35: LinearWarmup set learning rate to 0.000154982143312659.
2023-07-27 14:30:23 || Epoch 35 start:
epoch:35, batch_id:0, loss:0.0033,             acc:0.9961 Tp/Tn_1/Tn_2: 255/1/0
epoch:35, batch_id:50, loss:0.0076,             acc:0.9917 Tp/Tn_1/Tn_2: 12948/18/90
epoch:35, batch_id:100, loss:0.0009,             acc:0.9917 Tp/Tn_1/Tn_2: 25642/38/176
epoch:35, batch_id:150, loss:0.0035,             acc:0.9915 Tp/Tn_1/Tn_2: 38328/57/271
epoch:35, batch_id:200, loss:0.0037,             acc:0.9914 Tp/Tn_1/Tn_2: 51013/72/371
Eval of epoch 35 => acc:0.9843, loss:0.0113
Saved best model of epoch35, acc 0.9843, save path "./runs"
Epoch 31: CosineAnnealingDecay set learning rate to 0.0001285990470279847.
Epoch 36: LinearWarmup set learning rate to 0.0001285990470279847.
2023-07-27 14:32:39 || Epoch 36 start:
epoch:36, batch_id:0, loss:0.0057,             acc:0.9883 Tp/Tn_1/Tn_2: 253/0/3
epoch:36, batch_id:50, loss:0.0112,             acc:0.9913 Tp/Tn_1/Tn_2: 12942/22/92
epoch:36, batch_id:100, loss:0.0022,             acc:0.9909 Tp/Tn_1/Tn_2: 25621/42/193
epoch:36, batch_id:150, loss:0.0024,             acc:0.9913 Tp/Tn_1/Tn_2: 38319/55/282
epoch:36, batch_id:200, loss:0.0041,             acc:0.9913 Tp/Tn_1/Tn_2: 51007/76/373
Eval of epoch 36 => acc:0.9841, loss:0.0113
Epoch 32: CosineAnnealingDecay set learning rate to 0.00010453658778440107.
Epoch 37: LinearWarmup set learning rate to 0.00010453658778440107.
2023-07-27 14:34:57 || Epoch 37 start:
epoch:37, batch_id:0, loss:0.0035,             acc:0.9961 Tp/Tn_1/Tn_2: 255/0/1
epoch:37, batch_id:50, loss:0.0025,             acc:0.9913 Tp/Tn_1/Tn_2: 12943/18/95
epoch:37, batch_id:100, loss:0.0018,             acc:0.9918 Tp/Tn_1/Tn_2: 25644/36/176
epoch:37, batch_id:150, loss:0.0049,             acc:0.9919 Tp/Tn_1/Tn_2: 38341/52/263
epoch:37, batch_id:200, loss:0.0031,             acc:0.9915 Tp/Tn_1/Tn_2: 51020/71/365
Eval of epoch 37 => acc:0.9843, loss:0.0113
Saved best model of epoch37, acc 0.9843, save path "./runs"
Epoch 33: CosineAnnealingDecay set learning rate to 8.294311864472437e-05.
Epoch 38: LinearWarmup set learning rate to 8.294311864472437e-05.
2023-07-27 14:37:16 || Epoch 38 start:
epoch:38, batch_id:0, loss:0.0019,             acc:0.9961 Tp/Tn_1/Tn_2: 255/0/1
epoch:38, batch_id:50, loss:0.0070,             acc:0.9921 Tp/Tn_1/Tn_2: 12953/16/87
epoch:38, batch_id:100, loss:0.0009,             acc:0.9921 Tp/Tn_1/Tn_2: 25653/41/162
epoch:38, batch_id:150, loss:0.0053,             acc:0.9922 Tp/Tn_1/Tn_2: 38356/56/244
epoch:38, batch_id:200, loss:0.0121,             acc:0.9921 Tp/Tn_1/Tn_2: 51048/72/336
Eval of epoch 38 => acc:0.9839, loss:0.0113
Epoch 34: CosineAnnealingDecay set learning rate to 6.395177052675794e-05.
Epoch 39: LinearWarmup set learning rate to 6.395177052675794e-05.
2023-07-27 14:39:32 || Epoch 39 start:
epoch:39, batch_id:0, loss:0.0059,             acc:0.9883 Tp/Tn_1/Tn_2: 253/0/3
epoch:39, batch_id:50, loss:0.0037,             acc:0.9926 Tp/Tn_1/Tn_2: 12960/17/79
epoch:39, batch_id:100, loss:0.0056,             acc:0.9923 Tp/Tn_1/Tn_2: 25657/30/169
epoch:39, batch_id:150, loss:0.0050,             acc:0.9924 Tp/Tn_1/Tn_2: 38361/48/247
epoch:39, batch_id:200, loss:0.0080,             acc:0.9920 Tp/Tn_1/Tn_2: 51044/61/351
Eval of epoch 39 => acc:0.9843, loss:0.0113
Epoch 35: CosineAnnealingDecay set learning rate to 4.7679631406913064e-05.
Epoch 40: LinearWarmup set learning rate to 4.7679631406913064e-05.

Validation and testing

When the batch size is changed from 256 to 1, validation accuracy drops from 95.23% to 93.96%, probably because of the following code in the network:

# line 103
f_pow = paddle.pow(f, 2)
f_mean = paddle.mean(f_pow)
f = paddle.divide(f, f_mean)

Here the mean is coupled with the batch. Restricting the reduction axes decouples it (averaging within each sample only), for example:

f_mean = paddle.mean(f_pow, axis=[1,2,3], keepdim=True)

Once the model is decoupled from the batch, changing the batch size no longer affects the final accuracy.
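
A small numeric check of this claim (an added sketch, not part of the original notebook): with the per-sample mean, a sample normalized on its own matches the same sample normalized inside a batch:

import paddle

f = paddle.rand([4, 64, 4, 18])  # a fake feature map, batch of 4

# batch-coupled: one scalar mean over the whole batch
coupled = paddle.divide(f, paddle.mean(paddle.pow(f, 2)))

# batch-decoupled: one mean per sample
decoupled = paddle.divide(f, paddle.mean(paddle.pow(f, 2), axis=[1, 2, 3], keepdim=True))

# sample 0 processed alone gives the same result as when it sits inside the batch
f0 = f[0:1]
single = paddle.divide(f0, paddle.mean(paddle.pow(f0, 2), axis=[1, 2, 3], keepdim=True))
print(paddle.allclose(decoupled[0:1], single).item())  # True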

Validation

In [9]
import paddle.vision.transforms as T
from paddle.io import DataLoader
import time
from statistics import mean

# Parameters
IMGSIZE = (94, 24)
IMGDIR = 'rec_images/data'
VALIDFILE = 'rec_images/valid.txt'
LPRMAXLEN = 18
EVALBATCHSIZE = 1  # batch size
NUMWORKERS = 2

# Image preprocessing
eval_transforms = T.Compose([
    T.ToTensor(data_format='CHW'),
    T.Normalize(
        [0.5, 0.5, 0.5],  # ToTensor has already scaled the image to 0-1
        [0.5, 0.5, 0.5],
        data_format='CHW'
    ),
])

# Data loading
eval_data_set = LprnetDataloader(IMGDIR, VALIDFILE, eval_transforms)
eval_loader = DataLoader(
    eval_data_set,
    batch_size=EVALBATCHSIZE,
    shuffle=False,
    num_workers=NUMWORKERS,
    drop_last=False,
    collate_fn=collate_fn
)

# Loss
loss_func = nn.CTCLoss(len(CHARS)-1)

# input_length, needed for the loss computation (sized to the eval batch)
input_length = np.ones(shape=EVALBATCHSIZE) * LPRMAXLEN
input_length = paddle.to_tensor(input_length, dtype='int64')

# LPRNet, load the trained weights
model = LPRNet(LPRMAXLEN, len(CHARS), DROPOUT)
load_pretrained(model, 'runs/lprnet_best_2')

# acc
acc_eval = ACC()

# eval
with paddle.no_grad():
    model.eval()
    loss_list = []
    for batch_id, batch_data in enumerate(eval_loader):
        img_data, label_data, label_lens = batch_data
        predict = model(img_data)
        logits = paddle.transpose(predict, (2, 0, 1))
        loss = loss_func(logits, label_data, input_length, label_lens)
        acc_eval.batch_update(label_data, label_lens, predict)
        loss_list.append(loss.item())
    print(f'Eval from {VALIDFILE} => acc:{acc_eval.acc:.4f}, loss:{mean(loss_list):.4f}')
    acc_eval.clear()
params loading...
load runs/lprnet_best_2.pdparams success...
Eval from rec_images/valid.txt => acc:0.9843, loss:0.0114

Visualizing predictions

Prediction here still runs in dynamic-graph mode; for deployment, converting to a static graph is recommended (a short sketch follows the link below).

https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/jit/index_cn.html
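
A minimal sketch of that dynamic-to-static conversion (an addition, not from the original notebook; it assumes the trained `model` from the cells above, and save_static/lprnet is a hypothetical output path):

import paddle

model.eval()
static_model = paddle.jit.to_static(
    model,
    input_spec=[paddle.static.InputSpec([1, 3, 24, 94], 'float32', 'image')]
)
paddle.jit.save(static_model, 'save_static/lprnet')  # writes the static-graph model and parameter files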

In [11]
"""此部分为测试的可视化代码, 后处理可参考"""import cv2import matplotlib.pyplot as pltimport numpy as npimport paddle
%matplotlib inline

img_path = 'rec_images/data/皖AF358Z.jpg'img_data = cv2.imread(img_path)
img_data = img_data[:,:,::-1]  # BGR to RGBplt.imshow(img_data)
plt.axis('off')
plt.show()# 数据前处理img_data = cv2.resize(img_data,(94, 24))
img_data = (img_data - 127.5) / 127.5  # 归一化img_data = np.transpose(img_data, (2,0,1))  # HWC to CHWimg_data = np.expand_dims(img_data, 0)  # to BCHWimg_tensor = paddle.to_tensor(img_data, dtype='float32')  # shape == [1, 3, 24, 94]print(img_tensor.shape)# 加载模型, 预测CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',         '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',         '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',         '新',         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',         'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',         'W', 'X', 'Y', 'Z', 'I', 'O', '-'
         ]
LPRMAXLEN = 18model = LPRNet(LPRMAXLEN, len(CHARS), dropout_rate=0)
load_pretrained(model, 'runs/lprnet_best_2')
out_data = model(img_tensor)  # out_data.shape == [1, 68, 18]# 后处理,单张图片数据def reprocess(pred):
    pred_data = pred[0]
    pred_label = np.argmax(pred_data, axis=0)
    no_repeat_blank_label = []
    pre_c = pred_label[0]    if pre_c != len(CHARS) - 1:  # 非空白
        no_repeat_blank_label.append(pre_c)    for c in pred_label:  # dropout repeate label and blank label
        if (pre_c == c) or (c == len(CHARS) - 1):            if c == len(CHARS) - 1:
                pre_c = c            continue
        no_repeat_blank_label.append(c)
        pre_c = c
    char_list = [CHARS[i] for i in no_repeat_blank_label]    return ''.join(char_list)

rep_result = reprocess(out_data)print(rep_result)  # 皖AF358Z
[1, 3, 24, 94]
params loading...
load runs/lprnet_best_2.pdparams success...
皖AF358Z

TODO

  1. The dataset distribution is unbalanced: only the blue plates from CCPD2019 are used here; the CCPD2020 data with green plates could be added.

  2. Most plates are from 皖 (Anhui); plate data from other provinces could be added in suitable amounts.

  3. The plate crops are not rectified; to raise accuracy further, consider adding a plate rectification algorithm.

  4. This recognition network is coupled with the batch data; decoupling it and retraining is worth trying.

  5. The network fixes the output sequence length at 18; consider making the length configurable so the model suits more scenarios.

Model export

Export to ONNX

Here the dynamic-graph model is exported to an ONNX file directly with the API paddle.onnx.export.

https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/onnx/export_cn.html

In [12]
model = LPRNet(18, 68, dropout_rate=0)
load_pretrained(model, 'runs/lprnet_best_2')

save_path = 'save_onnx/lprnet'  # path to save to
x_spec = paddle.static.InputSpec([1, 3, 24, 94], 'float32', 'image')
paddle.onnx.export(model, save_path, input_spec=[x_spec], opset_version=11)
params loading...
load runs/lprnet_best_2.pdparams success...
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/nn/layer/norm.py:712: UserWarning: When training, we now always track global mean and variance.
  "When training, we now always track global mean and variance."
2023-07-27 15:03:51 [INFO]	Static PaddlePaddle model saved in save_onnx/paddle_model_static_onnx_temp_dir.
[Paddle2ONNX] Start to parse PaddlePaddle model...
[Paddle2ONNX] Model file path: save_onnx/paddle_model_static_onnx_temp_dir/model.pdmodel
[Paddle2ONNX] Paramters file path: save_onnx/paddle_model_static_onnx_temp_dir/model.pdiparams
[Paddle2ONNX] Start to parsing Paddle model...
[Paddle2ONNX] Use opset_version = 11 for ONNX export.
[Paddle2ONNX] PaddlePaddle model is exported as ONNX format now.
2023-07-27 15:03:52 [INFO]	ONNX model saved in save_onnx/lprnet.onnx.

Testing the ONNX model

The network structure can be visualized at https://netron.app/

More reference: https://www.paddlepaddle.org.cn/documentation/docs/zh/guides/advanced/model_to_onnx_cn.html

In [ ]
!pip install onnx==1.10.2
!pip install onnxruntime==1.9.0
In [14]
"""检查onnx是否合理,模型的版本、图的结构、节点及其输入和输出"""import onnx
onnx_model = onnx.load("save_onnx/lprnet.onnx")
check = onnx.checker.check_model(onnx_model)print('check: ', check)
check:  None
In [15]
# Imports
import numpy as np
import onnxruntime
import paddle

# Generate a random input to verify that Paddle and ONNX inference results agree
x = np.random.random((1, 3, 24, 94)).astype('float32')

# predict by ONNXRuntime
onnx_path = "save_onnx/lprnet.onnx"
ort_sess = onnxruntime.InferenceSession(onnx_path)
ort_inputs = {ort_sess.get_inputs()[0].name: x}
ort_outs = ort_sess.run(None, ort_inputs)
print("Exported model has been predicted by ONNXRuntime!")

# predict by Paddle
# the ONNX export step above also saved static-graph files to the output directory
model = paddle.jit.load("save_onnx/paddle_model_static_onnx_temp_dir/model")
model.eval()
paddle_input = paddle.to_tensor(x)
paddle_outs = model(paddle_input)
print("Original model has been predicted by Paddle!")

# compare ONNXRuntime and Paddle results
np.testing.assert_allclose(ort_outs[0], paddle_outs.numpy(), rtol=1.0, atol=1e-05)
print("The difference of results between ONNXRuntime and Paddle looks good!")
Exported model has been predicted by ONNXRuntime!
Original model has been predicted by Paddle!
The difference of results between ONNXRuntime and Paddle looks good!

ONNX inference

Inference is the same as above, with real-data preprocessing and model-output post-processing added.

In [16]
import onnxruntime
import cv2
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline

# Preprocessing
img_path = 'rec_images/data/皖AF358Z.jpg'
img_data = cv2.imread(img_path)
img_data = img_data[:, :, ::-1]  # BGR to RGB
plt.imshow(img_data)
plt.axis('off')
plt.show()

img_data = cv2.resize(img_data, (94, 24))
img_data = (img_data - 127.5) / 127.5  # normalize
img_data = np.transpose(img_data, (2, 0, 1))  # HWC to CHW
img_data = np.expand_dims(img_data, 0)  # to BCHW
np_data = np.array(img_data, dtype=np.float32)

# Load the ONNX model and create an inference session
onnx_path = "save_onnx/lprnet.onnx"
sess = onnxruntime.InferenceSession(onnx_path)

# Inference with ONNXRuntime
ort_inputs = {sess.get_inputs()[0].name: np_data}
result, = sess.run(None, ort_inputs)

# Post-process the inference result
CHARS = ['京', '沪', '津', '渝', '冀', '晋', '蒙', '辽', '吉', '黑',
         '苏', '浙', '皖', '闽', '赣', '鲁', '豫', '鄂', '湘', '粤',
         '桂', '琼', '川', '贵', '云', '藏', '陕', '甘', '青', '宁',
         '新',
         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
         'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
         'W', 'X', 'Y', 'Z', 'I', 'O', '-'
         ]

def reprocess(pred):
    pred_data = pred[0]
    pred_label = np.argmax(pred_data, axis=0)
    no_repeat_blank_label = []
    pre_c = pred_label[0]
    if pre_c != len(CHARS) - 1:  # not blank
        no_repeat_blank_label.append(pre_c)
    for c in pred_label:  # drop repeated labels and blank labels
        if (pre_c == c) or (c == len(CHARS) - 1):
            if c == len(CHARS) - 1:
                pre_c = c
            continue
        no_repeat_blank_label.append(c)
        pre_c = c
    char_list = [CHARS[i] for i in no_repeat_blank_label]
    return ''.join(char_list)

plate_str = reprocess(result)
print(plate_str)  # 皖AF358Z
皖AF358Z
