I rewrote the code with TensorFlow 2, but after training the accuracy is very low, and the loss does not come down well either.

Source: 10-16 Face Keypoint Model Training Programming Example (3)

慕田峪7505890

2022-01-02

import tensorflow as tf
import numpy as np
from tensorflow.keras import models, layers, regularizers, initializers, losses, optimizers, callbacks
import os
import cv2

def get_one_batch(batch_size, type, path):
    '''
    Read one split of the TFRecord data and batch it.
    :param batch_size: number of examples per batch
    :param type: 0 for the training split, anything else for the test split
    :param path: directory containing train.tfrecords / test.tfrecords
    :return: a batched tf.data.Dataset of serialized examples (parsed later with tf.io.parse_example)
    '''
    if type == 0:  # train
        file = os.path.join(path, "train.tfrecords")
    else:  # test
        file = os.path.join(path, "test.tfrecords")
    dataset = tf.data.TFRecordDataset([file])
    if type == 0:
        dataset = dataset.shuffle(batch_size // 2).batch(batch_size, drop_remainder=True)
    else:
        dataset = dataset.batch(batch_size, drop_remainder=True)
    return dataset

def senet_blob(net, in_dim, on_dim, stride):
    '''
    Build a residual block with an SE (squeeze-and-excitation) branch.
    :param net: input feature map
    :param in_dim: number of input channels
    :param on_dim: number of output channels
    :param stride: convolution/pooling stride
    :return: output feature map
    '''
    bk = net
    net = layers.Conv2D(in_dim // 4, (1, 1), activation='relu',
                        # weight initialization
                        kernel_initializer=initializers.variance_scaling(),
                        # weight regularization (L2)
                        kernel_regularizer=regularizers.l2(0.00001))(net)
    net = layers.BatchNormalization()(net)
    net = layers.Conv2D(in_dim // 4, (3, 3), activation='relu',
                        kernel_initializer=initializers.variance_scaling(),
                        kernel_regularizer=regularizers.l2(0.00001))(net)
    net = layers.BatchNormalization()(net)
    net = layers.Conv2D(on_dim, (1, 1), activation='relu',
                        kernel_initializer=initializers.variance_scaling(),
                        kernel_regularizer=regularizers.l2(0.00001))(net)
    net = layers.BatchNormalization()(net)
    if stride > 1:
        net = layers.AveragePooling2D(pool_size=(stride * 2 - 1, stride * 2 - 1),
                                      strides=stride, padding='same')(net)
        bk = layers.AveragePooling2D(pool_size=(stride * 2 - 1, stride * 2 - 1),
                                            strides=stride)(bk)
    if in_dim != on_dim:
        bk = layers.Conv2D(on_dim, (1, 1), activation='relu',
                           kernel_initializer=initializers.variance_scaling(),
                           kernel_regularizer=regularizers.l2(0.00001))(bk)
    # SE part: global average pooling over H and W (NHWC), then channel-wise reweighting
    squeeze = tf.reduce_mean(net, axis=[1, 2])
    ex = layers.Dense(on_dim // 16, activation='relu')(squeeze)
    ex = layers.Dense(on_dim, activation='sigmoid')(ex)
    net = net * tf.reshape(ex, [-1, 1, 1, on_dim])
    # residual (skip) connection
    net = layers.add([bk, net])
    return net


def SENet(input_x, is_training=True, keep_prob=0.8):
    net = layers.Conv2D(32, (3, 3), activation='relu',
                        kernel_initializer=initializers.variance_scaling(),
                        kernel_regularizer=regularizers.l2(0.00001))(input_x)
    net = layers.AveragePooling2D(pool_size=(3, 3), strides=2)(net)
    net = senet_blob(net, 32, 64, 2)
    net = senet_blob(net, 64, 128, 2)
    net = senet_blob(net, 128, 256, 2)
    net = senet_blob(net, 256, 512, 2)
    net = tf.reduce_mean(net, axis=[1, 2])
    net = layers.Dense(1024, activation='relu')(net)
    net = layers.AlphaDropout(rate=keep_prob)(net)
    net = layers.Dense(136)(net)
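    # With a 128x128x3 input, the blocks above shrink the feature map to 2x2
    # before the global average pooling, and Dense(136) regresses what are
    # presumably the 68 (x, y) landmark coordinates.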
    return net

if __name__ == "__main__":

    batch_size = 1000
    dataset = get_one_batch(batch_size, type=0, path="/Users/admin/Documents/300W_LP/tfrecord_basic")
    dataset_test = get_one_batch(batch_size, type=1, path="/Users/admin/Documents/300W_LP/tfrecord_basic")
    expected_features = {
        "image": tf.io.FixedLenFeature([], dtype=tf.string),
        "label": tf.io.FixedLenFeature([136], dtype=tf.float32)
    }
    i = 0
    for serialized_example_tensor in dataset:
        ex = tf.io.parse_example(serialized_example_tensor, expected_features)
        images = tf.io.decode_raw(ex['image'], tf.uint8)
        images = tf.cast(tf.reshape(images, (batch_size, 128, 128, 3)), dtype=tf.float32)
        input = layers.Input(shape=images.shape[1:])
        logits = SENet(input)
        if i == 0:
            model = models.Model(inputs=input, outputs=logits)
            print(model.summary())
            model.compile(loss=losses.mean_squared_error,
                          optimizer=optimizers.Adam(learning_rate=0.001, decay=0.98),
                          metrics=['accuracy'])
        labels = ex['label']
        logdir = "/Users/admin/Documents/300W_LP/callbacks"
        if not os.path.exists(logdir):
            os.mkdir(logdir)
        output_model_file = os.path.join(logdir, "senet.h5")
        callback = [
            callbacks.TensorBoard(logdir),
            callbacks.ModelCheckpoint(output_model_file, save_best_only=True),
            callbacks.EarlyStopping(patience=5, min_delta=1e-3)
        ]
        history = model.fit(images, labels, epochs=10, callbacks=callback)
        i += 1
    for test_tensor in dataset_test:
        ex_test = tf.io.parse_example(test_tensor, expected_features)
        images_test = tf.io.decode_raw(ex_test['image'], tf.uint8)
        images_test = tf.cast(tf.reshape(images_test, (batch_size, 128, 128, 3)), dtype=tf.float32)
        labels_test = ex_test['label']
        print(model.evaluate(images_test, labels_test))

The accuracy I end up with is shockingly low:

Epoch 10/10
32/32 [==============================] - 13s 419ms/step - loss: 1.5366 - accuracy: 0.0050
WARNING:tensorflow:Can save best model only with val_loss available, skipping.
WARNING:tensorflow:Early stopping conditioned on metric `val_loss` which is not available. Available metrics are: loss,accuracy
32/32 [==============================] - 5s 133ms/step - loss: 0.2943 - accuracy: 0.1810
[0.2943294048309326, 0.1809999942779541]
32/32 [==============================] - 4s 131ms/step - loss: 0.4661 - accuracy: 0.1770
[0.46614906191825867, 0.1770000010728836]
32/32 [==============================] - 4s 129ms/step - loss: 0.2837 - accuracy: 0.1820
[0.28367236256599426, 0.18199999630451202]
32/32 [==============================] - 4s 136ms/step - loss: 0.3318 - accuracy: 0.1620
[0.3318333625793457, 0.16200000047683716]
32/32 [==============================] - 5s 150ms/step - loss: 0.3497 - accuracy: 0.1720
[0.3497335910797119, 0.1720000058412552]
32/32 [==============================] - 5s 150ms/step - loss: 0.2819 - accuracy: 0.1770
[0.2819477617740631, 0.1770000010728836]
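
For reference, this is roughly the flow I think the example intends, written out as a sketch: the TFRecords are parsed inside the tf.data pipeline, the model is built and compiled only once, the test set is passed as validation data so the val_loss-based callbacks have something to monitor, and mean absolute error is tracked instead of accuracy (the 136 outputs are regressed coordinates, so classification accuracy probably isn't meaningful). The smaller batch size, the pixel scaling to [0, 1], and the dropped decay=0.98 argument are my own guesses rather than anything from the course, so I am not sure this is right:

import os
import tensorflow as tf
from tensorflow.keras import models, layers, losses, optimizers, callbacks

# Feature spec matching the TFRecords above (same keys and shapes).
expected_features = {
    "image": tf.io.FixedLenFeature([], dtype=tf.string),
    "label": tf.io.FixedLenFeature([136], dtype=tf.float32),
}

def parse_example(serialized):
    # Decode one serialized example into a float image and its 136 landmark values.
    ex = tf.io.parse_single_example(serialized, expected_features)
    image = tf.io.decode_raw(ex["image"], tf.uint8)
    image = tf.reshape(image, (128, 128, 3))
    image = tf.cast(image, tf.float32) / 255.0  # scale pixels to [0, 1]
    return image, ex["label"]

def make_dataset(path, name, batch_size, shuffle):
    ds = tf.data.TFRecordDataset([os.path.join(path, name)])
    ds = ds.map(parse_example, num_parallel_calls=tf.data.AUTOTUNE)
    if shuffle:
        ds = ds.shuffle(2048)
    return ds.batch(batch_size).prefetch(tf.data.AUTOTUNE)

path = "/Users/admin/Documents/300W_LP/tfrecord_basic"
train_ds = make_dataset(path, "train.tfrecords", batch_size=64, shuffle=True)
test_ds = make_dataset(path, "test.tfrecords", batch_size=64, shuffle=False)

# Build and compile the model once, outside any loop over batches.
inputs = layers.Input(shape=(128, 128, 3))
outputs = SENet(inputs)  # the SENet() defined above
model = models.Model(inputs=inputs, outputs=outputs)
model.compile(loss=losses.mean_squared_error,
              optimizer=optimizers.Adam(learning_rate=0.001),
              # 'mae' instead of 'accuracy': the 136 outputs are continuous
              # coordinates, so classification accuracy says nothing useful.
              metrics=['mae'])

logdir = "/Users/admin/Documents/300W_LP/callbacks"
os.makedirs(logdir, exist_ok=True)
cbs = [
    callbacks.TensorBoard(logdir),
    # val_loss exists because validation_data is passed to fit() below.
    callbacks.ModelCheckpoint(os.path.join(logdir, "senet.h5"), save_best_only=True),
    callbacks.EarlyStopping(patience=5, min_delta=1e-3)
]

# A single fit() call; each epoch iterates over the whole training set.
history = model.fit(train_ds, validation_data=test_ds, epochs=10, callbacks=cbs)
print(model.evaluate(test_ds))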

Could you take a look for me, teacher?


1 Answer

会写代码的好厨师

2022-01-11

You can send me the complete project files over QQ, and I'll find some time to help you debug it.

