Andrew Ng (吴恩达) Deep Learning course assignment: Course 4 Week 01 Convolution model - Application (TF2)

The original TensorFlow 1 code is rewritten with TensorFlow 2.6.0, first using the v1 compatibility mode and then TF2's eager execution.
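
As a quick illustration (a minimal sketch, not part of the original assignment), the two execution modes can be told apart with tf.executing_eagerly():

import tensorflow as tf

print(tf.executing_eagerly())            # True: TF2 runs eagerly by default
tf.compat.v1.disable_eager_execution()   # switch back to TF1-style static graphs
print(tf.executing_eagerly())            # False: ops now build a graph to be run in a Session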

1. TF1 compatibility mode

1.0 - TensorFlow model
"""
be careful!
the result is related to the version of tensorflow!!
"""
import math

import h5py
import matplotlib.pyplot as plt
import numpy as np
import scipy
import tensorflow as tf
from cnn_utils import *
from PIL import Image
from scipy import ndimage
from tensorflow.python.framework import ops


tf.compat.v1.disable_eager_execution()

%matplotlib inline
np.random.seed(1)

# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Example of a picture
index = 6
plt.imshow(X_train_orig[index])
print("y = " + str(np.squeeze(Y_train_orig[:, index])))

X_train = X_train_orig / 255.0
X_test = X_test_orig / 255.0
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print("number of training examples = " + str(X_train.shape[0]))
print("number of test examples = " + str(X_test.shape[0]))
print("X_train shape: " + str(X_train.shape))
print("Y_train shape: " + str(Y_train.shape))
print("X_test shape: " + str(X_test.shape))
print("Y_test shape: " + str(Y_test.shape))
conv_layers = {}
1.1 - Create placeholders
# GRADED FUNCTION: create_placeholders


def create_placeholders(n_H0, n_W0, n_C0, n_y):
    """
    Creates the placeholders for the tensorflow session.

    Arguments:
    n_H0 -- scalar, height of an input image
    n_W0 -- scalar, width of an input image
    n_C0 -- scalar, number of channels of the input
    n_y -- scalar, number of classes

    Returns:
    X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype "float"
    Y -- placeholder for the input labels, of shape [None, n_y] and dtype "float"
    """

    ### START CODE HERE ### (≈2 lines)
    X = tf.compat.v1.placeholder(tf.float32, shape=[None, n_H0, n_W0, n_C0])
    Y = tf.compat.v1.placeholder(tf.float32, shape=[None, n_y])
    ### END CODE HERE ###

    return X, Y

X, Y = create_placeholders(64, 64, 3, 6)
print("X = " + str(X))
print("Y = " + str(Y))
1.2 - Initialize parameters
# GRADED FUNCTION: initialize_parameters


def initialize_parameters():
    """
    Initializes weight parameters to build a neural network with tensorflow. The shapes are:
                        W1 : [4, 4, 3, 8]
                        W2 : [2, 2, 8, 16]
    Returns:
    parameters -- a dictionary of tensors containing W1, W2
    """

    tf.random.set_seed(1)  # so that your "random" numbers match ours

    ### START CODE HERE ### (approx. 2 lines of code)
    W1 = tf.compat.v1.get_variable(
        "W1", [4, 4, 3, 8], initializer=tf.initializers.GlorotUniform(seed=0)
    )
    W2 = tf.compat.v1.get_variable(
        "W2", [2, 2, 8, 16], initializer=tf.initializers.GlorotUniform(seed=0)
    )
    ### END CODE HERE ###

    parameters = {"W1": W1, "W2": W2}

    return parameters

ops.reset_default_graph()
with tf.compat.v1.Session() as sess:
    parameters = initialize_parameters()
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)
    print("W1 = " + str(parameters["W1"].eval()[1, 1, 1]))
    print("W2 = " + str(parameters["W2"].eval()[1, 1, 1]))
1.3 - Forward propagation
# GRADED FUNCTION: forward_propagation


def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model:
    CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "W2"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit
    """

    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters["W1"]
    W2 = parameters["W2"]

    ### START CODE HERE ###
    # CONV2D: stride of 1, padding 'SAME'
    Z1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding="SAME")
    print("Z1 shape = ", Z1.shape)
    # RELU
    A1 = tf.nn.relu(Z1)
    print("A1 shape = ", A1.shape)
    # MAXPOOL: window 8x8, stride 8, padding 'SAME'
    P1 = tf.nn.max_pool(A1, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding="SAME")
    print("P1 shape = ", P1.shape)
    # CONV2D: filters W2, stride 1, padding 'SAME'
    Z2 = tf.nn.conv2d(P1, W2, strides=[1, 1, 1, 1], padding="SAME")
    print("Z2 shape = ", Z2.shape)
    # RELU
    A2 = tf.nn.relu(Z2)
    print("A2 shape = ", A2.shape)
    # MAXPOOL: window 4x4, stride 4, padding 'SAME'
    P2 = tf.nn.max_pool(A2, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding="SAME")
    print("P2 shape = ", P2.shape)
    # FLATTEN
    P2 = tf.keras.layers.Flatten()(P2)
    print("P2 shape = ", P2.shape)
    # FULLY-CONNECTED without non-linear activation function (do not call softmax).
    # 6 neurons in output layer. Hint: one of the arguments should be "activation=None"
    Z3 = tf.compat.v1.layers.dense(P2, 6, activation=None)
    print("Z3 shape = ", Z3.shape)
    ### END CODE HERE ###

    return Z3

ops.reset_default_graph()

with tf.compat.v1.Session() as sess:
    np.random.seed(1)
    X, Y = create_placeholders(64, 64, 3, 6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)
    a = sess.run(Z3, {X: np.random.randn(2, 64, 64, 3), Y: np.random.randn(2, 6)})
    print("Z3 = " + str(a))
1.4 - Compute cost
# GRADED FUNCTION: compute_cost


def compute_cost(Z3, Y):
    """
    Computes the cost

    Arguments:
    Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (6, number of examples)
    Y -- "true" labels vector placeholder, same shape as Z3

    Returns:
    cost - Tensor of the cost function
    """

    ### START CODE HERE ### (1 line of code)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z3, labels=Y))
    ### END CODE HERE ###

    return cost

ops.reset_default_graph()

with tf.compat.v1.Session() as sess:
    np.random.seed(1)
    X, Y = create_placeholders(64, 64, 3, 6)
    parameters = initialize_parameters()
    Z3 = forward_propagation(X, parameters)
    cost = compute_cost(Z3, Y)
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)
    a = sess.run(cost, {X: np.random.randn(4, 64, 64, 3), Y: np.random.randn(4, 6)})
    print("cost = " + str(a))
1.5 - Model
# GRADED FUNCTION: model


def model(
    X_train,
    Y_train,
    X_test,
    Y_test,
    learning_rate=0.009,
    num_epochs=200,
    minibatch_size=64,
    print_cost=True,
):
    """
    Implements a three-layer ConvNet in Tensorflow:
    CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED

    Arguments:
    X_train -- training set, of shape (None, 64, 64, 3)
    Y_train -- training set labels, of shape (None, n_y = 6)
    X_test -- test set, of shape (None, 64, 64, 3)
    Y_test -- test set, of shape (None, n_y = 6)
    learning_rate -- learning rate of the optimization
    num_epochs -- number of epochs of the optimization loop
    minibatch_size -- size of a minibatch
    print_cost -- True to print the cost every 5 epochs

    Returns:
    train_accuracy -- real number, accuracy on the train set (X_train)
    test_accuracy -- real number, testing accuracy on the test set (X_test)
    parameters -- parameters learnt by the model. They can then be used to predict.
    """

    ops.reset_default_graph()  # to be able to rerun the model without overwriting tf variables
    tf.random.set_seed(1)  # to keep results consistent (tensorflow seed)
    seed = 3  # to keep results consistent (numpy seed)
    (m, n_H0, n_W0, n_C0) = X_train.shape
    n_y = Y_train.shape[1]
    costs = []  # To keep track of the cost

    # Create Placeholders of the correct shape
    ### START CODE HERE ### (1 line)
    X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
    ### END CODE HERE ###

    # Initialize parameters
    ### START CODE HERE ### (1 line)
    parameters = initialize_parameters()
    ### END CODE HERE ###

    # Forward propagation: Build the forward propagation in the tensorflow graph
    ### START CODE HERE ### (1 line)
    Z3 = forward_propagation(X, parameters)
    ### END CODE HERE ###

    # Cost function: Add cost function to tensorflow graph
    ### START CODE HERE ### (1 line)
    cost = compute_cost(Z3, Y)
    ### END CODE HERE ###

    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
    ### START CODE HERE ### (1 line)
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate).minimize(
        cost
    )
    ### END CODE HERE ###

    # Initialize all the variables globally
    init = tf.compat.v1.global_variables_initializer()

    # Start the session to compute the tensorflow graph
    with tf.compat.v1.Session() as sess:
        # Run the initialization
        sess.run(init)

        # Do the training loop
        for epoch in range(num_epochs):
            minibatch_cost = 0.0
            num_minibatches = int(
                m / minibatch_size
            )  # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                # IMPORTANT: The line that runs the graph on a minibatch.
                # Run the session to execute the optimizer and the cost; the feed_dict should contain a minibatch for (X, Y).
                ### START CODE HERE ### (1 line)
                _, temp_cost = sess.run(
                    [optimizer, cost], feed_dict={X: minibatch_X, Y: minibatch_Y}
                )
                ### END CODE HERE ###

                minibatch_cost += temp_cost / num_minibatches

            # Print the cost every 5 epochs and record it every epoch
            if print_cost and epoch % 5 == 0:
                print("Cost after epoch %i: %f" % (epoch, minibatch_cost))
            if print_cost:
                costs.append(minibatch_cost)

        # plot the cost
        plt.plot(np.squeeze(costs))
        plt.ylabel("cost")
        plt.xlabel("epochs")
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()

        # Calculate the correct predictions
        predict_op = tf.argmax(Z3, 1)
        correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print(accuracy)
        train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
        test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
        print("Train Accuracy:", train_accuracy)
        print("Test Accuracy:", test_accuracy)

        return train_accuracy, test_accuracy, parameters

_, _, parameters = model(X_train, Y_train, X_test, Y_test)
1.6 - Result
Cost after epoch 0: 1.911903
Cost after epoch 5: 1.557961
Cost after epoch 10: 1.148252
Cost after epoch 15: 0.909686
Cost after epoch 20: 0.691366
Cost after epoch 25: 0.591482
Cost after epoch 30: 0.516434
Cost after epoch 35: 0.422076
Cost after epoch 40: 0.426731
Cost after epoch 45: 0.316498
Cost after epoch 50: 0.271206
Cost after epoch 55: 0.217374
Cost after epoch 60: 0.194462
Cost after epoch 65: 0.235810
Cost after epoch 70: 0.163237
Cost after epoch 75: 0.160460
Cost after epoch 80: 0.144300
Cost after epoch 85: 0.189282
Cost after epoch 90: 0.122686
Cost after epoch 95: 0.110457
Cost after epoch 100: 0.128274
Cost after epoch 105: 0.121849
Cost after epoch 110: 0.086279
Cost after epoch 115: 0.051895
Cost after epoch 120: 0.068288
Cost after epoch 125: 0.077729
Cost after epoch 130: 0.048763
Cost after epoch 135: 0.056662
Cost after epoch 140: 0.047897
Cost after epoch 145: 0.076555
Cost after epoch 150: 0.027694
Cost after epoch 155: 0.022746
Cost after epoch 160: 0.019129
Cost after epoch 165: 0.028242
Cost after epoch 170: 0.035235
Cost after epoch 175: 0.051823
Cost after epoch 180: 0.011300
Cost after epoch 185: 0.009745
Cost after epoch 190: 0.018539
Cost after epoch 195: 0.008087

Tensor("Mean_1:0", shape=(), dtype=float32)
Train Accuracy: 1.0
Test Accuracy: 0.8333333
 

2. TF2 eager execution

2.1 - Read data
"""
be careful!
the result is related to the version of tensorflow!!
"""
import math

import h5py
import matplotlib.pyplot as plt
import numpy as np
import scipy
import tensorflow as tf
from cnn_utils import *
from PIL import Image
from scipy import ndimage
from tensorflow.keras import datasets, layers, models
from tensorflow.keras.utils import to_categorical
from tensorflow.python.framework import ops

%matplotlib inline
np.random.seed(1)

# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Example of a picture
index = 9
plt.imshow(X_train_orig[index])
print("y = " + str(np.squeeze(Y_train_orig[:, index])))
2.2 - Model
def model(X, Y, X_test, Y_test, num_epochs=10, batch_size=32, learning_rate=0.0001):
    # Get the number of training examples
    # m = X.shape[0]

    # Define a sequential model
    model = tf.keras.models.Sequential()
    model.add(
        layers.Conv2D(
            8, (3, 3), activation="relu", padding="same", input_shape=(64, 64, 3)
        )
    )
    model.add(layers.MaxPooling2D((8, 8), strides=(8, 8), padding="same"))
    model.add(layers.Conv2D(16, (3, 3), activation="relu", padding="same"))
    model.add(layers.MaxPooling2D((4, 4), strides=(4, 4), padding="same"))
    model.add(layers.Flatten())
    model.add(layers.Dense(6, activation="softmax"))
    model.summary()
    # Compile the model with the Adam optimizer, categorical crossentropy loss function,
    # and tracking categorical accuracy as a metric
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=learning_rate),
        loss=tf.keras.losses.categorical_crossentropy,
        metrics=[tf.keras.metrics.CategoricalAccuracy()],
    )

    # create tracer
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir="./tensorBoard", histogram_freq=1, profile_batch=100000000
    )

    # Fit the model to the training data
    model.fit(
        X,
        Y,
        epochs=num_epochs,
        batch_size=batch_size,
        verbose=2,
        callbacks=[tensorboard_callback],
    )

    # Evaluate and print the accuracy on the training set
    print("Train accuracy = ", model.evaluate(X, Y))

    # Evaluate and print the accuracy on the test set
    print("Test accuracy = ", model.evaluate(X_test, Y_test))

    # Return the trained model
    return model
2.3 - Training
model = model(
    X_train,
    Y_train,
    X_test,
    Y_test,
    num_epochs=200,
    batch_size=64,
    learning_rate=0.009,
)
2.4 - Result
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d (Conv2D)              (None, 64, 64, 8)         224       
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 8, 8, 8)           0         
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 8, 8, 16)          1168      
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 2, 2, 16)          0         
_________________________________________________________________
flatten (Flatten)            (None, 64)                0         
_________________________________________________________________
dense (Dense)                (None, 6)                 390       
=================================================================
Total params: 1,782
Trainable params: 1,782
Non-trainable params: 0
_________________________________________________________________
Epoch 1/200
17/17 - 1s - loss: 1.8649 - categorical_accuracy: 0.1630
Epoch 2/200
17/17 - 1s - loss: 1.7924 - categorical_accuracy: 0.1750
Epoch 3/200
17/17 - 1s - loss: 1.7903 - categorical_accuracy: 0.1796
Epoch 4/200
17/17 - 1s - loss: 1.7875 - categorical_accuracy: 0.2352
Epoch 5/200
17/17 - 1s - loss: 1.7842 - categorical_accuracy: 0.2370
Epoch 6/200
17/17 - 1s - loss: 1.7787 - categorical_accuracy: 0.2519
Epoch 7/200
17/17 - 1s - loss: 1.7676 - categorical_accuracy: 0.2593
Epoch 8/200
17/17 - 1s - loss: 1.7454 - categorical_accuracy: 0.2565
Epoch 9/200
17/17 - 1s - loss: 1.7170 - categorical_accuracy: 0.2852
Epoch 10/200
17/17 - 1s - loss: 1.6695 - categorical_accuracy: 0.3269
Epoch 11/200
17/17 - 1s - loss: 1.5925 - categorical_accuracy: 0.3602
Epoch 12/200
17/17 - 1s - loss: 1.5214 - categorical_accuracy: 0.3815
Epoch 13/200
17/17 - 1s - loss: 1.3944 - categorical_accuracy: 0.4778
Epoch 14/200
17/17 - 1s - loss: 1.2839 - categorical_accuracy: 0.5278
Epoch 15/200
17/17 - 1s - loss: 1.2357 - categorical_accuracy: 0.5231
Epoch 16/200
17/17 - 1s - loss: 1.1304 - categorical_accuracy: 0.5731
Epoch 17/200
17/17 - 1s - loss: 0.9930 - categorical_accuracy: 0.6361
Epoch 18/200
17/17 - 1s - loss: 0.9319 - categorical_accuracy: 0.6537
Epoch 19/200
17/17 - 1s - loss: 0.8862 - categorical_accuracy: 0.6713
Epoch 20/200
17/17 - 1s - loss: 0.8255 - categorical_accuracy: 0.6944
Epoch 21/200
17/17 - 1s - loss: 0.8070 - categorical_accuracy: 0.6972
Epoch 22/200
17/17 - 1s - loss: 0.7470 - categorical_accuracy: 0.7278
Epoch 23/200
17/17 - 1s - loss: 0.7327 - categorical_accuracy: 0.7370
Epoch 24/200
17/17 - 1s - loss: 0.6892 - categorical_accuracy: 0.7528
Epoch 25/200
17/17 - 1s - loss: 0.6806 - categorical_accuracy: 0.7509
Epoch 26/200
17/17 - 1s - loss: 0.6578 - categorical_accuracy: 0.7565
Epoch 27/200
17/17 - 1s - loss: 0.6319 - categorical_accuracy: 0.7694
Epoch 28/200
17/17 - 1s - loss: 0.6332 - categorical_accuracy: 0.7593
Epoch 29/200
17/17 - 1s - loss: 0.6285 - categorical_accuracy: 0.7630
Epoch 30/200
17/17 - 1s - loss: 0.6178 - categorical_accuracy: 0.7778
Epoch 31/200
17/17 - 1s - loss: 0.5963 - categorical_accuracy: 0.7759
Epoch 32/200
17/17 - 1s - loss: 0.6223 - categorical_accuracy: 0.7750
Epoch 33/200
17/17 - 1s - loss: 0.6162 - categorical_accuracy: 0.7694
Epoch 34/200
17/17 - 1s - loss: 0.5894 - categorical_accuracy: 0.7898
Epoch 35/200
17/17 - 1s - loss: 0.5939 - categorical_accuracy: 0.7861
Epoch 36/200
17/17 - 1s - loss: 0.5594 - categorical_accuracy: 0.7907
Epoch 37/200
17/17 - 1s - loss: 0.5334 - categorical_accuracy: 0.8111
Epoch 38/200
17/17 - 1s - loss: 0.5387 - categorical_accuracy: 0.8065
Epoch 39/200
17/17 - 1s - loss: 0.5630 - categorical_accuracy: 0.7954
Epoch 40/200
17/17 - 1s - loss: 0.5363 - categorical_accuracy: 0.8111
Epoch 41/200
17/17 - 1s - loss: 0.5149 - categorical_accuracy: 0.8176
Epoch 42/200
17/17 - 1s - loss: 0.4964 - categorical_accuracy: 0.8231
Epoch 43/200
17/17 - 1s - loss: 0.5073 - categorical_accuracy: 0.8194
Epoch 44/200
17/17 - 1s - loss: 0.4905 - categorical_accuracy: 0.8148
Epoch 45/200
17/17 - 1s - loss: 0.5001 - categorical_accuracy: 0.8250
Epoch 46/200
17/17 - 1s - loss: 0.4959 - categorical_accuracy: 0.8194
Epoch 47/200
17/17 - 1s - loss: 0.4717 - categorical_accuracy: 0.8417
Epoch 48/200
17/17 - 1s - loss: 0.4586 - categorical_accuracy: 0.8398
Epoch 49/200
17/17 - 1s - loss: 0.4578 - categorical_accuracy: 0.8306
Epoch 50/200
17/17 - 1s - loss: 0.4522 - categorical_accuracy: 0.8361
Epoch 51/200
17/17 - 1s - loss: 0.4506 - categorical_accuracy: 0.8370
Epoch 52/200
17/17 - 1s - loss: 0.4732 - categorical_accuracy: 0.8324
Epoch 53/200
17/17 - 1s - loss: 0.4544 - categorical_accuracy: 0.8343
Epoch 54/200
17/17 - 1s - loss: 0.4392 - categorical_accuracy: 0.8389
Epoch 55/200
17/17 - 1s - loss: 0.4305 - categorical_accuracy: 0.8472
Epoch 56/200
17/17 - 1s - loss: 0.4348 - categorical_accuracy: 0.8463
Epoch 57/200
17/17 - 1s - loss: 0.4178 - categorical_accuracy: 0.8472
Epoch 58/200
17/17 - 1s - loss: 0.4208 - categorical_accuracy: 0.8528
Epoch 59/200
17/17 - 1s - loss: 0.4237 - categorical_accuracy: 0.8398
Epoch 60/200
17/17 - 1s - loss: 0.4375 - categorical_accuracy: 0.8407
Epoch 61/200
17/17 - 1s - loss: 0.3961 - categorical_accuracy: 0.8565
Epoch 62/200
17/17 - 1s - loss: 0.3937 - categorical_accuracy: 0.8657
Epoch 63/200
17/17 - 1s - loss: 0.3893 - categorical_accuracy: 0.8611
Epoch 64/200
17/17 - 1s - loss: 0.3938 - categorical_accuracy: 0.8500
Epoch 65/200
17/17 - 1s - loss: 0.3806 - categorical_accuracy: 0.8694
Epoch 66/200
17/17 - 1s - loss: 0.3999 - categorical_accuracy: 0.8509
Epoch 67/200
17/17 - 1s - loss: 0.3876 - categorical_accuracy: 0.8602
Epoch 68/200
17/17 - 1s - loss: 0.3660 - categorical_accuracy: 0.8648
Epoch 69/200
17/17 - 1s - loss: 0.3753 - categorical_accuracy: 0.8648
Epoch 70/200
17/17 - 1s - loss: 0.3696 - categorical_accuracy: 0.8685
Epoch 71/200
17/17 - 1s - loss: 0.3780 - categorical_accuracy: 0.8583
Epoch 72/200
17/17 - 1s - loss: 0.3863 - categorical_accuracy: 0.8620
Epoch 73/200
17/17 - 1s - loss: 0.3760 - categorical_accuracy: 0.8630
Epoch 74/200
17/17 - 1s - loss: 0.3660 - categorical_accuracy: 0.8778
Epoch 75/200
17/17 - 1s - loss: 0.3573 - categorical_accuracy: 0.8722
Epoch 76/200
17/17 - 1s - loss: 0.3571 - categorical_accuracy: 0.8685
Epoch 77/200
17/17 - 1s - loss: 0.3371 - categorical_accuracy: 0.8824
Epoch 78/200
17/17 - 1s - loss: 0.3737 - categorical_accuracy: 0.8583
Epoch 79/200
17/17 - 1s - loss: 0.3415 - categorical_accuracy: 0.8778
Epoch 80/200
17/17 - 1s - loss: 0.3422 - categorical_accuracy: 0.8731
Epoch 81/200
17/17 - 1s - loss: 0.3606 - categorical_accuracy: 0.8630
Epoch 82/200
17/17 - 1s - loss: 0.3710 - categorical_accuracy: 0.8676
Epoch 83/200
17/17 - 1s - loss: 0.3680 - categorical_accuracy: 0.8778
Epoch 84/200
17/17 - 1s - loss: 0.3597 - categorical_accuracy: 0.8685
Epoch 85/200
17/17 - 1s - loss: 0.3247 - categorical_accuracy: 0.8907
Epoch 86/200
17/17 - 1s - loss: 0.3214 - categorical_accuracy: 0.8824
Epoch 87/200
17/17 - 1s - loss: 0.3246 - categorical_accuracy: 0.8843
Epoch 88/200
17/17 - 1s - loss: 0.3223 - categorical_accuracy: 0.8907
Epoch 89/200
17/17 - 1s - loss: 0.3409 - categorical_accuracy: 0.8722
Epoch 90/200
17/17 - 1s - loss: 0.3203 - categorical_accuracy: 0.8907
Epoch 91/200
17/17 - 1s - loss: 0.3150 - categorical_accuracy: 0.8870
Epoch 92/200
17/17 - 1s - loss: 0.3092 - categorical_accuracy: 0.8852
Epoch 93/200
17/17 - 1s - loss: 0.3337 - categorical_accuracy: 0.8824
Epoch 94/200
17/17 - 1s - loss: 0.3298 - categorical_accuracy: 0.8843
Epoch 95/200
17/17 - 1s - loss: 0.3423 - categorical_accuracy: 0.8648
Epoch 96/200
17/17 - 1s - loss: 0.3145 - categorical_accuracy: 0.8861
Epoch 97/200
17/17 - 1s - loss: 0.2863 - categorical_accuracy: 0.9000
Epoch 98/200
17/17 - 1s - loss: 0.2860 - categorical_accuracy: 0.8926
Epoch 99/200
17/17 - 1s - loss: 0.2908 - categorical_accuracy: 0.8833
Epoch 100/200
17/17 - 1s - loss: 0.2903 - categorical_accuracy: 0.8889
Epoch 101/200
17/17 - 1s - loss: 0.3150 - categorical_accuracy: 0.8852
Epoch 102/200
17/17 - 1s - loss: 0.2983 - categorical_accuracy: 0.8935
Epoch 103/200
17/17 - 1s - loss: 0.2981 - categorical_accuracy: 0.9000
Epoch 104/200
17/17 - 1s - loss: 0.3027 - categorical_accuracy: 0.8898
Epoch 105/200
17/17 - 1s - loss: 0.2863 - categorical_accuracy: 0.9028
Epoch 106/200
17/17 - 1s - loss: 0.3328 - categorical_accuracy: 0.8713
Epoch 107/200
17/17 - 1s - loss: 0.3134 - categorical_accuracy: 0.8769
Epoch 108/200
17/17 - 1s - loss: 0.3348 - categorical_accuracy: 0.8852
Epoch 109/200
17/17 - 1s - loss: 0.2884 - categorical_accuracy: 0.8944
Epoch 110/200
17/17 - 1s - loss: 0.2820 - categorical_accuracy: 0.8991
Epoch 111/200
17/17 - 1s - loss: 0.3068 - categorical_accuracy: 0.8917
Epoch 112/200
17/17 - 1s - loss: 0.2872 - categorical_accuracy: 0.9000
Epoch 113/200
17/17 - 1s - loss: 0.2793 - categorical_accuracy: 0.8935
Epoch 114/200
17/17 - 1s - loss: 0.2714 - categorical_accuracy: 0.9065
Epoch 115/200
17/17 - 1s - loss: 0.2819 - categorical_accuracy: 0.8963
Epoch 116/200
17/17 - 1s - loss: 0.2932 - categorical_accuracy: 0.8926
Epoch 117/200
17/17 - 1s - loss: 0.3181 - categorical_accuracy: 0.8769
Epoch 118/200
17/17 - 1s - loss: 0.2834 - categorical_accuracy: 0.8926
Epoch 119/200
17/17 - 1s - loss: 0.3167 - categorical_accuracy: 0.8815
Epoch 120/200
17/17 - 1s - loss: 0.3063 - categorical_accuracy: 0.8898
Epoch 121/200
17/17 - 1s - loss: 0.2800 - categorical_accuracy: 0.9028
Epoch 122/200
17/17 - 1s - loss: 0.2590 - categorical_accuracy: 0.9019
Epoch 123/200
17/17 - 1s - loss: 0.2701 - categorical_accuracy: 0.9009
Epoch 124/200
17/17 - 1s - loss: 0.2643 - categorical_accuracy: 0.9111
Epoch 125/200
17/17 - 1s - loss: 0.2664 - categorical_accuracy: 0.9037
Epoch 126/200
17/17 - 1s - loss: 0.2799 - categorical_accuracy: 0.8991
Epoch 127/200
17/17 - 1s - loss: 0.2835 - categorical_accuracy: 0.8852
Epoch 128/200
17/17 - 1s - loss: 0.2939 - categorical_accuracy: 0.8944
Epoch 129/200
17/17 - 1s - loss: 0.2870 - categorical_accuracy: 0.8926
Epoch 130/200
17/17 - 1s - loss: 0.2863 - categorical_accuracy: 0.9000
Epoch 131/200
17/17 - 1s - loss: 0.2908 - categorical_accuracy: 0.8981
Epoch 132/200
17/17 - 1s - loss: 0.3431 - categorical_accuracy: 0.8722
Epoch 133/200
17/17 - 1s - loss: 0.3322 - categorical_accuracy: 0.8759
Epoch 134/200
17/17 - 1s - loss: 0.2863 - categorical_accuracy: 0.8852
Epoch 135/200
17/17 - 1s - loss: 0.2530 - categorical_accuracy: 0.9111
Epoch 136/200
17/17 - 1s - loss: 0.2504 - categorical_accuracy: 0.9093
Epoch 137/200
17/17 - 1s - loss: 0.2780 - categorical_accuracy: 0.9019
Epoch 138/200
17/17 - 1s - loss: 0.2531 - categorical_accuracy: 0.9065
Epoch 139/200
17/17 - 1s - loss: 0.2575 - categorical_accuracy: 0.9139
Epoch 140/200
17/17 - 1s - loss: 0.2756 - categorical_accuracy: 0.8907
Epoch 141/200
17/17 - 1s - loss: 0.2524 - categorical_accuracy: 0.9065
Epoch 142/200
17/17 - 1s - loss: 0.2626 - categorical_accuracy: 0.8926
Epoch 143/200
17/17 - 1s - loss: 0.2568 - categorical_accuracy: 0.9037
Epoch 144/200
17/17 - 1s - loss: 0.2533 - categorical_accuracy: 0.9120
Epoch 145/200
17/17 - 1s - loss: 0.2642 - categorical_accuracy: 0.9056
Epoch 146/200
17/17 - 1s - loss: 0.2311 - categorical_accuracy: 0.9194
Epoch 147/200
17/17 - 1s - loss: 0.2463 - categorical_accuracy: 0.9185
Epoch 148/200
17/17 - 1s - loss: 0.2348 - categorical_accuracy: 0.9120
Epoch 149/200
17/17 - 1s - loss: 0.2481 - categorical_accuracy: 0.9083
Epoch 150/200
17/17 - 1s - loss: 0.2484 - categorical_accuracy: 0.9028
Epoch 151/200
17/17 - 1s - loss: 0.2318 - categorical_accuracy: 0.9176
Epoch 152/200
17/17 - 1s - loss: 0.2374 - categorical_accuracy: 0.9130
Epoch 153/200
17/17 - 1s - loss: 0.2355 - categorical_accuracy: 0.9130
Epoch 154/200
17/17 - 1s - loss: 0.2269 - categorical_accuracy: 0.9074
Epoch 155/200
17/17 - 1s - loss: 0.2400 - categorical_accuracy: 0.9157
Epoch 156/200
17/17 - 1s - loss: 0.2442 - categorical_accuracy: 0.9083
Epoch 157/200
17/17 - 1s - loss: 0.2484 - categorical_accuracy: 0.9111
Epoch 158/200
17/17 - 1s - loss: 0.2377 - categorical_accuracy: 0.9102
Epoch 159/200
17/17 - 1s - loss: 0.2207 - categorical_accuracy: 0.9213
Epoch 160/200
17/17 - 1s - loss: 0.2114 - categorical_accuracy: 0.9194
Epoch 161/200
17/17 - 1s - loss: 0.2257 - categorical_accuracy: 0.9130
Epoch 162/200
17/17 - 1s - loss: 0.2729 - categorical_accuracy: 0.8972
Epoch 163/200
17/17 - 1s - loss: 0.2468 - categorical_accuracy: 0.9120
Epoch 164/200
17/17 - 1s - loss: 0.2307 - categorical_accuracy: 0.9241
Epoch 165/200
17/17 - 1s - loss: 0.2449 - categorical_accuracy: 0.9037
Epoch 166/200
17/17 - 1s - loss: 0.2300 - categorical_accuracy: 0.9185
Epoch 167/200
17/17 - 1s - loss: 0.2249 - categorical_accuracy: 0.9185
Epoch 168/200
17/17 - 1s - loss: 0.2352 - categorical_accuracy: 0.9139
Epoch 169/200
17/17 - 1s - loss: 0.2107 - categorical_accuracy: 0.9278
Epoch 170/200
17/17 - 1s - loss: 0.2362 - categorical_accuracy: 0.9148
Epoch 171/200
17/17 - 1s - loss: 0.2308 - categorical_accuracy: 0.9120
Epoch 172/200
17/17 - 1s - loss: 0.2319 - categorical_accuracy: 0.9102
Epoch 173/200
17/17 - 1s - loss: 0.2160 - categorical_accuracy: 0.9231
Epoch 174/200
17/17 - 1s - loss: 0.2106 - categorical_accuracy: 0.9213
Epoch 175/200
17/17 - 1s - loss: 0.2321 - categorical_accuracy: 0.9185
Epoch 176/200
17/17 - 1s - loss: 0.2390 - categorical_accuracy: 0.9046
Epoch 177/200
17/17 - 1s - loss: 0.2302 - categorical_accuracy: 0.9194
Epoch 178/200
17/17 - 1s - loss: 0.2166 - categorical_accuracy: 0.9213
Epoch 179/200
17/17 - 1s - loss: 0.2497 - categorical_accuracy: 0.9120
Epoch 180/200
17/17 - 1s - loss: 0.2184 - categorical_accuracy: 0.9194
Epoch 181/200
17/17 - 1s - loss: 0.2205 - categorical_accuracy: 0.9130
Epoch 182/200
17/17 - 1s - loss: 0.2101 - categorical_accuracy: 0.9278
Epoch 183/200
17/17 - 1s - loss: 0.2260 - categorical_accuracy: 0.9148
Epoch 184/200
17/17 - 1s - loss: 0.2104 - categorical_accuracy: 0.9250
Epoch 185/200
17/17 - 1s - loss: 0.2142 - categorical_accuracy: 0.9185
Epoch 186/200
17/17 - 1s - loss: 0.2144 - categorical_accuracy: 0.9204
Epoch 187/200
17/17 - 1s - loss: 0.1982 - categorical_accuracy: 0.9352
Epoch 188/200
17/17 - 1s - loss: 0.2075 - categorical_accuracy: 0.9269
Epoch 189/200
17/17 - 1s - loss: 0.1985 - categorical_accuracy: 0.9278
Epoch 190/200
17/17 - 1s - loss: 0.1981 - categorical_accuracy: 0.9259
Epoch 191/200
17/17 - 1s - loss: 0.1869 - categorical_accuracy: 0.9380
Epoch 192/200
17/17 - 1s - loss: 0.2005 - categorical_accuracy: 0.9352
Epoch 193/200
17/17 - 1s - loss: 0.1970 - categorical_accuracy: 0.9306
Epoch 194/200
17/17 - 1s - loss: 0.2068 - categorical_accuracy: 0.9259
Epoch 195/200
17/17 - 1s - loss: 0.2260 - categorical_accuracy: 0.9083
Epoch 196/200
17/17 - 1s - loss: 0.2149 - categorical_accuracy: 0.9204
Epoch 197/200
17/17 - 1s - loss: 0.2171 - categorical_accuracy: 0.9157
Epoch 198/200
17/17 - 1s - loss: 0.1980 - categorical_accuracy: 0.9324
Epoch 199/200
17/17 - 1s - loss: 0.1920 - categorical_accuracy: 0.9269
Epoch 200/200
17/17 - 1s - loss: 0.2077 - categorical_accuracy: 0.9296
34/34 [==============================] - 0s 1ms/step - loss: 0.2137 - categorical_accuracy: 0.9241
Train accuracy =  [0.21369488537311554, 0.9240740537643433]
4/4 [==============================] - 0s 2ms/step - loss: 0.7327 - categorical_accuracy: 0.7750
Test accuracy =  [0.7327048778533936, 0.7749999761581421]
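
Once trained, the Keras model returned above can be used for prediction directly (a minimal sketch using the preprocessed X_test; `model` here is the trained model returned in section 2.3):

probs = model.predict(X_test[:1])             # softmax probabilities, shape (1, 6)
print("predicted class =", np.argmax(probs, axis=1))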
