Andrew_NG Course 1, Week 4 Assignment Notes


Building a Deep Neural Network (original) @water


Disclaimer
   First of all, this post is based on the article "Planar data classification with one hidden layer" from 【Kulbear】's GitHub; I am publishing this blog post building on his work, with my own understanding added.


Goals:

  • Build a multi-layer neural network.
  • Use nonlinear activation functions, e.g. tanh and ReLU.
  • Compute the cross-entropy loss (the cost function).
  • Implement forward and backward propagation (the per-layer recurrence is sketched below).
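
Concretely, each layer l computes the recurrence that the code below implements:

  Z[l] = W[l]·A[l-1] + b[l],    A[l] = g[l](Z[l])

with A[0] = X, where g[l] is ReLU for the hidden layers and sigmoid for the output layer; the final output A[L] = AL holds the predictions.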

The materials used in this post have been uploaded to Baidu Netdisk (click to download); please download everything you need before you start.



OS: ubuntu18.04
GPU: GTX1080ti
Python version: 2.7 (or 3.7)
QQ group: join the deep learning exchange group for more environment-setup details and learning materials


# -*- coding: utf-8 -*-
from __future__ import division, print_function  # print() form works on both 2.7 and 3.7

import numpy as np
import h5py
import matplotlib.pyplot as plt
import testCases  # see the resource pack, or copy from the bottom of the article
from dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward  # see the resource pack
import lr_utils  # see the resource pack, or copy from the bottom of the article

def initialize_parameters_deep(layers_dims):
    '''
    Initialize the parameters of an L-layer deep neural network.
    :param layers_dims: list of layer sizes, e.g. [5,4,3]
    :return: parameters: {"W1":W1,"b1":b1,...,"WL":WL,"bL":bL}
    '''
    np.random.seed(3)
    parameters = {}
    L = len(layers_dims)  # number of layers, including the input layer
    for l in range(1, L):
        # Scale the random weights by 1/sqrt(n[l-1]) so activations keep a stable variance
        parameters["W" + str(l)] = np.random.randn(layers_dims[l], layers_dims[l - 1]) / np.sqrt(layers_dims[l - 1])
        parameters["b" + str(l)] = np.zeros((layers_dims[l], 1))
    return parameters
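
# Quick shape check (illustrative only; the layer sizes below are arbitrary):
# each W[l] has shape (n[l], n[l-1]) and each b[l] has shape (n[l], 1).
# params = initialize_parameters_deep([5, 4, 3])
# for k in sorted(params):
#     print(k, params[k].shape)   # W1 (4, 5), W2 (3, 4), b1 (4, 1), b2 (3, 1)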

def linear_forward(A,W,b,activation):
    '''
    Forward propagation for a single layer: linear step followed by the activation.
    :param A: A[L-1], ndim (n[L-1],m)
    :param W: W[L],   ndim (n[L],n[L-1])
    :param b: b[L],   ndim (n[L],1)
    :param activation: "sigmoid" or "relu"
    :return:
           A[L],  ndim (n[L],m)
           cache = (linear_cache,activation_cache)
           linear_cache=(A,W,b)  activation_cache=Z[L]
    '''
    Z = np.dot(W,A) + b
    assert(Z.shape == (W.shape[0],A.shape[1]))
    linear_cache = (A,W,b)

    if activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    elif activation == "relu":
        A, activation_cache = relu(Z)

    assert(A.shape == (W.shape[0],A.shape[1]))
    cache = (linear_cache,activation_cache)

    return A,cache
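
# Illustrative sanity check (arbitrary sizes): a layer of 4 units fed by 3 units, 2 examples.
# np.random.seed(1)
# A_prev = np.random.randn(3, 2)
# W = np.random.randn(4, 3)
# b = np.zeros((4, 1))
# A, cache = linear_forward(A_prev, W, b, "relu")
# print(A.shape)   # (4, 2)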

def L_model_forward(X,parameters):
    '''
    Compute the forward pass of the deep network, layers 1 to L:
    [LINEAR->RELU] * (L-1) -> LINEAR->SIGMOID.
    :param X: numpy array, ndim (n0,m)
    :param parameters: {"W1":W1,"b1":b1,...,"WL":WL,"bL":bL}
    :return:  AL, ndim (n[L],m)
              caches: list of per-layer caches, each cache = (linear_cache,activation_cache)
              with linear_cache=(A,W,b) and activation_cache=Z[l]
    '''
    caches = []
    A = X
    L = len(parameters)//2
    for l in range(1,L):
        A_prev  = A
        A,cache = linear_forward(A_prev,parameters["W"+str(l)],parameters["b"+str(l)],"relu")
        caches.append(cache)
    AL, cache = linear_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], "sigmoid")
    caches.append(cache)
    assert (AL.shape == (1, X.shape[1]))
    return AL, caches
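
# Illustrative sanity check (arbitrary sizes; the output layer must have 1 unit
# to satisfy the assert above):
# np.random.seed(2)
# X_demo = np.random.randn(5, 10)
# params = initialize_parameters_deep([5, 4, 1])
# AL, caches = L_model_forward(X_demo, params)
# print(AL.shape, len(caches))   # (1, 10) 2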

def compute_cost(AL,Y):
    """
    实施等式(4)定义的成本函数。

    参数:
        AL - 与标签预测相对应的概率向量,维度为(1,示例数量)
        Y - 标签向量(例如:如果不是猫,则为0,如果是猫则为1),维度为(1,数量)

    返回:
        cost - 交叉熵成本
    """
    m = Y.shape[1]
    cost = -np.sum(np.multiply(np.log(AL),Y) + np.multiply(np.log(1 - AL), 1 - Y)) / m

    cost = np.squeeze(cost)
    assert(cost.shape == ())
    return cost
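
# For reference, the cost computed above is the standard cross-entropy
# (the "equation (4)" mentioned in the docstring):
#
#   J = -(1/m) * sum over i of [ y(i)*log(a(i)) + (1 - y(i))*log(1 - a(i)) ]
#
# where a(i) = AL[0, i] is the predicted probability for example i.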

def linear_backward(dA,cache,activation):
    '''
    Backward propagation for a single layer (linear + activation).

    :param dA - post-activation gradient of the current layer l
    :param cache - tuple of values stored for efficient backward propagation,
                   (linear_cache, activation_cache) with linear_cache=(A,W,b), activation_cache=Z[l]
    :param activation - name of the activation used in this layer, a string: "sigmoid" or "relu"
    :return:
           dA_prev - gradient of the cost with respect to the activation of the previous layer (l-1), same shape as A_prev
           dW - gradient of the cost with respect to W of the current layer l, same shape as W
           db - gradient of the cost with respect to b of the current layer l, same shape as b
    '''
    linear_cache, activation_cache = cache
    A_prev, W, b = linear_cache
    m = A_prev.shape[1]

    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)

    dW = np.dot(dZ, A_prev.T) / m
    db = np.sum(dZ, axis=1, keepdims=True) / m
    dA_prev = np.dot(W.T, dZ)

    assert (dA_prev.shape == A_prev.shape)
    assert (dW.shape == W.shape)
    assert (db.shape == b.shape)

    return dA_prev, dW, db
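
# The gradients above implement, for Z = W·A_prev + b:
#   dW      = (1/m) * dZ · A_prev.T
#   db      = (1/m) * sum(dZ, axis=1, keepdims=True)
#   dA_prev = W.T · dZ
# where dZ = dA * g'(Z) is supplied by relu_backward / sigmoid_backward.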

def L_model_backward(AL,Y,caches):
    """
    对[LINEAR-> RELU] *(L-1) - > LINEAR - > SIGMOID组执行反向传播,就是多层网络的向后传播

    参数:
     AL - 概率向量,正向传播的输出(L_model_forward())
     Y - 标签向量(例如:如果不是猫,则为0,如果是猫则为1),维度为(1,数量)
     caches - 包含以下内容的cache列表:
                 linear_activation_forward("relu")的cache,不包含输出层
                 linear_activation_forward("sigmoid")的cache

    返回:
     grads - 具有梯度值的字典
              grads [“dA”+ str(l)] = ...
              grads [“dW”+ str(l)] = ...
              grads [“db”+ str(l)] = ...
    """
    grads = {}
    L = len(caches)
    m = AL.shape[1]
    Y = Y.reshape(AL.shape)
    # Derivative of the cross-entropy cost with respect to AL
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))

    current_cache = caches[L-1]
    # Output (sigmoid) layer; note the naming convention here: grads["dA" + str(L)]
    # stores the gradient with respect to A[L-1], the activation feeding the output layer.
    grads["dA" + str(L)], grads["dW" + str(L)], grads["db" + str(L)] = linear_backward(dAL, current_cache, "sigmoid")

    # Hidden (relu) layers, from layer L-1 down to layer 1
    for l in reversed(range(L-1)):
        current_cache = caches[l]
        dA_prev_temp, dW_temp, db_temp = linear_backward(grads["dA" + str(l + 2)], current_cache, "relu")
        grads["dA" + str(l + 1)] = dA_prev_temp
        grads["dW" + str(l + 1)] = dW_temp
        grads["db" + str(l + 1)] = db_temp

    return grads
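
# Illustrative use (shapes only, all-zero labels), building on the commented
# L_model_forward example above:
# grads = L_model_backward(AL, np.zeros((1, 10)), caches)
# print(sorted(grads))   # ['dA1', 'dA2', 'dW1', 'dW2', 'db1', 'db2']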

def update_parameters(parameters, grads, learning_rate):
    """
    使用梯度下降更新参数

    参数:
     parameters - 包含你的参数的字典
     grads - 包含梯度值的字典,是L_model_backward的输出

    返回:
     parameters - 包含更新参数的字典
                   参数[“W”+ str(l)] = ...
                   参数[“b”+ str(l)] = ...
    """
    L = len(parameters) // 2 #整除
    for l in range(L):
        parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * grads["dW" + str(l + 1)]
        parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * grads["db" + str(l + 1)]

    return parameters
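
# The update rule applied above, for each layer l and learning rate alpha:
#   W[l] := W[l] - alpha * dW[l]
#   b[l] := b[l] - alpha * db[l]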

def L_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False,isPlot=True):
    """
    实现一个L层神经网络:[LINEAR-> RELU] *(L-1) - > LINEAR-> SIGMOID。

    参数:
        X - 输入的数据,维度为(n_x,例子数)
        Y - 标签,向量,0为非猫,1为猫,维度为(1,数量)
        layers_dims - 层数的向量,维度为(n_y,n_h,···,n_h,n_y)
        learning_rate - 学习率
        num_iterations - 迭代的次数
        print_cost - 是否打印成本值,每100次打印一次
        isPlot - 是否绘制出误差值的图谱

    返回:
     parameters - 模型学习的参数。 然后他们可以用来预测。
    """
    np.random.seed(1)
    costs = []

    parameters = initialize_parameters_deep(layers_dims)

    for i in range(0,num_iterations):
        AL , caches = L_model_forward(X,parameters)
        cost = compute_cost(AL,Y)
        grads = L_model_backward(AL,Y,caches)
        parameters = update_parameters(parameters,grads,learning_rate)

        # Record the cost every 100 iterations; printing is skipped if print_cost=False
        if i % 100 == 0:
            # record the cost
            costs.append(cost)
            # print the cost if requested
            if print_cost:
                print("Iteration", i, "- cost:", np.squeeze(cost))
    # Training finished; plot the cost curve if requested
    if isPlot:
        plt.plot(np.squeeze(costs))
        plt.ylabel('cost')
        plt.xlabel('iterations (per hundreds)')
        plt.title("Learning rate =" + str(learning_rate))
        plt.show()
    return parameters

def predict(X, y, parameters):
    """
    该函数用于预测L层神经网络的结果,当然也包含两层

    参数:
     X - 测试集
     y - 标签
     parameters - 训练模型的参数

    返回:
     p - 给定数据集X的预测
    """

    m = X.shape[1]
    n = len(parameters) // 2  # number of layers in the neural network
    p = np.zeros((1,m))
    # forward propagation with the trained parameters
    probas, caches = L_model_forward(X, parameters)
    for i in range(0, probas.shape[1]):
        if probas[0,i] > 0.5:
            p[0,i] = 1
        else:
            p[0,i] = 0

    print("准确度为: "  + str(float(np.sum((p == y))/m)))

    return p

train_set_x_orig , train_set_y , test_set_x_orig , test_set_y , classes = lr_utils.load_dataset()
print(train_set_x_orig.shape)
print(train_set_y.shape)
train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T

train_x = train_x_flatten / 255
train_y = train_set_y
test_x = test_x_flatten / 255
test_y = test_set_y
print(test_x.shape)
print(test_y.shape)

# The imports below are only needed for the optional image test that is commented out further down:
# import skimage.io as io
# import cv2
# from PIL import Image


# # Read an image with OpenCV # (1200, 1200, 3)
# img_cv = cv2.imread('images/my_reboot.jpg')            # cv2.imread() -> np.array, (H x W x C), [0, 255], BGR
# img_cv = cv2.cvtColor(img_cv, cv2.COLOR_BGR2RGB)
# plt.imshow(img_cv)
# reSize1 = cv2.resize(img_cv, (64, 64), interpolation=cv2.INTER_AREA).reshape(12288, 1)
# print('reSize1.shape', reSize1.shape)
# my_label_y = [1]
# plt.figure()
# plt.imshow(reSize1)
# plt.show()

# from scipy import ndimage
#
# import scipy
#
layers_dims = [12288, 20, 7, 5, 1]  # 5-layer model
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True, isPlot=True)
pred_train = predict(train_x, train_y, parameters)  # training set
pred_test = predict(test_x, test_y, parameters)  # test set
# pred_test = predict(reSize1, my_label_y, parameters)  # test on the custom image
# ## START CODE HERE ##
# my_image = "my_reboot.jpg"
# my_label_y = [1]
# fname = "images/" + my_image
# image = np.array(ndimage.imread(fname, flatten=False))
# my_image = scipy.misc.imresize(image, size=(64,64)).reshape((64*64*3,1))
# my_predicted_image = predict(my_image, my_label_y, parameters)
#
# plt.imshow(image)
# print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") +  "\" picture.")

##########################################################################
# pred_train = predict(train_x, train_y, parameters)  # training set
# pred_test = predict(test_x, test_y, parameters)  # test set
# def print_mislabeled_images(classes, X, y, p):
#     """
#     Plot the images whose prediction differs from the actual label.
#         X - dataset
#         y - actual labels
#         p - predictions
#     """
#     a = p + y
#     mislabeled_indices = np.asarray(np.where(a == 1))
#     plt.rcParams['figure.figsize'] = (40.0, 40.0) # set default size of plots
#     num_images = len(mislabeled_indices[0])
#     for i in range(num_images):
#         index = mislabeled_indices[1][i]
#
#         plt.subplot(3, num_images, i + 1)
#         plt.imshow(X[:,index].reshape(64,64,3), interpolation='nearest')
#         plt.axis('off')
#         plt.title("Prediction: " + classes[int(p[0,index])].decode("utf-8") + " \n Class: " + classes[y[0,index]].decode("utf-8"))
# # print_mislabeled_images(classes, test_x, test_y,pred_test)
#
#
#
# my_image = "my_image.jpg"
# my_label_y = [1]
# fname = "images/" + my_image
# image = np.array(ndimage.imread(fname, flatten=False))
# my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px*num_px*3,1))
# my_predicted_image = predict(my_image, my_label_y, 5)
# plt.imshow(image)
# print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") +  "\" picture.")
#
# plt.show()







