python - Computing the partial derivatives of a deep neural network with respect to its inputs

Tags: python, neural-network, backpropagation, derivative

I am trying to compute the derivative of a neural network with 2 or more hidden layers with respect to its inputs. So this is not "standard backpropagation", because I am not interested in how the output changes with the weights. And I don't intend to use it to train my network (if that means the backpropagation tag should be removed, let me know, but I suspect what I need is not that different).

The reason I'm interested in the derivative here is that I have a test set which sometimes gives me a matching [x1, x2] : [y] pair and sometimes an [x1, x2] : [d(y)/dx1] or [x1, x2] : [d(y)/dx2]. I then use a particle swarm algorithm to train my network.

I like diagrams, so here is a quick sketch of my network:

[Image: My network]

What I want is for the compute_derivative method to return a numpy array of the following form:

[Image: the desired output array of partial derivatives]
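In words (a purely illustrative sketch with made-up numbers; the actual values would come from the network): one row per output and one column per input, so for my two inputs and single output something like

import numpy as np

# Purely illustrative, made-up numbers: [[d(y)/dx1, d(y)/dx2]]
desired = np.array([[0.123, 0.456]])
print(desired.shape)  # (1, 2) -- (number of outputs, number of inputs)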

This is my attempt so far, but at the end I can't seem to get an array whose size matches the number of my inputs. I don't know what I'm doing wrong.

def compute_derivative(self):
"""Computes the network derivative and returns an array with the change in output with respect to each input"""
    self.compute_layer_derivative(0)
    for l in np.arange(1,self.size):
        dl = self.compute_layer_derivative(l)
        dprev = self.layers[l-1].derivatives
        self.output_derivatives = dl.T.dot(dprev)

    return self.output_derivatives

def compute_layer_derivative(self, l_id):
    wL = self.layers[l_id].w
    zL = self.layers[l_id].output
    daL = self.layers[l_id].f(zL, div=1)
    daLM = np.repeat(daL,wL.shape[0], axis=0)

    self.layers[l_id].derivatives = np.multiply(daLM,wL)

    return self.layers[l_id].derivatives

In case you want to run the whole thing, I've made a stripped-down, commented version that works with copy-paste (see below). Thanks for your help!

# -*- coding: utf-8 -*-

import numpy as np

def sigmoid(x, div = 0):
    if div == 1: #first derivative f'
        return np.exp(-x) / (1. + np.exp(-x))**2.
    if div == 2: # second derivative f''
        return - np.exp(x) * (np.exp(x) - 1) / (1. + np.exp(x))**3.
    return 1. / (1. + np.exp(-x)) # f

def linear(x, div = 0):
    if div == 1: #first derivative f'
        return np.full(x.shape,1)
    if div == 2: # second derivative f''
        return np.zeros(x.shape)
    return x # f

class Layer():
    def __init__(self, in_n, h_n, activation, bias = True, debug = False):
        self.w = 2*np.random.random((in_n, h_n)) - 1 # synaptic weights with 0 mean
        self.f = activation
        self.output = None
        self.activation = None
        self.derivatives = np.array([[None for i in range(in_n+1)]]) #+1 for global dev
        if bias:
            self.b = 2*np.random.random((1, h_n)) - 1
        else:
            self.b = None

        if debug:
            self.w = np.full((in_n, h_n), 1.)
            if self.b is not None: self.b = np.full((1, h_n), 1.)

    def compute(self, inputs):
        if self.w.shape[0] != inputs.shape[1]:
            raise ValueError("Inputs dimensions do not match test data dim.")
        if self.b is None:
            self.output = np.dot(inputs, self.w)
        else:
            self.output = np.dot(inputs, self.w) + self.b

        self.activation = self.f(self.output)

class NeuralNetwork():
    def __init__(self, nb_layers, in_NN, h_density, out_NN, debug = False):
        self.debug = debug
        self.layers = []
        self.size = nb_layers+1  
        self.output_derivatives = None
        self.output = None
        self.in_N = in_NN
        self.out_N = out_NN
        if debug: 
            print("Input Layer with {} inputs.".format(in_NN))

        #create hidden layers
        current_inputs = in_NN
        for l in range(self.size - 1):
            self.layers.append(Layer(current_inputs, h_density, sigmoid, debug = debug))
            current_inputs = h_density
            if debug:
                print("Hidden Layer {} with {} inputs and {} neurons.".format(l+1, self.layers[l].w.shape[0], self.layers[l].w.shape[1]))
        #create output layer
        self.layers.append(Layer(current_inputs, out_NN, linear, bias=False, debug = debug))
        if debug:
            print("Output Layer with {} inputs and {} outputs.".format(self.layers[-1].w.shape[0], self.layers[-1].w.shape[1]))
            #print("with w: {}".format(self.layers[l].w))
            print("ANN size = {}, with {} Layers\n\n".format( self.size, len(self.layers)))

    def compute(self, point):
        curr_inputs = point
        for l in range(self.size):
            self.layers[l].compute(curr_inputs)
            curr_inputs = self.layers[l].activation
        self.output = curr_inputs
        if self.debug: print("ANN output: ",curr_inputs)
        return self.output

    def compute_derivative(self, order, point):
        """ If the network has not been computed, compute it before getting
            the derivative. This might be a bit expensive..."""
        if self.layers[self.size-1].output is None:
            self.compute(point)

        #Compute output layer total derivative
        self.compute_layer_derivative(self.size-1, order)
        self.output_derivatives = self.get_partial_derivatives_to_outputs(self.size-1)
        print(self.output_derivatives)

        for l in np.arange(1,self.size):
            l = self.size-1 - l
            self.compute_layer_derivative(l, order)
            if l > 0: #if we are not at first hidden layer compute the total derivative
                self.output_derivatives *= self.get_total_derivative_to_inputs(l)
            else:# get the each output derivative with respect to each input
                backprop_dev_to_outs = np.repeat(np.matrix(self.output_derivatives),self.in_N, axis=0).T
                dev_to_inputs = np.repeat(np.matrix(self.get_partial_derivatives_to_inputs(l)).T,self.out_N, axis=1).T
                self.output_derivatives = np.multiply(backprop_dev_to_outs, dev_to_inputs)

            if self.debug: print("output derivatives: ",self.output_derivatives)
        return self.output_derivatives

    def get_total_derivative(self,l_id):
        return np.sum(self.get_partial_derivatives_to_inputs(l_id))

    def get_total_derivative_to_inputs(self,l_id):
        return np.sum(self.get_partial_derivatives_to_inputs(l_id))

    def get_partial_derivatives_to_inputs(self,l_id):
        return np.sum(self.layers[l_id].derivatives, axis=1)    

    def get_partial_derivatives_to_outputs(self,l_id):
        return np.sum(self.layers[l_id].derivatives, axis=0)

    def compute_layer_derivative(self, l_id, order):  
        if self.debug: print("\n\ncurrent layer is ", l_id)
        wL = self.layers[l_id].w
        zL = self.layers[l_id].output
        daL = self.layers[l_id].f(zL, order)
        daLM = np.repeat(daL,wL.shape[0], axis=0)

        self.layers[l_id].derivatives = np.multiply(daLM,wL)

        if self.debug:
            print("L_id: {}, a_f: {}".format(l_id, self.layers[l_id].f))
            print("L_id: {}, dev: {}".format(l_id, self.get_total_derivative_to_inputs(l_id)))

        return self.layers[l_id].derivatives

#nb_layers, in_NN, h_density, out_NN, debug = False
nn = NeuralNetwork(1,2,2,1, debug= True)
nn.compute(np.array([[1,1]]))# head value
nn.compute_derivative(1,np.array([[1,1]])) #first derivative

Edit, based on Sirguy's answer:

# Here we assume that the layer has sigmoid activation
def Jacobian(x = np.array([[1,1]]), w = np.array([[1,1],[1,1]]), b = np.array([[1,1]])):
    return sigmoid(x.dot(w) + b, div=1) * w # J(S, x); sigmoid(..., div=1) is the sigmoid first derivative defined above

If the network has 2 hidden layers with sigmoid activation and an output layer with sigmoid activation (so that we can use the same function as above), we have:

J_L1 =  Jacobian(x = np.array([[1,1]])) # where [1,1] are the inputs to the network (i.e. the values of the neurons in the input layer)
J_L2 =  Jacobian(x = np.array([[3,3]])) # where [3,3] are the neuron values of layer 1 before activation
# in the output layer the weights and biases are adjusted as there is 1 neuron rather than 2
J_Lout = Jacobian(x = np.array([[2.90514825, 2.90514825]]), w = np.array([[1],[1]]), b = np.array([[1]]))# where [2.905,2.905] are the neuron values of layer 2 before activation
J_out_to_in = J_Lout.T.dot(J_L2).dot(J_L1)
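
For reference, the hard-coded intermediate values above come straight out of a forward pass through the debug network (all weights and biases equal to 1); a quick sketch of where they come from, reusing the sigmoid function from the listing above:

x = np.array([[1., 1.]])
w = np.array([[1., 1.], [1., 1.]])
b = np.array([[1., 1.]])

z1 = x.dot(w) + b             # [[3., 3.]] -> layer 1 values before activation
z2 = sigmoid(z1).dot(w) + b   # [[2.90514825, 2.90514825]] -> layer 2 values before activation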

Best Answer

Here is how I derived what your example should give:

# i'th component of vector-valued function S(x) (sigmoid-weighted layer)
S_i(x) = 1 / (1 + exp(-(w_i . x + b_i))) # . for matrix multiplication here

# i'th component of vector-valued function L(x) (linear-weighted layer)
L_i(x) = w_i . x # different weights than S.
# as it happens our L(x) outputs 1 value, so it is in fact a scalar function

F(x) = L(S(x)) # final output value

#derivative of F, denoted as J(F, x) to mean the Jacobian of the function F, evaluated at x.
J(F, x) = J(L(S(x)), x) = J(L, S(x)) . J(S, x) # chain rule for multivariable, vector-valued functions

#First, what's the derivative of L?
J(L, S(x)) = L 

This is often a surprising result, but you can verify it yourself by computing the partial derivatives of M . x for some random matrix M. If you compute all the derivatives and put them into the Jacobian, you get back M.
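
As a quick numerical sanity check of that claim (a small sketch, not part of the derivation): build the Jacobian of M . x with central finite differences and compare it with M.

import numpy as np

M = np.random.random((3, 2))
x = np.random.random(2)
eps = 1e-6

J = np.empty((3, 2))
for j in range(2):
    dx = np.zeros(2)
    dx[j] = eps
    # j'th column of the Jacobian: d(M.x)/dx_j
    J[:, j] = (M.dot(x + dx) - M.dot(x - dx)) / (2 * eps)

print(np.allclose(J, M))  # True: the Jacobian of M.x is M itself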

#Now what's the derivative of S? Compute via formula
d(S_i(x))/dx_j = w_ij * exp(-(w_i . x + b_i)) / (1 + exp(-(w_i . x + b_i)))**2 # w_ij is the j'th component of the vector w_i
#For the gradient of S_i (which is just one component of S), we get
J(S_i, x) = (exp(-(w_i . x + b_i)) / (1 + exp(-(w_i . x + b_i)))**2) * w_i # remember this is a vector because w_i is a vector

Now take your debug example where everything is 1.

w_i = b = x = [1, 1]

#define a to make this less cluttered
a = exp(-(w_i . x + b)) = exp(-3)

J(S_i, x) = a / (1 + a)**2 * [1, 1]
J(S, x) = a / (1 + a)**2 * [[1, 1], [1, 1]]
J(L, S(x)) = [1, 1] #Doesn't depend on S(x)

J(F, x) = J(L, S(x)) . J(S, x) = (a / (1 + a)**2) * [1, 1] . [[1, 1], [1, 1]]
J(F, x) = (a / (1 + a)**2) * [2, 2] = (2 * a / (1 + a)**2) * [1, 1]
J(F, x) = [0.0903533, 0.0903533]
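
For completeness, the same number falls out of a couple of lines of numpy (a minimal check of the arithmetic above, using the all-ones debug values):

import numpy as np

a = np.exp(-3)                                         # exp(-(w_i . x + b)) with everything equal to 1
J_S = a / (1 + a)**2 * np.array([[1., 1.], [1., 1.]])  # J(S, x)
J_L = np.array([1., 1.])                               # J(L, S(x))
print(J_L.dot(J_S))                                    # approx. [0.0903533 0.0903533]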

Hopefully that helps you reorganize your code a bit. You can't compute the derivative here using just the value of w_i . x; you need w_i and x separately to compute everything correctly.

EDIT

Since I found this interesting, here is my python script to compute the value and first derivatives of a neural network:

import numpy as np

class Layer:
    def __init__(self, weights_matrix, bias_vector, sigmoid_activation = True):
        self.weights_matrix = weights_matrix
        self.bias_vector = bias_vector
        self.sigmoid_activation = sigmoid_activation

    def compute_value(self, x_vector):
        result = np.add(np.dot(self.weights_matrix, x_vector), self.bias_vector)
        if self.sigmoid_activation:
            result = np.exp(-result)
            result = 1 / (1 + result)

        return result

    def compute_value_and_derivative(self, x_vector):
        if not self.sigmoid_activation:
            return (self.compute_value(x_vector), self.weights_matrix)
        temp = np.add(np.dot(self.weights_matrix, x_vector), self.bias_vector)
        temp = np.exp(-temp)
        value = 1.0 / (1 + temp)
        temp = temp / (1 + temp)**2
        #pre-multiplying by a diagonal matrix multiplies each row by
        #the corresponding diagonal element
        #(1st row with 1st value, 2nd row with 2nd value, etc...)
        jacobian = np.dot(np.diag(temp), self.weights_matrix)
        return (value, jacobian)

class Network:
    def __init__(self, layers):
        self.layers = layers

    def compute_value(self, x_vector):
        for l in self.layers:
            x_vector = l.compute_value(x_vector)

        return x_vector

    def compute_value_and_derivative(self, x_vector):
        x_vector, jacobian = self.layers[0].compute_value_and_derivative(x_vector)
        for l in self.layers[1:]:
            x_vector, j = l.compute_value_and_derivative(x_vector)
            jacobian = np.dot(j, jacobian)

        return x_vector, jacobian

#first weights
l1w = np.array([[1,1],[1,1]])
l1b = np.array([1,1])

l2w = np.array([[1,1],[1,1]])
l2b = np.array([1,1])

l3w = np.array([1, 1])
l3b = np.array([0])

nn = Network([Layer(l1w, l1b),
              Layer(l2w, l2b),
              Layer(l3w, l3b, False)])

r = nn.compute_value_and_derivative(np.array([1,1]))
print(r)
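
If you want to convince yourself the jacobian is right, here is a small optional check (not part of the script above) that compares it against central finite differences of compute_value:

def finite_difference_jacobian(net, x, eps=1e-6):
    x = np.asarray(x, dtype=float)
    cols = []
    for j in range(x.size):
        dx = np.zeros_like(x)
        dx[j] = eps
        # j'th column: numerical d(output)/dx_j
        cols.append((net.compute_value(x + dx) - net.compute_value(x - dx)) / (2 * eps))
    return np.column_stack(cols)

print(finite_difference_jacobian(nn, np.array([1, 1])))  # values should match the jacobian in r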

Regarding python - computing the partial derivatives of a deep neural network with respect to its inputs, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/52185475/
