deep-learning - caffe reshape /上采样全连接层

标签 deep-learning reshape caffe

假设我们有一个像这样的层:

layer {
  name: "fully-connected"
  type: "InnerProduct"
  bottom: "bottom"
  top: "top"
  inner_product_param {
    num_output: 1
  }
}

输出是 batch_size x 1。在几篇论文中(例如顶部 link1 第 3 页的图,或顶部 link2 第 4 页),我看到他们最终使用了这样的全连接层,对 2D 图像进行像素级预测。如何将这样的一维输出转换为二维图像?我在考虑用 reshape 或反卷积(deconvolution)来实现,但不清楚它们具体是如何工作的。一个简单的例子会很有帮助。

更新:我的输入图像为 304x228,ground_truth(深度图像)为 75x55。

################# Main net ##################

layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 96
    kernel_size: 11
    stride: 4
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu1"
  type: "ReLU"
  bottom: "conv1"
  top: "conv1"
}
layer {
  name: "norm1"
  type: "LRN"
  bottom: "conv1"
  top: "norm1"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool1"
  type: "Pooling"
  bottom: "norm1"
  top: "pool1"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv2"
  type: "Convolution"
  bottom: "pool1"
  top: "conv2"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 2
    kernel_size: 5
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu2"
  type: "ReLU"
  bottom: "conv2"
  top: "conv2"
}
layer {
  name: "norm2"
  type: "LRN"
  bottom: "conv2"
  top: "norm2"
  lrn_param {
    local_size: 5
    alpha: 0.0001
    beta: 0.75
  }
}
layer {
  name: "pool2"
  type: "Pooling"
  bottom: "norm2"
  top: "pool2"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "conv3"
  type: "Convolution"
  bottom: "pool2"
  top: "conv3"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu3"
  type: "ReLU"
  bottom: "conv3"
  top: "conv3"
}
layer {
  name: "conv4"
  type: "Convolution"
  bottom: "conv3"
  top: "conv4"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 384
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu4"
  type: "ReLU"
  bottom: "conv4"
  top: "conv4"
}
layer {
  name: "conv5"
  type: "Convolution"
  bottom: "conv4"
  top: "conv5"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 256
    pad: 1
    kernel_size: 3
    group: 2
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relu5"
  type: "ReLU"
  bottom: "conv5"
  top: "conv5"
}
layer {
  name: "pool5"
  type: "Pooling"
  bottom: "conv5"
  top: "pool5"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}
layer {
  name: "fc6"
  type: "InnerProduct"
  bottom: "pool5"
  top: "fc6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4096
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}
layer {
  name: "relufc6"
  type: "ReLU"
  bottom: "fc6"
  top: "fc6"
}
layer {
  name: "drop6"
  type: "Dropout"
  bottom: "fc6"
  top: "fc6"
  dropout_param {
    dropout_ratio: 0.5
  }
}

layer {
  name: "fc7"
  type: "InnerProduct"
  bottom: "fc6"
  top: "fc7"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  inner_product_param {
    num_output: 4070
    weight_filler {
      type: "gaussian"
      std: 0.005
    }
    bias_filler {
      type: "constant"
      value: 0.1
    }
  }
}

layer {
  type: "Reshape"
  name: "reshape"
  bottom: "fc7"
  top: "fc7_reshaped"
  reshape_param {
    shape { dim:  1  dim: 1  dim:  55 dim: 74 }
  }
}

layer {
  name: "deconv1"
  type: "Deconvolution"
  bottom: "fc7_reshaped"
  top: "deconv1"
  convolution_param {
    num_output: 64
    kernel_size: 5
    pad: 2
    stride: 1
      #group: 256
    weight_filler {
        type: "bilinear"
    }
    bias_term: false
  }
}

#########################

layer {
  name: "conv6"
  type: "Convolution"
  bottom: "data"
  top: "conv6"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    num_output: 63
    kernel_size: 9
    stride: 2
    pad: 1
    weight_filler {
      type: "gaussian"
      std: 0.01
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}
layer {
  name: "relu6"
  type: "ReLU"
  bottom: "conv6"
  top: "conv6"
}

layer {
  name: "pool6"
  type: "Pooling"
  bottom: "conv6"
  top: "pool6"
  pooling_param {
    pool: MAX
    kernel_size: 3
    stride: 2
  }
}

########################
layer {
  name: "concat"
  type: "Concat"
  bottom: "deconv1"
  bottom: "pool6"
  top: "concat"
  concat_param {
    concat_dim: 1
  }
}

layer {
  name: "conv7"
  type: "Convolution"
  bottom: "concat"
  top: "conv7"
  convolution_param {
    num_output: 64
    kernel_size: 5
    pad: 2
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.011
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
    name: "relu7"
    type: "ReLU"
    bottom: "conv7"
    top: "conv7"
    relu_param{
    negative_slope: 0.01
        engine: CUDNN
    }
}

layer {
  name: "conv8"
  type: "Convolution"
  bottom: "conv7"
  top: "conv8"
  convolution_param {
    num_output: 64
    kernel_size: 5
    pad: 2
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.011
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
    name: "relu8"
    type: "ReLU"
    bottom: "conv8"
    top: "conv8"
    relu_param{
    negative_slope: 0.01
        engine: CUDNN
    }
}

layer {
  name: "conv9"
  type: "Convolution"
  bottom: "conv8"
  top: "conv9"
  convolution_param {
    num_output: 1
    kernel_size: 5
    pad: 2
    stride: 1
    weight_filler {
      type: "gaussian"
      std: 0.011
    }
    bias_filler {
      type: "constant"
      value: 0
    }
  }
}

layer {
    name: "relu9"
    type: "ReLU"
    bottom: "conv9"
    top: "result"
    relu_param{
    negative_slope: 0.01
        engine: CUDNN
    }
}

日志:

I1108 19:34:57.239722  4277 data_layer.cpp:41] output data size: 1,1,228,304
I1108 19:34:57.243340  4277 data_layer.cpp:41] output data size: 1,1,55,74
I1108 19:34:57.247392  4277 net.cpp:150] Setting up conv1
I1108 19:34:57.247407  4277 net.cpp:157] Top shape: 1 96 55 74 (390720)
I1108 19:34:57.248191  4277 net.cpp:150] Setting up pool1
I1108 19:34:57.248196  4277 net.cpp:157] Top shape: 1 96 27 37 (95904)
I1108 19:34:57.253263  4277 net.cpp:150] Setting up conv2
I1108 19:34:57.253276  4277 net.cpp:157] Top shape: 1 256 27 37 (255744)
I1108 19:34:57.254202  4277 net.cpp:150] Setting up pool2
I1108 19:34:57.254220  4277 net.cpp:157] Top shape: 1 256 13 18 (59904)
I1108 19:34:57.269943  4277 net.cpp:150] Setting up conv3
I1108 19:34:57.269961  4277 net.cpp:157] Top shape: 1 384 13 18 (89856)
I1108 19:34:57.285303  4277 net.cpp:150] Setting up conv4
I1108 19:34:57.285338  4277 net.cpp:157] Top shape: 1 384 13 18 (89856)
I1108 19:34:57.294801  4277 net.cpp:150] Setting up conv5
I1108 19:34:57.294841  4277 net.cpp:157] Top shape: 1 256 13 18 (59904)
I1108 19:34:57.295207  4277 net.cpp:150] Setting up pool5
I1108 19:34:57.295210  4277 net.cpp:157] Top shape: 1 256 6 9 (13824)
I1108 19:34:57.743222  4277 net.cpp:150] Setting up fc6
I1108 19:34:57.743259  4277 net.cpp:157] Top shape: 1 4096 (4096)
I1108 19:34:57.881680  4277 net.cpp:150] Setting up fc7
I1108 19:34:57.881718  4277 net.cpp:157] Top shape: 1 4070 (4070)

I1108 19:34:57.881826  4277 net.cpp:150] Setting up reshape
I1108 19:34:57.881846  4277 net.cpp:157] Top shape: 1 1 55 74 (4070)

I1108 19:34:57.884768  4277 net.cpp:150] Setting up conv6
I1108 19:34:57.885309  4277 net.cpp:150] Setting up pool6
I1108 19:34:57.885327  4277 net.cpp:157] Top shape: 1 63 55 74 (256410)

I1108 19:34:57.885395  4277 net.cpp:150] Setting up concat
I1108 19:34:57.885412  4277 net.cpp:157] Top shape: 1 64 55 74 (260480)

I1108 19:34:57.886759  4277 net.cpp:150] Setting up conv7
I1108 19:34:57.886786  4277 net.cpp:157] Top shape: 1 64 55 74 (260480)

I1108 19:34:57.897269  4277 net.cpp:150] Setting up conv8
I1108 19:34:57.897303  4277 net.cpp:157] Top shape: 1 64 55 74 (260480)
I1108 19:34:57.899129  4277 net.cpp:150] Setting up conv9
I1108 19:34:57.899138  4277 net.cpp:157] Top shape: 1 1 55 74 (4070)

最佳答案

对于像素级预测,最后一个全连接层的 num_output 值不会是 1,而是等于要预测的目标图像(标签/ground truth)的 w*h。

是什么让你觉得该值为 1?

编辑 1:

以下是link1第3页图中提到的各层的尺寸:

LAYER        OUTPUT DIM [c*h*w]
course1     96*h1*w1     conv layer
course2     256*h2*w2    conv layer
course3     384*h3*w3    conv layer
course4     384*h4*w4    conv layer
course5     256*h5*w5    conv layer
course6     4096*1*1     fc layer
course7     X*1*1        fc layer    where 'X' could be interpreted as w*h

为了进一步理解这一点,我们假设有一个网络用来逐像素地预测一张图像。图像尺寸为 10*10,因此最后一个 fc 层的输出尺寸将是 100*1*1(对应上表中的 course7 层),它可以被解释(reshape)为 10*10 的图像。

现在的问题是,一维数组如何正确预测二维图像。为此,您必须注意,使用可能与像素数据相对应的标签来计算此输出的损失。因此,在训练过程中,权重将学习预测像素数据。

编辑2:

尝试用 caffe 自带的 draw_net.py 绘制你的网络,得到的结果如下图所示:

与 conv6 和 fc6 相连的 relu 层具有相同的名称,导致绘制出的图中连接显得混乱。我不确定这是否会在训练期间引发问题,但我建议您将其中一个 relu 层重命名为唯一的名称,以避免一些不可预见的问题。

回到你的问题,在完全连接的层之后似乎没有发生上采样。如日志中所示:

I1108 19:34:57.881680  4277 net.cpp:150] Setting up fc7
I1108 19:34:57.881718  4277 net.cpp:157] Top shape: 1 4070 (4070)

I1108 19:34:57.881826  4277 net.cpp:150] Setting up reshape
I1108 19:34:57.881846  4277 net.cpp:157] Top shape: 1 1 55 74 (4070)

I1108 19:34:57.884768  4277 net.cpp:150] Setting up conv6
I1108 19:34:57.885309  4277 net.cpp:150] Setting up pool6
I1108 19:34:57.885327  4277 net.cpp:157] Top shape: 1 63 55 74 (256410)

fc7 的输出尺寸为 4070*1*1,其形状被 reshape 为 1*55*74,随后作为输入传入 deconv1 层(之后再与 pool6 的输出做 concat)。

整个网络的输出是在conv9中产生的,其输出维度为1*55*74,与标签的维度完全相同(深度数据)。

如果我的答案仍然不清楚,请务必指出您认为上采样发生的位置。

关于deep-learning - caffe reshape /上采样全连接层,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/40483458/

相关文章:

python - 如何使用 sparse_softmax_cross_entropy_with_logits 在 tensorflow 中实现加权交叉熵损失

deep-learning - 为什么我们在 PyTorch 中将序列设为 "pack"?

python - 在 Tensorflow 中使用 3d 转置卷积时计算 output_shape

python - 如何将行字符串值转换为特征

python - 从 python 嵌套列表在 pandas 中创建新列

python - Caffe - draw_net_to_file - 'Classifier' 对象没有属性 'name'

audio - 我想了解用于说话人区分(speaker diarization)的 'd-vector'

machine-learning - caffe中基于VGG16制作跳层连接网络出错

重新排列数据以与数据对齐

machine-learning - 具有多个损失层的 Caffe