c++ - std::vector behavior, move and copy

Tags: c++ c++11 operator-overloading assignment-operator

To get more experience with C++11, I have been writing a neural network in C++ in my spare time. But I have run into a few problems that I cannot figure out on my own.

struct neuronsLayer
{
    vector<real> ac;

    neuronsLayer(int s)
    {
        std::cout<<"neuronLayer 1"<<std::endl;
        ac = vector<real>(s,0.1f);
    }
    neuronsLayer(const neuronsLayer& nl)
    {
        std::cout<<"neuronLayer 2"<<std::endl;
        ac = vector<real>(nl.ac);
    }
    neuronsLayer(neuronsLayer&& nl)
    {
        std::cout<<"neuronLayer 3"<<std::endl;
        ac = std::move(nl.ac);
    }
    neuronsLayer operator=(const neuronsLayer& nl)
    {
        std::cout<<"neuronLayer 4"<<std::endl;
        return neuronsLayer(nl);
    }
    neuronsLayer(){ std::cout<<"neuronLayer 5"<<std::endl;}
    ~neuronsLayer(){}
};

That is the layer implementation. Then the network:

struct network
{
    vector<neuronsLayer> hiddens;
    vector<neuronsConnection> synaps;
    real alpha;

   //std::initializer_list

    network(vector<int> layers)
    {
        alpha = 1.f;
        hiddens = vector<neuronsLayer>();//+2
        for(int& l : layers)
        {
            hiddens.push_back(neuronsLayer(l));
        }
        synaps = vector<neuronsConnection>();
        for(int i = 0 ; i < layers.size() -1 ; i++)
        {
            synaps.push_back(std::move(neuronsConnection(layers[i],layers[i+1])));
        }
    }

    void forward(vector<real> input)
    {
        hiddens[0].ac = input;
        for (int layer = 0; layer < hiddens.size() -1; ++layer)
        {
            for(int i = 0 ; i < synaps[layer].x ; i++)
            {
                for(int j = 0 ; j < synaps[layer].y ; j++)
                {
                    hiddens[layer+1].ac[i] += hiddens[layer].ac[j] * synaps[layer].w[i + synaps[layer].x * j]; //+ activation +biais
                }
            }
            for(int i = 0 ; i < hiddens[layer].ac.size() ; i ++)
                hiddens[layer+1].ac[i] = 1.f/(1+exp(-hiddens[layer+1].ac[i]));
        }
    }

    void backward(vector<real> expected)
    {
        vector<real> error(expected);
        for(int i = 0 ; i < error.size(); i ++)
        {
            error[i] = expected[i] - hiddens[hiddens.size() -1].ac[i];
        }
        for (int layer = 0; layer < hiddens.size() -1; ++layer)
        {
            for(int i = 0 ; i < synaps[layer].x ; i++)
            {
                for(int j = 0 ; j < synaps[layer].y ; j++)
                {
                    real dw = error[i]*(1+2*exp(-hiddens[0].ac[i])/(1+exp(-hiddens[0].ac[i])));
                    synaps[layer].w[i + synaps[layer].x * j] += dw*alpha;
                }
            }
        }
    }
};

And main():

int main(int argc, char** argv)
{
    vector<int> net = {64,2};
    network nn(net);
    vector<float> o = {1,0};
    vector<float> t = {0,1};

    auto rOne = std::bind(std::normal_distribution<float>(6,1), std::default_random_engine{});
    auto rTwo = std::bind(std::normal_distribution<float>(3,1), std::default_random_engine{});

    auto gOne = [&](){
        int x=rOne(),y=rOne();
        //while(x=rOne > 8 or x < 0);
        //while(y=rOne > 8 or y < 0);
        std::vector<real> tbr (64,0);
        tbr[x + y*8] = 1.0;
        return tbr;
    };

    auto gTwo = [&](){
        int x=rTwo(),y=rTwo();
        //while(x=rTwo > 8 or x < 0);
        //while(y=rTwo > 8 or y < 0);
        std::vector<real> tbr (64,0);
        tbr[x + y*8] = 1.0;
        return tbr;
    };

    for(int i = 0 ; i < 5000 ; i++)
    {
        nn.forward(gOne());
        nn.backward(o);
        nn.forward(gTwo());
        nn.backward(t);
    }
}

I have one main problem and two side questions:

1) When backward is called I get a SEGFAULT during execution, and it looks like hiddens[0] is empty. So I have probably (to put it mildly) misunderstood how move works?

Program received signal SIGSEGV, Segmentation fault. 
0x0000000000402159 in network::backward (this=0x7fffffffe190, expected=...) at dnn3.cpp:171
171   real dw = error[i]*(1+2*exp(-hiddens[0].ac[i])/(1+exp( hiddens[0].ac[i])));
(gdb) p i
$1 = 0
(gdb) p hiddens[0].ac[i]
$2 = (__gnu_cxx::__alloc_traits<std::allocator<float> >::value_type &) @0x3f0000003f000000: <error reading variable>
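
For reference, here is how moving out of a std::vector behaves in isolation (a minimal sketch, unrelated to the network code): the destination steals the buffer and the source is left valid but, in practice, empty.

#include <iostream>
#include <utility>
#include <vector>

int main()
{
    std::vector<float> src(4, 0.1f);
    std::vector<float> dst = std::move(src);   // dst steals src's buffer

    std::cout << dst.size() << std::endl;      // 4
    std::cout << src.size() << std::endl;      // 0 in practice: src is valid but unspecified,
                                               // so indexing it (src[0]) would be undefined behaviour
}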

2) The program output before this point is:

neuronLayer 1
neuronLayer 3
neuronLayer 1
neuronLayer 3
neuronLayer 2

Why is the copy constructor called? I only create 2 layers, both of them produced by exactly the same procedure, yet only one of them uses this constructor. I don't understand why it is needed.
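
To make this concrete, a reduced probe type (hypothetical, not the original class) reproduces the same pattern with two push_back calls; with common implementations the extra copy comes from the vector reallocating and falling back to the copy constructor, because the move constructor is not marked noexcept:

#include <iostream>
#include <vector>

struct Probe
{
    Probe()             { std::cout << "ctor" << std::endl; }  // plays the role of "neuronLayer 1"
    Probe(const Probe&) { std::cout << "copy" << std::endl; }  // "neuronLayer 2"
    Probe(Probe&&)      { std::cout << "move" << std::endl; }  // "neuronLayer 3", not noexcept
};

int main()
{
    std::vector<Probe> v;
    v.push_back(Probe{});   // ctor, move
    v.push_back(Probe{});   // ctor, move, then copy: the existing element is copied while
                            // reallocating, because the move constructor could throw
}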

3) About the bind objects rOne and rTwo: do they always return the same values? When I look at gOne's output it seems to return the same value twice. Is this normal?
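
For context, a reduced sketch of the seeding behaviour (identical parameters here, unlike rOne and rTwo above): each bound object owns its own copy of the engine, and successive calls to one object do produce different draws, but two default-seeded engines run through exactly the same sequence.

#include <functional>
#include <iostream>
#include <random>

int main()
{
    auto a = std::bind(std::normal_distribution<float>(0, 1), std::default_random_engine{});
    auto b = std::bind(std::normal_distribution<float>(0, 1), std::default_random_engine{});

    std::cout << a() << " " << a() << std::endl;  // two different draws...
    std::cout << b() << " " << b() << std::endl;  // ...but the same two values again, because
                                                  // b's engine started from the same default seed
}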

Thanks in advance, Marc.

Edit: as requested:

(gdb) p hiddens
 $1 = {<std::_Vector_base<neuronsLayer, std::allocator<neuronsLayer> >> = { _M_impl = {<std::allocator<neuronsLayer>> ={<__gnu_cxx::new_allocator<neuronsLayer>> = {<No data fields>}, <No data fields>},_M_start = 0x60c1a0, _M_finish = 0x60c1d0, _M_end_of_storage = 0x60c1d0}}, <No data fields>}
(gdb) p hiddens[0].ac
$2 = {<std::_Vector_base<float, std::allocator<float> >> = { _M_impl = {<std::allocator<float>> = {<__gnu_cxx::new_allocator<float>> = {<No data fields>}, <No data fields>}, _M_start = 0x3f0000003f000000, _M_finish = 0x3f0000003f000000, _M_end_of_storage = 0x60c2e0}}, <No data fields>}

Edit 2:

Breakpoint 1, network::forward (this=0x7fffffffe190, input=...)
(gdb) p hiddens
$1 = {<std::_Vector_base<neuronsLayer, std::allocator<neuronsLayer> >> = {_M_impl = {<std::allocator<neuronsLayer>> = {<__gnu_cxx::new_allocator<neuronsLayer>> = {<No data fields>}, <No data fields>},_M_start = 0x60d1a0, _M_finish = 0x60d1d0, _M_end_of_storage = 0x60d1d0}}, <No data fields>}
(gdb) p hiddens[0]
$2 = (__gnu_cxx::__alloc_traits<std::allocator<neuronsLayer> >::value_type &) @0x60d1a0: { ac = {<std::_Vector_base<float, std::allocator<float> >> = { _M_impl = {<std::allocator<float>> = {<__gnu_cxx::new_allocator<float>> = {<No data fields>}, <No data fields>}, _M_start = 0x60d1e0, _M_finish = 0x60d2e0, _M_end_of_storage = 0x60d2e0}}, <No data fields>}}
(gdb) p hiddens[0].ac
$3 = {<std::_Vector_base<float, std::allocator<float> >> = { _M_impl = {<std::allocator<float>> = {<__gnu_cxx::new_allocator<float>> = {<No data fields>}, <No data fields>}, _M_start = 0x60d1e0, _M_finish = 0x60d2e0, _M_end_of_storage = 0x60d2e0}}, <No data fields>}
(gdb) p hiddens[1]
$4 = (__gnu_cxx::__alloc_traits<std::allocator<neuronsLayer> >::value_type &) @0x60d1b8: { ac = {<std::_Vector_base<float, std::allocator<float> >> = _M_impl = {<std::allocator<float>> = {<__gnu_cxx::new_allocator<float>> = {<No data fields>}, <No data fields>}, _M_start = 0x60d180, _M_finish = 0x60d188, _M_end_of_storage = 0x60d188}}, <No data fields>}}
(gdb) p hiddens[1].ac[0] 
$5 = (__gnu_cxx::__alloc_traits<std::allocator<float> >::value_type &) @0x60d180: 0.100000001
(gdb) p hiddens[0].ac[0]
$6 = (__gnu_cxx::__alloc_traits<std::allocator<float> >::value_type &) @0x60d1e0: 0.100000001

Best Answer

neuronsLayer operator=(const neuronsLayer& nl)
{
    std::cout<<"neuronLayer 4"<<std::endl;
    return neuronsLayer(nl);
}

That assignment operator does not do what you want. It does make a copy of a temporary neuronsLayer from the nl that was passed in, but it never modifies the object it is called on.

It should be:

neuronsLayer& operator=(const neuronsLayer& nl)
{
    std::cout<<"neuronLayer 4"<<std::endl;
    ac = nl.ac;
    return *this;
}
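
A quick way to see the difference, using a reduced, hypothetical Layer type rather than the original class: with the corrected signature, the assignment actually modifies the left-hand object and returns *this, so it can also be chained.

#include <cassert>
#include <vector>

struct Layer
{
    std::vector<float> ac;
    Layer() = default;
    explicit Layer(std::size_t n) : ac(n, 0.1f) {}

    Layer& operator=(const Layer& nl)   // corrected signature
    {
        ac = nl.ac;
        return *this;
    }
};

int main()
{
    Layer a, b(3), c(3);
    a = b;                      // a.ac is now a copy of b.ac
    assert(a.ac.size() == 3);
    a = b = c;                  // chaining works because operator= returns a reference
}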

From the edit in the question:

(gdb) p hiddens
 $1 = {<std::_Vector_base<neuronsLayer, std::allocator<neuronsLayer> >> = { _M_impl = {<std::allocator<neuronsLayer>> ={<__gnu_cxx::new_allocator<neuronsLayer>> = {<No data fields>}, <No data fields>},_M_start = 0x60c1a0, _M_finish = 0x60c1d0, _M_end_of_storage = 0x60c1d0}}, <No data fields>}
(gdb) p hiddens[0].ac
$2 = {<std::_Vector_base<float, std::allocator<float> >> = { _M_impl = {<std::allocator<float>> = {<__gnu_cxx::new_allocator<float>> = {<No data fields>}, <No data fields>}, _M_start = 0x3f0000003f000000, _M_finish = 0x3f0000003f000000, _M_end_of_storage = 0x60c2e0}}, <No data fields>}

Since _M_start equals _M_finish for hiddens[0].ac, the vector is empty, and so taking its 0th element causes the segfault.
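
A minimal illustration of why that crashes: std::vector::operator[] performs no bounds check, so indexing an empty vector is undefined behaviour; at() would have thrown instead, which is easier to diagnose.

#include <iostream>
#include <stdexcept>
#include <vector>

int main()
{
    std::vector<float> v;   // empty: begin() == end(), like _M_start == _M_finish above
    // v[0];                // undefined behaviour: operator[] does no bounds check
    try
    {
        std::cout << v.at(0) << std::endl;
    }
    catch (const std::out_of_range& e)
    {
        std::cout << "caught: " << e.what() << std::endl;   // at() reports the error instead
    }
}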

For more on c++ - std::vector behavior, move and copy, see the original question on Stack Overflow: https://stackoverflow.com/questions/31022537/
