commit 9dd588b414
@@ -0,0 +1,95 @@
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "BilinearInterpLayer.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"

namespace paddle {

REGISTER_LAYER(bilinear_interp, BilinearInterpLayer);

size_t BilinearInterpLayer::getSize() {
  inImgH_ = inputLayers_[0]->getOutput().getFrameHeight();
  inImgW_ = inputLayers_[0]->getOutput().getFrameWidth();

  const BilinearInterpConfig& conf = config_.inputs(0).bilinear_interp_conf();
  if (inImgH_ == 0) {
    inImgH_ = conf.img_size_y();
  }
  if (inImgW_ == 0) {
    inImgW_ = conf.img_size_x();
  }

  outImgH_ = conf.out_size_y();
  outImgW_ = conf.out_size_x();
  numChannels_ = conf.num_channels();

  CHECK(outImgH_ > 0 && outImgW_ > 0);
  CHECK(inImgH_ > 0 && inImgW_ > 0);
  CHECK(numChannels_);

  ratioH_ = (outImgH_ > 1) ?
    static_cast<real>(inImgH_ - 1) / (outImgH_ - 1) : 0.f;
  ratioW_ = (outImgW_ > 1) ?
    static_cast<real>(inImgW_ - 1) / (outImgW_ - 1) : 0.f;

  getOutput().setFrameHeight(outImgH_);
  getOutput().setFrameWidth(outImgW_);
  return outImgH_ * outImgW_ * numChannels_;
}

bool BilinearInterpLayer::init(const LayerMap& layerMap,
                               const ParameterMap& parameterMap) {
  /* Initialize the basic parent class */
  Layer::init(layerMap, parameterMap);

  CHECK_EQ(1, config_.inputs_size());

  return true;
}

void BilinearInterpLayer::forward(PassType passType) {
  Layer::forward(passType);

  size_t batchSize = getInput(0).getBatchSize();
  size_t size = getSize();
  {
    REGISTER_TIMER_INFO("FwResetTimer", getName().c_str());
    resetOutput(batchSize, size);
  }

  MatrixPtr inV = getInputValue(0);
  MatrixPtr outV = getOutputValue();
  {
    REGISTER_TIMER_INFO("FwBilinearInterpTimer", getName().c_str());
    outV->bilinearForward(*inV, inImgH_, inImgW_, outImgH_, outImgW_,
                          numChannels_, ratioH_, ratioW_);
  }
}

void BilinearInterpLayer::backward(const UpdateCallback& callback) {
  (void) callback;

  MatrixPtr inputG = getInputGrad(0);
  MatrixPtr outG = getOutputGrad();
  {
    REGISTER_TIMER_INFO("BwBilinearInterpTimer", getName().c_str());
    if (inputG) {
      inputG->bilinearBackward(*outG, outImgH_, outImgW_, inImgH_, inImgW_,
                               numChannels_, ratioH_, ratioW_);
    }
  }
}
}  // namespace paddle
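The ratios computed in getSize() correspond to corner-aligned sampling: output pixel y reads from source coordinate ratioH_ * y, so the first and last output rows map exactly onto the first and last input rows. The actual kernels, Matrix::bilinearForward and Matrix::bilinearBackward, are implemented in paddle/math and not shown here, so the following single-channel sketch is only an assumption-level illustration of that mapping, with all names hypothetical:

#include <vector>

// Corner-aligned bilinear resize of one channel, mirroring the
// ratio = (in - 1) / (out - 1) convention used by BilinearInterpLayer.
std::vector<float> bilinearResize(const std::vector<float>& in, int inH,
                                  int inW, int outH, int outW) {
  float ratioH = (outH > 1) ? float(inH - 1) / (outH - 1) : 0.f;
  float ratioW = (outW > 1) ? float(inW - 1) / (outW - 1) : 0.f;
  std::vector<float> out(outH * outW);
  for (int y = 0; y < outH; ++y) {
    float fy = ratioH * y;
    int y0 = static_cast<int>(fy);          // top source row
    int y1 = (y0 + 1 < inH) ? y0 + 1 : y0;  // bottom source row, clamped
    float wy = fy - y0;
    for (int x = 0; x < outW; ++x) {
      float fx = ratioW * x;
      int x0 = static_cast<int>(fx);
      int x1 = (x0 + 1 < inW) ? x0 + 1 : x0;
      float wx = fx - x0;
      // Weighted average of the four neighboring source pixels.
      out[y * outW + x] =
          (1 - wy) * ((1 - wx) * in[y0 * inW + x0] + wx * in[y0 * inW + x1]) +
          wy * ((1 - wx) * in[y1 * inW + x0] + wx * in[y1 * inW + x1]);
    }
  }
  return out;
}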
@@ -0,0 +1,46 @@
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "Layer.h"
#include "paddle/math/Matrix.h"

namespace paddle {

/**
 * @brief A layer for bilinear interpolation, applied to the output of a
 *        conv layer.
 *
 * @note The config file api is bilinear_interp_layer.
 */
class BilinearInterpLayer : public Layer {
protected:
  size_t outImgH_, outImgW_;
  size_t inImgH_, inImgW_;
  real ratioH_, ratioW_;
  size_t numChannels_;

public:
  explicit BilinearInterpLayer(const LayerConfig& config) : Layer(config) {}

  virtual ~BilinearInterpLayer() {}

  size_t getSize();
  bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);
  void forward(PassType passType);
  void backward(const UpdateCallback& callback = nullptr);
};

}  // namespace paddle
File diff suppressed because it is too large.
@@ -0,0 +1,85 @@
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "ConvBaseLayer.h"
#include "paddle/math/Matrix.h"
#include <vector>

namespace paddle {

/**
 * @brief A subclass of ConvBaseLayer that is a superclass of both
 *        ExpandConvLayer and ExpandConvTransLayer.
 */
class ExpandConvBaseLayer : public ConvBaseLayer {
protected:
  /// For expand convolution.
  /// subM_ = numFilters_ / groups_.
  IntV subM_;
  /// subN_ = outputH_ * outputW_.
  IntV subN_;
  /// subK_ = channels_ * filterPixels_ * groups_.
  IntV subK_;

  /* expandInput_ and transOutValue_ are used for the CPU expand-conv
   * computation. One sample is expanded at a time; shape:
   * (numChannels * filterPixels_, outputSizeH * outputSizeW)
   */
  MatrixPtr expandInput_;
  /// The transpose of the output; an auxiliary matrix.
  MatrixPtr transOutValue_;

public:
  explicit ExpandConvBaseLayer(const LayerConfig& config)
      : ConvBaseLayer(config) {}

  ~ExpandConvBaseLayer() {}

  bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);

  size_t getOutputSize();

  /**
   * Create or resize expandInput_.
   */
  void resetExpandInput(size_t height, size_t width);

  /**
   * Add shared bias.
   */
  void addSharedBias();

  /**
   * Add unshared bias.
   */
  void addUnsharedBias();

  /**
   * Expand one input sample.
   */
  void expandOneFrame(MatrixPtr image, size_t startIdx, int inIdx);

  /**
   * Expand one input sample and perform the matrix multiplication.
   */
  void expandFwdOnce(MatrixPtr image, MatrixPtr out, int inIdx, int startIdx);

  void bpropSharedBias(MatrixPtr biases, MatrixPtr v);
  void bpropBiases(MatrixPtr v);
  void bpropWeights(MatrixPtr image, MatrixPtr out, int inpIdx);
  void bpropActs(MatrixPtr image, MatrixPtr out, int inpIdx);
};

}  // namespace paddle
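The subM_/subN_/subK_ comments describe the per-group GEMM that expandFwdOnce performs after expandOneFrame lays one sample out in expandInput_. Below is a minimal sketch of that im2col expansion for the groups_ == 1 case (a hypothetical standalone helper, not this commit's code); once the input is expanded, convolution reduces to a (numFilters x K) by (K x N) matrix product with K = channels * filterPixels and N = outputH * outputW:

#include <vector>

// im2col for one sample: each column of the result holds the receptive field
// of one output pixel, so the filter matrix times this expansion computes all
// output channels in a single GEMM.
std::vector<float> im2col(const std::vector<float>& img, int channels,
                          int imgH, int imgW, int filterH, int filterW,
                          int stride, int padding, int outH, int outW) {
  int K = channels * filterH * filterW;
  int N = outH * outW;
  std::vector<float> cols(K * N, 0.f);  // zero fill covers the padding region
  for (int c = 0; c < channels; ++c) {
    for (int fy = 0; fy < filterH; ++fy) {
      for (int fx = 0; fx < filterW; ++fx) {
        int row = (c * filterH + fy) * filterW + fx;
        for (int oy = 0; oy < outH; ++oy) {
          for (int ox = 0; ox < outW; ++ox) {
            int iy = oy * stride - padding + fy;
            int ix = ox * stride - padding + fx;
            if (iy >= 0 && iy < imgH && ix >= 0 && ix < imgW) {
              cols[row * N + oy * outW + ox] =
                  img[(c * imgH + iy) * imgW + ix];
            }
          }
        }
      }
    }
  }
  return cols;
}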
File diff suppressed because it is too large.
@@ -0,0 +1,92 @@
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
#include "ExpandConvTransLayer.h"

/* The implementation of convTransLayer is basically a swap of the forward
 * and backward passes of the original convLayer.
 * The variable naming follows the convention of convLayer.
 */

namespace paddle {

REGISTER_LAYER(exconvt, ExpandConvTransLayer);

bool ExpandConvTransLayer::init(const LayerMap &layerMap,
                                const ParameterMap &parameterMap) {
  /* Initialize the basic convolutional parent class */
  ExpandConvBaseLayer::init(layerMap, parameterMap);

  return true;
}

void ExpandConvTransLayer::forward(PassType passType) {
  Layer::forward(passType);

  /* malloc memory for the output_ if necessary */
  int batchSize = inputLayers_[0]->getOutputValue()->getHeight();
  resetOutput(batchSize, getOutputSize());

  MatrixPtr output = nullptr;
  for (size_t i = 0; i < inputLayers_.size(); ++i) {
    LayerPtr prevLayer = getPrev(i);
    output = prevLayer->getOutputValue();
    REGISTER_TIMER_INFO("shrinkFwd", getName().c_str());
    bpropActs(output, getOutputValue(), i);
  }

  /* add the bias vector */
  if (biases_.get()) {
    if (sharedBiases_) {
      addSharedBias();
    } else {
      addUnsharedBias();
    }
  }

  /* activation */
  forwardActivation();
}

void ExpandConvTransLayer::backward(const UpdateCallback &callback) {
  backwardActivation();

  MatrixPtr imageGrad = getOutputGrad();
  if (biases_ && biases_->getWGrad()) {
    bpropBiases(imageGrad);
    /* Increment the gradient update count */
    biases_->getParameterPtr()->incUpdate(callback);
  }

  for (size_t i = 0; i < inputLayers_.size(); ++i) {
    /* First, calculate the input layer's error */
    for (size_t off = 0; off < imageGrad->getHeight(); off++) {
      if (getPrev(i)->getOutputGrad()) {
        expandFwdOnce(imageGrad, getPrev(i)->getOutputGrad(), i, off);
      }
    }
    if (weights_[i]->getWGrad()) {
      /* Then, calculate the W-gradient for the current layer */
      bpropWeights(imageGrad, getPrev(i)->getOutputValue(), i);
      /* Increment the gradient update count */
      weights_[i]->getParameterPtr()->incUpdate(callback);
    }
  }
}

}  // namespace paddle
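Concretely, the "swap" means forward() calls bpropActs (convolution's backward-data routine) and backward() calls expandFwdOnce (convolution's forward routine). The identity behind this, in a 1-D single-channel sketch with assumed names (not this commit's code): transposed convolution scatters each input value through the filter, which is exactly what ordinary convolution's backward-data pass does with an output gradient.

#include <vector>

// Ordinary 1-D valid convolution: each output gathers one filter window.
// Assumes in.size() >= w.size().
std::vector<float> conv1d(const std::vector<float>& in,
                          const std::vector<float>& w, int stride) {
  int outLen = static_cast<int>((in.size() - w.size()) / stride) + 1;
  std::vector<float> out(outLen, 0.f);
  for (int o = 0; o < outLen; ++o) {
    for (size_t f = 0; f < w.size(); ++f) {
      out[o] += w[f] * in[o * stride + f];
    }
  }
  return out;
}

// Transposed 1-D convolution: each input scatters one filter window. Feeding
// it conv1d's output gradient reproduces conv1d's backward-data computation.
std::vector<float> convTrans1d(const std::vector<float>& in,
                               const std::vector<float>& w, int stride) {
  int outLen = static_cast<int>((in.size() - 1) * stride + w.size());
  std::vector<float> out(outLen, 0.f);
  for (size_t i = 0; i < in.size(); ++i) {
    for (size_t f = 0; f < w.size(); ++f) {
      out[i * stride + f] += w[f] * in[i];
    }
  }
  return out;
}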
@@ -0,0 +1,44 @@
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/math/Matrix.h"
#include <vector>
#include "ExpandConvBaseLayer.h"

namespace paddle {

/**
 * @brief A subclass of convolution layer.
 *        This layer expands the input and uses matrix multiplication to
 *        compute the convolution transpose (deconv) operation.
 *
 *        The config file api is img_conv_layer with the flag trans=True.
 */
class ExpandConvTransLayer : public ExpandConvBaseLayer {
public:
  explicit ExpandConvTransLayer(const LayerConfig& config)
      : ExpandConvBaseLayer(config) {}

  ~ExpandConvTransLayer() {}

  bool init(const LayerMap& layerMap, const ParameterMap& parameterMap);

  void forward(PassType passType);
  void backward(const UpdateCallback& callback);
};

}  // namespace paddle
@@ -0,0 +1,246 @@
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>
#include <vector>
#include <string>
#include "paddle/gserver/layers/DataLayer.h"
#include "ModelConfig.pb.h"
#include "paddle/trainer/Trainer.h"
#include "paddle/utils/GlobalConstants.h"
#include "paddle/gserver/layers/ExpandConvTransLayer.h"
#include "paddle/math/MathUtils.h"

#include "TestUtil.h"
#include "LayerGradUtil.h"

using namespace paddle;  // NOLINT
using namespace std;     // NOLINT

P_DECLARE_bool(use_gpu);
P_DECLARE_int32(gpu_id);
P_DECLARE_double(checkgrad_eps);
P_DECLARE_bool(thread_local_rand_use_global_seed);
P_DECLARE_bool(prev_batch_state);

// Test that the convTrans forward is the same as the conv backward
TEST(Layer, convTransLayerFwd) {
  // Set up the conv-trans layer
  TestConfig configt;
  configt.biasSize = 3;
  configt.layerConfig.set_type("exconvt");
  configt.layerConfig.set_num_filters(3);
  configt.layerConfig.set_partial_sum(1);
  configt.layerConfig.set_shared_biases(true);

  configt.inputDefs.push_back({INPUT_DATA, "layer_0", 1024, 384});
  LayerInputConfig* input = configt.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_filter_size(2);
  conv->set_filter_size_y(4);
  conv->set_channels(16);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(1);
  conv->set_filter_channels(3 / conv->groups());
  conv->set_img_size(16);
  conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(),
                                conv->padding(), conv->stride(),
                                /* caffeMode */ true));
  configt.layerConfig.set_size(conv->img_size() * conv->img_size() *
                               configt.layerConfig.num_filters());
  configt.layerConfig.set_name("convTrans");

  // initialize the data layer
  std::vector<DataLayerPtr> dataLayers;
  LayerMap layerMap;
  vector<Argument> datas;
  initDataLayer(configt, &dataLayers, &datas, &layerMap, "convTrans",
                100, false, false);
  // initialize the test layer
  std::vector<ParameterPtr> parameters;
  LayerPtr convtLayer;
  initTestLayer(configt, &layerMap, &parameters, &convtLayer);
  convtLayer->getBiasParameter()->zeroMem();
  convtLayer->forward(PASS_GC);

  // Set up the conv-layer config
  TestConfig config;
  config.biasSize = 16;
  config.layerConfig.set_type("exconv");
  config.layerConfig.set_num_filters(16);
  config.layerConfig.set_partial_sum(1);
  config.layerConfig.set_shared_biases(true);

  config.inputDefs.push_back({INPUT_DATA, "layer_1", 768, 384});
  input = config.layerConfig.add_inputs();
  conv = input->mutable_conv_conf();
  conv->set_filter_size(2);
  conv->set_filter_size_y(4);
  conv->set_channels(3);
  conv->set_padding(0);
  conv->set_padding_y(1);
  conv->set_stride(2);
  conv->set_stride_y(2);
  conv->set_groups(1);
  conv->set_filter_channels(conv->channels() / conv->groups());
  conv->set_img_size(16);
  conv->set_output_x(outputSize(conv->img_size(), conv->filter_size(),
                                conv->padding(), conv->stride(),
                                /* caffeMode */ true));
  config.layerConfig.set_size(conv->output_x() * conv->output_x() *
                              config.layerConfig.num_filters());
  config.layerConfig.set_name("conv");

  // initialize the data layer
  std::vector<DataLayerPtr> dataLayers2;
  LayerMap layerMap2;
  vector<Argument> datas2;
  initDataLayer(config, &dataLayers2, &datas2, &layerMap2, "conv",
                100, false, false);
  // initialize the test layer
  std::vector<ParameterPtr> parameters2;
  LayerPtr convLayer;
  initTestLayer(config, &layerMap2, &parameters2, &convLayer);

  // Sync the convLayer and convtLayer parameters
  convLayer->getBiasParameter()->zeroMem();
  convLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->copyFrom(
      *(convtLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)));

  // Set the convLayer outputGrad as the convTransLayer input value
  convLayer->forward(PASS_GC);
  convLayer->getOutput().grad->copyFrom(*(dataLayers[0]->getOutputValue()));

  vector<int> callbackFlags(parameters2.size(), 0);
  auto callback = [&](Parameter* para) { ++callbackFlags[para->getID()]; };
  convLayer->backward(callback);

  // Check that the convLayer backward is the same as the convTransLayer
  // forward
  checkMatrixEqual(convtLayer->getOutputValue(),
                   dataLayers2[0]->getOutputGrad());
}

// Do one forward pass of the convTrans layer and check whether its output
// matches the given result
void doOneConvtTest(size_t imgSize, size_t output_x, size_t stride,
                    size_t padding, size_t filter_size, MatrixPtr& result) {
  TestConfig configt;
  configt.biasSize = 1;
  configt.layerConfig.set_type("exconvt");
  configt.layerConfig.set_num_filters(1);
  configt.layerConfig.set_partial_sum(1);
  configt.layerConfig.set_shared_biases(true);

  configt.inputDefs.push_back({INPUT_DATA, "layer_0", output_x * output_x,
                               filter_size * filter_size});
  LayerInputConfig* input = configt.layerConfig.add_inputs();
  ConvConfig* conv = input->mutable_conv_conf();
  conv->set_filter_size(filter_size);
  conv->set_filter_size_y(filter_size);
  conv->set_channels(1);
  conv->set_padding(padding);
  conv->set_padding_y(padding);
  conv->set_stride(stride);
  conv->set_stride_y(stride);
  conv->set_groups(1);
  conv->set_filter_channels(1);
  conv->set_img_size(imgSize);
  conv->set_output_x(output_x);

  configt.layerConfig.set_size(conv->img_size() * conv->img_size() *
                               configt.layerConfig.num_filters());
  configt.layerConfig.set_name("convTrans");

  std::vector<DataLayerPtr> dataLayers;
  LayerMap layerMap;
  vector<Argument> datas;
  initDataLayer(configt, &dataLayers, &datas, &layerMap, "convTrans",
                1, false, false);
  dataLayers[0]->getOutputValue()->zeroMem();
  dataLayers[0]->getOutputValue()->add(1.0);

  // initialize the test layer
  std::vector<ParameterPtr> parameters;
  LayerPtr convtLayer;
  initTestLayer(configt, &layerMap, &parameters, &convtLayer);
  convtLayer->getBiasParameter()->zeroMem();
  convtLayer->getParameters()[0]->zeroMem();
  convtLayer->getParameters()[0]->getBuf(PARAMETER_VALUE)->add(1.0);
  convtLayer->forward(PASS_GC);

  checkMatrixEqual(convtLayer->getOutputValue(), result);
}

TEST(Layer, convTransLayerFwd2) {
  MatrixPtr result;
  result = Matrix::create(1, 5 * 5, false, false);
  result->zeroMem();
  result->add(1.0);
  doOneConvtTest(/* imgSize */ 5,
                 /* output_x */ 1,
                 /* stride */ 1,
                 /* padding */ 0,
                 /* filter_size */ 5,
                 result);

  float resultData[] = {1, 2, 2, 2, 1,
                        2, 4, 4, 4, 2,
                        2, 4, 4, 4, 2,
                        2, 4, 4, 4, 2,
                        1, 2, 2, 2, 1};
  result->setData(resultData);
  doOneConvtTest(/* imgSize */ 5,
                 /* output_x */ 2,
                 /* stride */ 1,
                 /* padding */ 0,
                 /* filter_size */ 4,
                 result);

  float resultData2[] = {1, 2, 2, 2, 1,
                         2, 4, 4, 4, 2,
                         2, 4, 4, 4, 2,
                         2, 4, 4, 4, 2,
                         1, 2, 2, 2, 1};
  result->setData(resultData2);
  doOneConvtTest(/* imgSize */ 5,
                 /* output_x */ 2,
                 /* stride */ 2,
                 /* padding */ 1,
                 /* filter_size */ 5,
                 result);

  float resultData3[] = {1, 1, 2, 1, 1,
                         1, 1, 2, 1, 1,
                         2, 2, 4, 2, 2,
                         1, 1, 2, 1, 1,
                         1, 1, 2, 1, 1};
  result->setData(resultData3);
  doOneConvtTest(/* imgSize */ 5,
                 /* output_x */ 2,
                 /* stride */ 2,
                 /* padding */ 0,
                 /* filter_size */ 3,
                 result);
}

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  initMain(argc, argv);
  FLAGS_thread_local_rand_use_global_seed = true;
  srand(1);
  return RUN_ALL_TESTS();
}
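The expected matrices in convTransLayerFwd2 can be checked by hand: with all-ones input, all-ones weights, and zero bias, each convTrans output pixel equals the number of filter windows covering it. A standalone brute-force counter (hypothetical, not part of the test) that reproduces resultData3 for the last case:

#include <cstdio>
#include <vector>

// Counts, for every image pixel, how many (input pixel, filter tap) pairs of
// the transposed convolution touch it; with all-ones data and weights this
// count is exactly the layer's output.
std::vector<int> convTransOnes(int imgSize, int outputX, int stride,
                               int padding, int filterSize) {
  std::vector<int> img(imgSize * imgSize, 0);
  for (int i = 0; i < outputX; ++i) {
    for (int j = 0; j < outputX; ++j) {
      for (int fy = 0; fy < filterSize; ++fy) {
        for (int fx = 0; fx < filterSize; ++fx) {
          int y = i * stride - padding + fy;
          int x = j * stride - padding + fx;
          if (y >= 0 && y < imgSize && x >= 0 && x < imgSize) {
            ++img[y * imgSize + x];
          }
        }
      }
    }
  }
  return img;
}

int main() {
  // Mirrors doOneConvtTest(5, 2, 2, 0, 3, ...); prints the resultData3 grid:
  // 1 1 2 1 1 / 1 1 2 1 1 / 2 2 4 2 2 / 1 1 2 1 1 / 1 1 2 1 1.
  std::vector<int> img = convTransOnes(5, 2, 2, 0, 3);
  for (int y = 0; y < 5; ++y) {
    for (int x = 0; x < 5; ++x) printf("%d ", img[y * 5 + x]);
    printf("\n");
  }
  return 0;
}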
Some files were not shown because too many files have changed in this diff.