commit
68a7534461
@@ -0,0 +1,102 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/fc_op.h"
#include <vector>

namespace paddle {
namespace operators {

void FCOp::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE(ctx->HasInput("Input"),
                 "X(Input) of Fully Connected should not be null.");
  PADDLE_ENFORCE(ctx->HasOutput("Out"),
                 "Out(Output) of Fully Connected should not be null.");
  PADDLE_ENFORCE(ctx->HasInput("W"),
                 "W(Input) of Fully Connected should not be null.");

  auto in_dims = ctx->GetInputDim("Input");
  auto w_dims = ctx->GetInputDim("W");
  std::vector<int64_t> output_shape({in_dims[0], w_dims[1]});
  PADDLE_ENFORCE(in_dims.size() == 2 || in_dims.size() == 4,
                 "Fully Connected input should be 2-D or 4-D tensor.");

  PADDLE_ENFORCE(w_dims.size() == 2 || w_dims.size() == 4,
                 "Fully Connected W should be 2-D or 4-D tensor.");
  ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
  ctx->ShareLoD("Input", "Out");
}

framework::OpKernelType FCOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library{framework::LibraryType::kMKLDNN};
  framework::DataLayout layout{framework::DataLayout::kAnyLayout};

  return framework::OpKernelType(
      framework::ToDataType(ctx.Input<Tensor>("Input")->type()), ctx.GetPlace(),
      layout, library);
}

void FCOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  auto in_dims = ctx->GetInputDim("Input");
  auto w_dims = ctx->GetInputDim("W");

  if (ctx->HasOutput(framework::GradVarName("Input"))) {
    ctx->SetOutputDim(framework::GradVarName("Input"), in_dims);
  }
  if (ctx->HasOutput(framework::GradVarName("W"))) {
    ctx->SetOutputDim(framework::GradVarName("W"), w_dims);
  }
}

framework::OpKernelType FCOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library{framework::LibraryType::kMKLDNN};
  framework::DataLayout layout{framework::DataLayout::kAnyLayout};

  return framework::OpKernelType(
      framework::ToDataType(ctx.Input<Tensor>("Input")->type()), ctx.GetPlace(),
      layout, library);
}

FCOpMaker::FCOpMaker(OpProto* proto, OpAttrChecker* op_checker)
    : OpProtoAndCheckerMaker(proto, op_checker) {
  AddInput("Input", "(Tensor) The input tensor of fully connected operator.");
  AddInput("W", "(Tensor) The second input tensor of fc op.");
  AddOutput("Out", "(Tensor) The output tensor of fully connected operator.");
  AddAttr<bool>("use_mkldnn",
                "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
  AddAttr<bool>("bias_attr", "(bool, default false) Only used in mkldnn kernel")
      .SetDefault(false);
AddComment(R"DOC(
|
||||
Fully Connected Operator.
|
||||
|
||||
The fully connected operation calculates the output based on the input, weights and bias attribute.
|
||||
The size of each dimension of the parameters checked in the infer-shape.
|
||||
The matrix of bias is generated by the mkldnn framework, when the bias_attr is True.
|
||||
Additional parametrs are use_mkldnn and bias_attr.
|
||||
The input(X) size and output(Out) size may be diffrent.
|
||||
|
||||
The fully connected layer only supports MKLDNN version
|
||||
)DOC");
|
||||
}

}  // namespace operators
}  // namespace paddle

REGISTER_OP(fc, paddle::operators::FCOp, paddle::operators::FCOpMaker, fc_grad,
            paddle::operators::FCOpGrad);
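For intuition, here is a minimal numpy sketch (illustrative only, with made-up toy shapes; not part of the diff) of the shape contract that FCOp::InferShape enforces: a 4-D input [N, C, H, W] is flattened to [N, C*H*W], multiplied by a [C*H*W, oc] weight, and Out gets shape [in_dims[0], w_dims[1]]:

import numpy as np

# Hypothetical toy shapes, for illustration only.
N, C, H, W, oc = 2, 3, 4, 4, 5

x = np.random.rand(N, C, H, W).astype("float32")
w = np.random.rand(C * H * W, oc).astype("float32")

out = x.reshape(N, C * H * W).dot(w)  # fc forward without bias
assert out.shape == (N, oc)           # matches InferShape: [in_dims[0], w_dims[1]]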
@@ -0,0 +1,52 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/fluid/framework/op_registry.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

class FCOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
};

class FCOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override;
};

class FCOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  FCOpMaker(OpProto* proto, OpAttrChecker* op_checker);
};

}  // namespace operators
}  // namespace paddle
@@ -0,0 +1,108 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "UpsampleLayer.h"
#include <iostream>
namespace paddle {

REGISTER_LAYER(upsample, UpsampleLayer);

size_t UpsampleLayer::getOutputSize() {
  if (upsampleSize_ == 0) {
    upsampleSize_ = imgSize_ * scale_ - static_cast<int>(padOutX_);
    upsampleSizeY_ = imgSizeY_ * scaleY_ - static_cast<int>(padOutY_);
  }
  return upsampleSize_ * upsampleSizeY_ * channels_;
}

bool UpsampleLayer::init(const LayerMap& layerMap,
                         const ParameterMap& parameterMap) {
  Layer::init(layerMap, parameterMap);

  CHECK_EQ(inputLayers_.size(), 2U);
  CHECK_EQ(config_.inputs_size(), 2);
  const auto& conf = config_.inputs(0).upsample_conf();
  const auto& img_conf = conf.image_conf();

  imgSizeY_ =
      img_conf.has_img_size_y() ? img_conf.img_size_y() : img_conf.img_size();
  imgSize_ = img_conf.img_size();
  channels_ = img_conf.channels();

  CHECK((conf.has_upsample_size()) || (conf.has_scale()))
      << "scale or upsample_size is required.";

  if (conf.has_upsample_size()) {
    upsampleSize_ = conf.upsample_size();
    upsampleSizeY_ = upsampleSize_;
    if (conf.has_upsample_size_y()) {
      upsampleSizeY_ = conf.upsample_size_y();
    }
  } else {
    if (!conf.has_scale_y()) {
      // No separate y scale given: use the same scale in both dimensions.
      scale_ = scaleY_ = conf.scale();
      CHECK_GT(static_cast<int>(scale_), 1);
    } else {
      scale_ = conf.scale();
      scaleY_ = conf.scale_y();
    }
    padOutX_ = conf.pad_out_x();
    padOutY_ = conf.pad_out_y();
    CHECK(!padOutX_ || scale_ == 2)
        << "Output width padding compensation requires scale_ == 2";
    CHECK(!padOutY_ || scaleY_ == 2)
        << "Output height padding compensation requires scaleY_ == 2";
    upsampleSize_ = upsampleSizeY_ = 0;
  }
  return true;
}

void UpsampleLayer::forward(PassType passType) {
  Layer::forward(passType);

  MatrixPtr input = getInputValue(0);
  MatrixPtr mask = inputLayers_[1]->getOutput("mask").value;

  size_t batchSize = input->getHeight();
  size_t outSize = getOutputSize();

  CHECK_EQ(input->getWidth(), mask->getWidth());
  CHECK_EQ(mask->getHeight(), batchSize);
  resetOutput(batchSize, outSize);

  MatrixPtr output = getOutputValue();
  output->upsampleForward(*input,
                          *mask,
                          imgSize_,
                          imgSizeY_,
                          channels_,
                          upsampleSize_,
                          upsampleSizeY_);
}

void UpsampleLayer::backward(const UpdateCallback& callback) {
  MatrixPtr mask = inputLayers_[1]->getOutput("mask").value;
  MatrixPtr inputGrad = getInputGrad(0);
  MatrixPtr outputGrad = getOutputGrad();
  inputGrad->upsampleBackward(*outputGrad,
                              *mask,
                              imgSize_,
                              imgSizeY_,
                              channels_,
                              upsampleSize_,
                              upsampleSizeY_);
}

}  // namespace paddle
@@ -0,0 +1,53 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <vector>
#include "Layer.h"
#include "paddle/math/Matrix.h"
#include "paddle/utils/Logging.h"
#include "paddle/utils/Stat.h"
namespace paddle {

/**
 * This layer transposes the pooling process.
 * It takes two inputs: the first is the input data, and
 * the second is the mask data from the max-pool-with-mask layer.
 */
class UpsampleLayer : public Layer {
public:
  explicit UpsampleLayer(const LayerConfig& config) : Layer(config) {}
  ~UpsampleLayer() {}

  bool init(const LayerMap& layerMap,
            const ParameterMap& parameterMap) override;

  void forward(PassType passType) override;
  void backward(const UpdateCallback& callback) override;

  size_t getOutputSize();

protected:
  size_t scale_, scaleY_;
  size_t upsampleSize_, upsampleSizeY_;
  size_t padOutX_, padOutY_;
  size_t imgSize_, imgSizeY_;
  size_t channels_;
};

}  // namespace paddle
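To make the mask contract concrete, here is a small numpy sketch (illustrative only, with toy sizes; Matrix::upsampleForward/upsampleBackward are the real implementations): max-pool-with-mask records the flat index of each maximum inside the input image, the upsample forward scatters each pooled value back to that index and leaves zeros elsewhere, and the backward pass gathers output gradients from the same positions:

import numpy as np

# Toy example: one channel, a 4x4 image max-pooled 2x2 down to 2x2.
img = np.arange(16, dtype=np.float32).reshape(4, 4)

pooled = np.zeros((2, 2), dtype=np.float32)
mask = np.zeros((2, 2), dtype=np.int64)  # flat index of each max in img
for i in range(2):
    for j in range(2):
        patch = img[2 * i:2 * i + 2, 2 * j:2 * j + 2]
        k = np.argmax(patch)                       # index within the 2x2 patch
        pooled[i, j] = patch.flat[k]
        mask[i, j] = (2 * i + k // 2) * 4 + (2 * j + k % 2)

# Upsample forward: scatter pooled values to the masked positions.
up = np.zeros(16, dtype=np.float32)
up[mask.ravel()] = pooled.ravel()
up = up.reshape(4, 4)

# Upsample backward: gather output gradients from the same positions.
out_grad = np.ones((4, 4), dtype=np.float32)
in_grad = out_grad.ravel()[mask.ravel()].reshape(2, 2)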
@@ -0,0 +1,152 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>
#include <string>
#include <vector>

#include "LayerGradUtil.h"
#include "paddle/math/MathUtils.h"
#include "paddle/testing/TestUtil.h"

using namespace paddle;
void setPoolConfig(TestConfig* config,
                   PoolConfig* pool,
                   const string& poolType) {
  config->biasSize = 0;
  config->layerConfig.set_type("pool");
  config->layerConfig.set_num_filters(1);

  int kw = 2, kh = 2;
  int pw = 0, ph = 0;
  int sw = 2, sh = 2;
  pool->set_pool_type(poolType);
  pool->set_channels(2);
  pool->set_size_x(kw);
  pool->set_size_y(kh);
  pool->set_start(0);
  pool->set_padding(pw);
  pool->set_padding_y(ph);
  pool->set_stride(sw);
  pool->set_stride_y(sh);

  int ow = outputSize(pool->img_size(), kw, pw, sw, /* caffeMode */ false);
  int oh = outputSize(pool->img_size_y(), kh, ph, sh, /* caffeMode */ false);
  pool->set_output_x(ow);
  pool->set_output_y(oh);
}
LayerPtr doOneUpsampleTest(MatrixPtr& inputMat,
                           const string& poolType,
                           bool use_gpu,
                           real* tempGradData) {
  /* prepare maxPoolWithMaskLayer */
  TestConfig config;
  config.inputDefs.push_back({INPUT_DATA, "layer_0", 128, 0});
  LayerInputConfig* input = config.layerConfig.add_inputs();
  PoolConfig* pool = input->mutable_pool_conf();

  pool->set_img_size(8);
  pool->set_img_size_y(8);
  setPoolConfig(&config, pool, poolType);
  config.layerConfig.set_size(pool->output_x() * pool->output_y() *
                              pool->channels());

  config.layerConfig.set_name("MaxPoolWithMask");

  std::vector<DataLayerPtr> dataLayers;
  LayerMap layerMap;
  vector<Argument> datas;

  initDataLayer(config,
                &dataLayers,
                &datas,
                &layerMap,
                "MaxPoolWithMask",
                1,
                false,
                use_gpu);

  dataLayers[0]->getOutputValue()->copyFrom(*inputMat);

  FLAGS_use_gpu = use_gpu;
  std::vector<ParameterPtr> parameters;
  LayerPtr maxPoolingWithMaskOutputLayer;
  initTestLayer(config, &layerMap, &parameters, &maxPoolingWithMaskOutputLayer);
  maxPoolingWithMaskOutputLayer->forward(PASS_GC);

  /* prepare the upsample layer */
  LayerConfig upsampleLayerConfig;
  upsampleLayerConfig.set_type("upsample");
  LayerInputConfig* input1 = upsampleLayerConfig.add_inputs();
  upsampleLayerConfig.add_inputs();

  UpsampleConfig* upsampleConfig = input1->mutable_upsample_conf();
  upsampleConfig->set_scale(2);
  ImageConfig* imageConfig = upsampleConfig->mutable_image_conf();
  imageConfig->set_channels(2);
  imageConfig->set_img_size(4);
  imageConfig->set_img_size_y(4);
  upsampleLayerConfig.set_size(2 * 8 * 8);
  upsampleLayerConfig.set_name("upsample");

  for (size_t i = 0; i < 2; i++) {
    LayerInputConfig& inputTemp = *(upsampleLayerConfig.mutable_inputs(i));
    inputTemp.set_input_layer_name("MaxPoolWithMask");
  }

  LayerPtr upsampleLayer;
  ParameterMap parameterMap;
  upsampleLayer = Layer::create(upsampleLayerConfig);
  layerMap[upsampleLayerConfig.name()] = upsampleLayer;
  upsampleLayer->init(layerMap, parameterMap);
  upsampleLayer->setNeedGradient(true);
  upsampleLayer->forward(PASS_GC);
  upsampleLayer->getOutputGrad()->copyFrom(tempGradData, 128);
  upsampleLayer->backward();

  return upsampleLayer;
}

TEST(Layer, maxPoolingWithMaskOutputLayerFwd) {
  bool useGpu = false;
  MatrixPtr inputMat;
  MatrixPtr inputGPUMat;
  MatrixPtr tempGradMat;

  inputMat = Matrix::create(1, 128, false, useGpu);
  inputMat->randomizeUniform();

  tempGradMat = Matrix::create(1, 128, false, useGpu);
  tempGradMat->randomizeUniform();
  real* data = inputMat->getData();
  real* tempGradData = tempGradMat->getData();

  LayerPtr upsampleLayerCPU =
      doOneUpsampleTest(inputMat, "max-pool-with-mask", useGpu, tempGradData);

#ifdef PADDLE_WITH_CUDA
  useGpu = true;
  inputGPUMat = Matrix::create(1, 128, false, useGpu);
  inputGPUMat->copyFrom(data, 128);
  LayerPtr upsampleLayerGPU = doOneUpsampleTest(
      inputGPUMat, "max-pool-with-mask", useGpu, tempGradData);
  checkMatrixEqual(upsampleLayerCPU->getOutput("").value,
                   upsampleLayerGPU->getOutput("").value);

  checkMatrixEqual(upsampleLayerCPU->getPrev(0)->getOutputGrad(),
                   upsampleLayerGPU->getPrev(0)->getOutputGrad());
#endif
}
@@ -0,0 +1,99 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest

def fully_connected_naive(input, weights, bias_data=None):
    in_n, in_c, in_h, in_w = input.shape
    w_h, w_c = weights.shape

    x_data = np.reshape(input, [in_n, in_c * in_h * in_w])
    w_data = np.transpose(np.reshape(weights, (w_c, in_c * in_h * in_w)))

    result = np.dot(x_data, w_data)
    # `if not bias_data` is ambiguous for numpy arrays; test for None explicitly.
    if bias_data is not None:
        result = result + bias_data

    return result

class MatrixGenerate:
    def __init__(self, mb, ic, oc, h, w):
        self.input = np.random.random((mb, ic, h, w)).astype("float32")
        self.weights = np.random.random((ic * h * w, oc)).astype("float32")

class TestFCMKLDNNOp(OpTest):
    def init_op_type(self):
        # Overridden by subclasses to change shapes or drop the bias.
        pass

    def setUp(self):
        self.op_type = "fc"
        self.use_mkldnn = True
        self.with_bias = True
        self.matrix = MatrixGenerate(1, 10, 15, 3, 3)
        # Let subclasses override the defaults above before building the op.
        self.init_op_type()

        self.inputs = {'Input': self.matrix.input, 'W': self.matrix.weights}

        self.attrs = {
            'use_mkldnn': self.use_mkldnn,
            'with_bias': self.with_bias
        }

        self.outputs = {
            'Out': fully_connected_naive(self.matrix.input, self.matrix.weights)
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(set(['Input', 'W']), 'Out', max_relative_error=0.9)

    def test_check_grad_no_weight(self):
        self.check_grad(
            ['Input'], 'Out', max_relative_error=0.5, no_grad_set=set('W'))

class TestFCMKLDNNOp1(TestFCMKLDNNOp):
    def init_op_type(self):
        self.matrix = MatrixGenerate(2, 15, 48, 2, 2)


class TestFCMKLDNNOp2(TestFCMKLDNNOp):
    def init_op_type(self):
        self.matrix = MatrixGenerate(2, 32, 40, 1, 1)


class TestFCMKLDNNOp3(TestFCMKLDNNOp):
    def init_op_type(self):
        self.matrix = MatrixGenerate(2, 2, 4, 1, 1)


class TestFCMKLDNNOp4(TestFCMKLDNNOp):
    def init_op_type(self):
        self.with_bias = False
        self.matrix = MatrixGenerate(2, 32, 48, 2, 2)

# Renamed from a duplicate TestFCMKLDNNOp4, which shadowed the class above.
class TestFCMKLDNNOp5(TestFCMKLDNNOp):
    def init_op_type(self):
        self.with_bias = False
        self.matrix = MatrixGenerate(2, 32, 1000, 6, 6)


if __name__ == "__main__":
    unittest.main()