remove mkl & fix commit

revert-15774-anakin_subgraph_engine
heqiaozhi 6 years ago
parent 97b76c94c4
commit 04f876f5bc

File diff suppressed because one or more lines are too long

@@ -15,9 +15,6 @@ limitations under the License. */
 #include "paddle/fluid/operators/data_norm_op.h"
 #include <string>
 #include "paddle/fluid/framework/data_layout.h"
-#ifdef PADDLE_WITH_MKLDNN
-#include "paddle/fluid/platform/mkldnn_helper.h"
-#endif
 namespace paddle {
 namespace operators {
@@ -97,13 +94,6 @@ class DataNormOp : public framework::OperatorWithKernel {
     // TODO(pzelazko-intel): enable MKLDNN layout when it's ready
     framework::LibraryType library = framework::LibraryType::kPlain;
     framework::DataLayout layout = framework::DataLayout::kAnyLayout;
-#ifdef PADDLE_WITH_MKLDNN
-    if (library == framework::LibraryType::kPlain &&
-        platform::CanMKLDNNBeUsed(ctx)) {
-      library = framework::LibraryType::kMKLDNN;
-      layout = framework::DataLayout::kMKLDNN;
-    }
-#endif
     return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout,
                                    library);
@@ -140,9 +130,6 @@ class DataNormOpMaker : public framework::OpProtoAndCheckerMaker {
               "Scales of the history data batch, "
               "will apply to output when training")
         .AsIntermediate();
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false);
     AddComment(R"DOC(
 Data Normalization.
@@ -264,14 +251,6 @@ class DataNormGradOp : public framework::OperatorWithKernel {
     framework::LibraryType library = framework::LibraryType::kPlain;
     framework::DataLayout layout = framework::DataLayout::kAnyLayout;
-#ifdef PADDLE_WITH_MKLDNN
-    if (library == framework::LibraryType::kPlain &&
-        platform::CanMKLDNNBeUsed(ctx)) {
-      library = framework::LibraryType::kMKLDNN;
-      layout = framework::DataLayout::kMKLDNN;
-    }
-#endif
     return framework::OpKernelType(ctx.Input<Tensor>("X")->type(),
                                    ctx.GetPlace(), layout, library);
   }

@@ -117,11 +117,11 @@ class TeacherStudentSigmoidLossOpMaker
               "[N x 1]. The teacher student sigmoid loss.");
     AddAttr<float>(
         "soft_max_up_bound",
-        "fp32, if input > soft_max_up_bound, will be bound, default 15.0")
+        "fp32, if input > soft_max_up_bound, input will be bound, default 15.0")
         .SetDefault(15.0);
-    AddAttr<float>(
-        "soft_max_lower_bound",
-        "fp32, if input < soft_max_lower_bound, will be bound, default -15.0")
+    AddAttr<float>("soft_max_lower_bound",
+                   "fp32, if input < soft_max_lower_bound, input will be "
+                   "bound, default -15.0")
         .SetDefault(-15.0);
     AddComment(R"DOC(
 TeacherStudentSigmoidLoss Operator.
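The reworded attribute strings above only clarify the wording; the bound semantics are unchanged. A rough numpy sketch of what the two attributes describe, assuming plain clipping (the function name and the standalone clipping step are illustrative, not the operator's actual kernel code):

import numpy as np

def clamp_soft_max_input(x, soft_max_lower_bound=-15.0, soft_max_up_bound=15.0):
    # Inputs outside [soft_max_lower_bound, soft_max_up_bound] are bound
    # (clipped) to that interval, which keeps the exp() inside the
    # sigmoid/softmax terms from overflowing for large-magnitude values.
    return np.clip(x, soft_max_lower_bound, soft_max_up_bound)

print(clamp_soft_max_input(np.array([-30.0, 0.5, 42.0])))  # [-15.   0.5  15. ]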

@@ -2944,7 +2944,6 @@ def data_norm(input,
               param_attr=None,
               data_layout='NCHW',
               in_place=False,
-              use_mkldnn=False,
               name=None,
               moving_mean_name=None,
               moving_variance_name=None,
@@ -2978,7 +2977,6 @@ def data_norm(input,
         param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
         data_layout(string, default NCHW): NCHW|NHWC
         in_place(bool, Default False): Make the input and output of batch norm reuse memory.
-        use_mkldnn(bool, Default false): ${use_mkldnn_comment}
         name(string, Default None): A name for this layer(optional). If set None, the layer
             will be named automatically.
         moving_mean_name(string, Default None): The name of moving_mean which store the global Mean.
@@ -3059,8 +3057,7 @@ def data_norm(input,
         outputs={"Y": data_norm_out,
                  "Means": means,
                  "Scales": scales},
-        attrs={"epsilon": epsilon,
-               "use_mkldnn": use_mkldnn})
+        attrs={"epsilon": epsilon})
     return helper.append_activation(data_norm_out)
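With use_mkldnn gone from both the Python signature and the op attrs, callers simply drop the argument. A minimal usage sketch under that assumption (variable names and shapes are placeholders):

import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[32], dtype='float32')
# use_mkldnn is no longer a keyword argument of data_norm after this change;
# passing it would now raise a TypeError.
y = fluid.layers.data_norm(input=x, param_attr=None, data_layout='NCHW')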
@@ -9491,6 +9488,7 @@ def teacher_student_sigmoid_loss(input,
     Examples:
         .. code-block:: python
             cost = fluid.layers.teacher_student_sigmoid_loss(input=similarity, label=label)
     """
     helper = LayerHelper('teacher_student_sigmoid_loss', **locals())
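The docstring example touched in this hunk assumes similarity and label already exist in the program; a slightly fuller sketch (the data layer names and shapes are illustrative):

import paddle.fluid as fluid

similarity = fluid.layers.data(name='similarity', shape=[1], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='float32')
# soft_max_up_bound / soft_max_lower_bound keep their defaults of 15.0 / -15.0
cost = fluid.layers.teacher_student_sigmoid_loss(input=similarity, label=label)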
