Unify dtype and datatype (#5869)

* Change all `data_type` in Python to `dtype`

* Change `data_type` in C++ to `dtype`

* Refine
Branch: release/0.11.0
Author: fengjiayi (committed by GitHub)
Parent: 1ab1b092cd
Commit: 50d670ee06
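For readers skimming the diff, the user-visible effect is easiest to see at the Python layer: every keyword argument and op attribute spelled `data_type` becomes `dtype`. A minimal before/after sketch, using the `layers.data` call that appears in the test scripts further down:

```python
import paddle.v2.fluid.layers as layers

# Before this commit the keyword was `data_type`:
#   x = layers.data(name='x', shape=[13], data_type='float32')
# After this commit the unified spelling is `dtype`:
x = layers.data(name='x', shape=[13], dtype='float32')
```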

@@ -522,7 +522,7 @@ ParamGradInfoMap AppendBackward(
new OpDescBind("fill_constant", {}, {{"Out", {fill_one_op_out}}},
{{"shape", std::vector<int>{1}},
{"value", static_cast<float>(1.0)},
{"data_type", target.GetDataType()}}));
{"dtype", target.GetDataType()}}));
// infer var type of fill_one_op
fill_one_op->InferVarType(root_block);

@@ -302,7 +302,7 @@ LoDTensor TensorArray::Stack() const {
const auto& first_dims = values_.front().dims();
// check all the values have the same shape
-// TODO(superjom) check the same dtypes
+// TODO(superjom) check the same data_type
for (size_t idx = 1; idx < size(); idx++) {
const auto& value_dims = values_[idx].dims();
PADDLE_ENFORCE_EQ(first_dims, value_dims);

@@ -25,8 +25,8 @@ class CastOpProtoMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "The input tensor of cast op");
AddOutput("Out", "The output tensor of cast op");
AddAttr<int>("out_data_type", "output data type");
AddAttr<int>("in_data_type", "input data type");
AddAttr<int>("out_dtype", "output data type");
AddAttr<int>("in_dtype", "input data type");
AddComment(R"DOC(
Cast Operator.
@@ -58,8 +58,8 @@ class CastOpGradMaker : public framework::SingleGradOpDescMaker {
grad->SetType("cast");
grad->SetInput("X", OutputGrad("Out"));
grad->SetOutput("Out", InputGrad("X"));
grad->SetAttr("out_data_type", GetAttr("in_data_type"));
grad->SetAttr("in_data_type", GetAttr("out_data_type"));
grad->SetAttr("out_dtype", GetAttr("in_dtype"));
grad->SetAttr("in_dtype", GetAttr("out_dtype"));
return std::unique_ptr<framework::OpDescBind>(grad);
}
};

@@ -55,7 +55,7 @@ class CastOpKernel : public framework::OpKernel<InT> {
auto* in = context.Input<framework::Tensor>("X");
auto* out = context.Output<framework::Tensor>("Out");
framework::VisitDataType(
-static_cast<framework::DataType>(context.Attr<int>("out_data_type")),
+static_cast<framework::DataType>(context.Attr<int>("out_dtype")),
CastOpFunctor<Place, InT>(in, out, context.device_context()));
}
};
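Python callers construct cast ops with the renamed attributes; the Accuracy evaluator changes further down show the in-tree pattern. A minimal standalone sketch, assuming the `Program`/`Block` API from `paddle.v2.fluid.framework` and the integer DataType codes used throughout this diff (5 = FP32, 2 = INT32); the variable names are hypothetical:

```python
from paddle.v2.fluid.framework import Program

prog = Program()
block = prog.global_block()
x = block.create_var(name="x", dtype="float32")  # hypothetical source var
y = block.create_var(name="y", dtype="int32")    # hypothetical target var
block.append_op(
    type="cast",
    inputs={"X": [x]},
    outputs={"Out": [y]},
    attrs={
        "in_dtype": 5,   # float32; was "in_data_type" before this commit
        "out_dtype": 2,  # int32; was "out_data_type"
    })
```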

@@ -52,7 +52,7 @@ class FillConstantBatchSizeLikeOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelType(
const framework::ExecutionContext &ctx) const override {
return framework::OpKernelType(
-static_cast<framework::DataType>(ctx.Attr<int>("data_type")),
+static_cast<framework::DataType>(ctx.Attr<int>("dtype")),
ctx.device_context());
}
};
@@ -63,7 +63,7 @@ class FillConstantBatchSizeLikeOpMaker
FillConstantBatchSizeLikeOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: framework::OpProtoAndCheckerMaker(proto, op_checker) {
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5 (FP32)) "
"Output data type")
.SetDefault(framework::DataType::FP32);

@@ -34,7 +34,7 @@ class FillConstantOp : public framework::OperatorBase {
using framework::OperatorBase::OperatorBase;
void Run(const framework::Scope &scope,
const platform::DeviceContext &dev_ctx) const override {
-auto data_type = static_cast<framework::DataType>(Attr<int>("data_type"));
+auto data_type = static_cast<framework::DataType>(Attr<int>("dtype"));
auto value = Attr<float>("value");
auto force_cpu = Attr<bool>("force_cpu");
auto &out =
@@ -55,7 +55,7 @@ class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker {
FillConstantOpMaker(framework::OpProto *proto,
framework::OpAttrChecker *op_checker)
: framework::OpProtoAndCheckerMaker(proto, op_checker) {
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5 (FP32)) "
"Output data type")
.SetDefault(framework::DataType::FP32);
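The Python-side counterpart of this attribute shows up in the Evaluator and initializer changes below, which append `fill_constant` ops with a `dtype` entry in their attribute maps. A minimal sketch (variable names hypothetical; 5 = `framework::DataType::FP32`):

```python
from paddle.v2.fluid.framework import Program

prog = Program()
block = prog.global_block()
zeros = block.create_var(name="zeros", dtype="float32")  # hypothetical output var
block.append_op(
    type="fill_constant",
    outputs={"Out": [zeros]},
    attrs={
        "shape": [1],
        "value": 0.0,
        "dtype": 5,  # was "data_type" before this commit
    })
```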

@@ -60,7 +60,7 @@ class GaussianRandomOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(
-static_cast<framework::DataType>(ctx.Attr<int>("data_type")),
+static_cast<framework::DataType>(ctx.Attr<int>("dtype")),
ctx.device_context());
}
};
@@ -88,7 +88,7 @@ class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker {
"Random seed of generator."
"0 means use system wide seed.")
.SetDefault(0);
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5(FP32)) "
"Output data type.")
.SetDefault(framework::DataType::FP32);
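`NormalInitializer` (changed further down in this diff) is the main in-tree caller that must track this rename; it passes `dtype` alongside `mean`, `std`, and `seed`. A sketch of the op it now appends, with `block` and `var` as in the earlier sketches and the shape/statistics purely hypothetical:

```python
block.append_op(
    type="gaussian_random",
    outputs={"Out": var},
    attrs={
        "shape": [784, 100],  # hypothetical parameter shape
        "dtype": 5,           # framework.DataType.FP32; was "data_type"
        "mean": 0.0,
        "std": 1.0,
        "seed": 0,
    })
```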

@@ -49,7 +49,7 @@ class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker {
AddOutput("Communicator",
"Create Communicator for communicating between gpus");
AddAttr<std::vector<int>>("gpus", "(vector<int>) GPU id lists");
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5 (FP32)) "
"Output data type")
.SetDefault(framework::DataType::FP32);

@@ -401,7 +401,7 @@ class RecurrentGradOp : public RecurrentBase {
auto &inside_tensor = cur_scope.FindVar(inside_grad_name)
->Get<framework::LoDTensor>();
framework::AttributeMap attrs;
attrs["data_type"] = framework::ToDataType(inside_tensor.type());
attrs["dtype"] = framework::ToDataType(inside_tensor.type());
attrs["shape"] = framework::vectorize2int(inside_tensor.dims());
attrs["value"] = 0.0f;

@@ -62,7 +62,7 @@ class RNNMemoryHelperOpInfoMaker : public framework::OpProtoAndCheckerMaker {
: OpProtoAndCheckerMaker(proto, op_checker) {
AddInput("X", "");
AddOutput("Out", "");
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5 (FP32)) "
"Output data type")
.SetDefault(framework::DataType::FP32);
@@ -95,7 +95,7 @@ class RNNMemoryHelperGradOp : public framework::OperatorBase {
auto &in_var_tensor = in_var->Get<framework::LoDTensor>();
framework::AttributeMap attrs;
attrs["data_type"] = framework::ToDataType(in_var_tensor.type());
attrs["dtype"] = framework::ToDataType(in_var_tensor.type());
attrs["shape"] = framework::vectorize2int(in_var_tensor.dims());
attrs["value"] = 0.0f;
@@ -121,7 +121,7 @@ class RNNMemoryHelperGradOpInfoMaker
AddInput("X", "");
AddInput("Out", "");
AddOutput(framework::GradVarName("X"), "");
AddAttr<int>("data_type",
AddAttr<int>("dtype",
"(int, default 5 (FP32)) "
"Output data type")
.SetDefault(framework::DataType::FP32);

@@ -66,7 +66,7 @@ class UniformRandomOp : public framework::OperatorWithKernel {
framework::OpKernelType GetKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(
-static_cast<framework::DataType>(ctx.Attr<int>("data_type")),
+static_cast<framework::DataType>(ctx.Attr<int>("dtype")),
ctx.device_context());
}
};
@@ -99,7 +99,7 @@ uniform distribution.
"Random seed used for generating samples. "
"0 means use a seed generated by the system.")
.SetDefault(0);
AddAttr<int>("data_type", "(int, default 5(FP32)) Output tensor data type")
AddAttr<int>("dtype", "(int, default 5(FP32)) Output tensor data type")
.SetDefault(framework::DataType::FP32);
}
};
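`GetKernelType` above selects the kernel from the renamed attribute, so Python code appending a `uniform_random` op must now write `dtype`; `UniformInitializer` below does exactly this. A sketch of the call, with `block` as in the earlier sketches and the shape/bounds hypothetical:

```python
w = block.create_var(name="w", dtype="float32")  # hypothetical output var
block.append_op(
    type="uniform_random",
    outputs={"Out": [w]},
    attrs={
        "shape": [784, 100],
        "dtype": 5,   # read back by GetKernelType via ctx.Attr<int>("dtype")
        "min": -1.0,
        "max": 1.0,
        "seed": 0,
    })
```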

@@ -180,7 +180,7 @@ class WhileGradOp : public framework::OperatorBase {
if (var->IsType<LoDTensor>()) {
auto &inside_tensor = var->Get<framework::LoDTensor>();
framework::AttributeMap attrs;
attrs["data_type"] = framework::ToDataType(inside_tensor.type());
attrs["dtype"] = framework::ToDataType(inside_tensor.type());
attrs["shape"] = framework::vectorize2int(inside_tensor.dims());
attrs["value"] = 0.0f;

@@ -202,9 +202,9 @@ void BindVarDsec(py::module &m) {
},
py::return_value_policy::reference)
.def("set_shape", &VarDescBind::SetShape)
.def("set_data_type", &VarDescBind::SetDataType)
.def("set_dtype", &VarDescBind::SetDataType)
.def("shape", &VarDescBind::Shape, py::return_value_policy::reference)
.def("data_type", &VarDescBind::GetDataType)
.def("dtype", &VarDescBind::GetDataType)
.def("lod_level", &VarDescBind::GetLodLevel)
.def("set_lod_level", &VarDescBind::SetLoDLevel)
.def("type", &VarDescBind::GetType)

@@ -8,7 +8,7 @@ def _clone_var_in_block_(block, var):
return block.create_var(
name=var.name,
shape=var.shape,
-dtype=var.data_type,
+dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=True)
@@ -57,7 +57,7 @@ class Evaluator(object):
attrs={
"shape": g_var.shape,
"value": .0,
"data_type": 5,
"dtype": 5,
})
block.append_op(
type="scale", inputs={"X": zeros}, outputs={"Out": g_var})
@@ -93,7 +93,7 @@ class Accuracy(Evaluator):
def _update_ops(self, input, label, k=1, **kwargs):
block = self._main_program.global_block()
-topk_out = block.create_var(dtype=input.data_type)
+topk_out = block.create_var(dtype=input.dtype)
topk_indices = block.create_var(dtype="int64")
block.append_op(
type="top_k",
@@ -122,16 +122,16 @@ class Accuracy(Evaluator):
inputs={"X": [self._states["Total"]]},
outputs={"Out": [self._states["Total"]]},
attrs={
"in_data_type": 5, # float32
"out_data_type": 2, #int32
"in_dtype": 5, # float32
"out_dtype": 2, # int32
})
block.append_op(
type="cast",
inputs={"X": [self._states["Correct"]]},
outputs={"Out": [self._states["Correct"]]},
attrs={
"in_data_type": 5,
"out_data_type": 2,
"in_dtype": 5,
"out_dtype": 2,
})
block.append_op(
@@ -153,7 +153,7 @@ class Accuracy(Evaluator):
else:
eval_program = Program()
block = eval_program.global_block()
-eval_out = block.create_var(dtype=self._states["Total"].data_type)
+eval_out = block.create_var(dtype=self._states["Total"].dtype)
e_total = _clone_var_in_block_(block, self._states["Total"])
e_correct = _clone_var_in_block_(block, self._states["Correct"])
block.append_op(
@@ -161,16 +161,16 @@ class Accuracy(Evaluator):
inputs={"X": [e_total]},
outputs={"Out": [e_total]},
attrs={
"in_data_type": 2, #int32
"out_data_type": 5, #float32
"in_dtype": 2, # int32
"out_dtype": 5, # float32
})
block.append_op(
type="cast",
inputs={"X": [e_correct]},
outputs={"Out": [e_correct]},
attrs={
"in_data_type": 2,
"out_data_type": 5,
"in_dtype": 2,
"out_dtype": 5,
})
block.append_op(
type="elementwise_div",

@@ -99,9 +99,9 @@ class Variable(object):
if not isinstance(dtype, core.DataType):
dtype = convert_np_dtype_to_dtype_(dtype)
if is_new_var:
-self.desc.set_data_type(dtype)
+self.desc.set_dtype(dtype)
else:
-old_dtype = self.data_type
+old_dtype = self.dtype
if dtype != old_dtype:
raise ValueError("Variable {0} has been created before. "
"The previous data type is {1}; the new "
@@ -162,8 +162,8 @@ class Variable(object):
return tuple(self.desc.shape())
@property
-def data_type(self):
-return self.desc.data_type()
+def dtype(self):
+return self.desc.dtype()
@property
def lod_level(self):
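With the property renamed, downstream code reads `var.dtype` where it previously read `var.data_type`; every call site touched below follows this pattern. A quick usage sketch mirroring the `_clone_var_in_block_` helpers in this diff (`block` as in the earlier sketches):

```python
import paddle.v2.fluid.layers as layers

x = layers.data(name='x', shape=[13], dtype='float32')
# The property now mirrors the keyword argument:
clone = block.create_var(
    name=x.name,
    shape=x.shape,
    dtype=x.dtype,   # was x.data_type before this commit
    persistable=True)
```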

@@ -93,7 +93,7 @@ class ConstantInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"value": self._value
})
var.op = op
@@ -140,7 +140,7 @@ class UniformInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"min": self._low,
"max": self._high,
"seed": self._seed
@@ -188,7 +188,7 @@ class NormalInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"mean": self._mean,
"std": self._std_dev,
"seed": self._seed
@@ -265,7 +265,7 @@ class XavierInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"min": -limit,
"max": limit,
"seed": self._seed
@@ -278,7 +278,7 @@ class XavierInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"mean": 0.0,
"std": std,
"seed": self._seed
@@ -348,7 +348,7 @@ class MSRAInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"min": -limit,
"max": limit,
"seed": self._seed
@@ -361,7 +361,7 @@ class MSRAInitializer(Initializer):
outputs={"Out": var},
attrs={
"shape": var.shape,
"data_type": int(var.data_type),
"dtype": int(var.dtype),
"mean": 0.0,
"std": std,
"seed": self._seed

@@ -23,7 +23,7 @@ def _clone_var_in_block_(block, var):
return block.create_var(
name=var.name,
shape=var.shape,
-dtype=var.data_type,
+dtype=var.dtype,
type=var.type,
lod_level=var.lod_level,
persistable=True)

@@ -108,8 +108,8 @@ class LayerHelper(object):
dtype = None
for each in inputs:
if dtype is None:
-dtype = each.data_type
-elif dtype != each.data_type:
+dtype = each.dtype
+elif dtype != each.dtype:
raise ValueError("Data Type mismatch")
return dtype
@@ -149,7 +149,7 @@ class LayerHelper(object):
self.startup_program.global_block().create_var(
name=var.name,
type=var.type,
-dtype=var.data_type,
+dtype=var.dtype,
shape=var.shape,
persistable=True,
initializer=initializer)
@@ -180,10 +180,10 @@ class LayerHelper(object):
b = self.create_parameter(
attr=bias_attr,
shape=size,
-dtype=input_var.data_type,
+dtype=input_var.dtype,
suffix='b',
initializer=bias_initializer)
-tmp = self.create_tmp_variable(dtype=input_var.data_type)
+tmp = self.create_tmp_variable(dtype=input_var.dtype)
self.append_op(
type='elementwise_add',
inputs={'X': [input_var],
@@ -198,7 +198,7 @@ class LayerHelper(object):
return input_var
if isinstance(act, basestring):
act = {'type': act}
-tmp = self.create_tmp_variable(dtype=input_var.data_type)
+tmp = self.create_tmp_variable(dtype=input_var.dtype)
act_type = act.pop('type')
self.append_op(
type=act_type,

(File diff suppressed because it is too large.)

@@ -92,7 +92,7 @@ class Optimizer(object):
var = self.helper.create_global_variable(
name=unique_name(name),
persistable=True,
-dtype=dtype or param.data_type,
+dtype=dtype or param.dtype,
type=param.type,
shape=param.shape)
self.helper.set_variable_initializer(
@@ -202,7 +202,7 @@ class Optimizer(object):
"""
params_grads = append_backward_ops(loss, parameter_list, no_grad_set or
set())
-# Add regularization if any
+# Add regularization if any
params_grads = append_regularization_ops(params_grads)
optimize_ops = self.create_optimization_pass(params_grads, loss,
startup_program)

@@ -7,11 +7,11 @@ from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.io import save_persistables, load_persistables
from paddle.v2.fluid.optimizer import SGDOptimizer
-x = layers.data(name='x', shape=[13], data_type='float32')
+x = layers.data(name='x', shape=[13], dtype='float32')
y_predict = layers.fc(input=x, size=1, act=None)
-y = layers.data(name='y', shape=[1], data_type='float32')
+y = layers.data(name='y', shape=[1], dtype='float32')
cost = layers.square_error_cost(input=y_predict, label=y)
avg_cost = layers.mean(x=cost)

@@ -90,8 +90,8 @@ def vgg16_bn_drop(input):
classdim = 10
data_shape = [3, 32, 32]
-images = layers.data(name='pixel', shape=data_shape, data_type='float32')
-label = layers.data(name='label', shape=[1], data_type='int64')
+images = layers.data(name='pixel', shape=data_shape, dtype='float32')
+label = layers.data(name='label', shape=[1], dtype='int64')
# Add neural network config
# option 1. resnet

@@ -34,26 +34,26 @@ def load_parameter(file_name, h, w):
def db_lstm():
# 8 features
-word = layers.data(name='word_data', shape=[1], data_type='int64')
-predicate = layers.data(name='verb_data', shape=[1], data_type='int64')
-ctx_n2 = layers.data(name='ctx_n2_data', shape=[1], data_type='int64')
-ctx_n1 = layers.data(name='ctx_n1_data', shape=[1], data_type='int64')
-ctx_0 = layers.data(name='ctx_0_data', shape=[1], data_type='int64')
-ctx_p1 = layers.data(name='ctx_p1_data', shape=[1], data_type='int64')
-ctx_p2 = layers.data(name='ctx_p2_data', shape=[1], data_type='int64')
-mark = layers.data(name='mark_data', shape=[1], data_type='int64')
+word = layers.data(name='word_data', shape=[1], dtype='int64')
+predicate = layers.data(name='verb_data', shape=[1], dtype='int64')
+ctx_n2 = layers.data(name='ctx_n2_data', shape=[1], dtype='int64')
+ctx_n1 = layers.data(name='ctx_n1_data', shape=[1], dtype='int64')
+ctx_0 = layers.data(name='ctx_0_data', shape=[1], dtype='int64')
+ctx_p1 = layers.data(name='ctx_p1_data', shape=[1], dtype='int64')
+ctx_p2 = layers.data(name='ctx_p2_data', shape=[1], dtype='int64')
+mark = layers.data(name='mark_data', shape=[1], dtype='int64')
predicate_embedding = layers.embedding(
input=predicate,
size=[pred_len, word_dim],
-data_type='float32',
+dtype='float32',
is_sparse=IS_SPARSE,
param_attr={'name': 'vemb'})
mark_embedding = layers.embedding(
input=mark,
size=[mark_dict_len, mark_dim],
-data_type='float32',
+dtype='float32',
is_sparse=IS_SPARSE)
word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
@@ -125,7 +125,7 @@ def to_lodtensor(data, place):
def main():
# define network topology
feature_out = db_lstm()
-target = layers.data(name='target', shape=[1], data_type='int64')
+target = layers.data(name='target', shape=[1], dtype='int64')
crf_cost = layers.linear_chain_crf(
input=feature_out,
label=target,

@@ -8,8 +8,8 @@ import paddle.v2.fluid.nets as nets
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.optimizer import AdamOptimizer
-images = layers.data(name='pixel', shape=[1, 28, 28], data_type='float32')
-label = layers.data(name='label', shape=[1], data_type='int64')
+images = layers.data(name='pixel', shape=[1, 28, 28], dtype='float32')
+label = layers.data(name='label', shape=[1], dtype='int64')
conv_pool_1 = nets.simple_img_conv_pool(
input=images,
filter_size=5,

@@ -10,7 +10,7 @@ from paddle.v2.fluid.optimizer import MomentumOptimizer
from paddle.v2.fluid.regularizer import L2DecayRegularizer
BATCH_SIZE = 128
-image = layers.data(name='x', shape=[784], data_type='float32')
+image = layers.data(name='x', shape=[784], dtype='float32')
param_attr = {
'name': None,
@@ -27,7 +27,7 @@ predict = layers.fc(input=hidden2,
act='softmax',
param_attr=param_attr)
-label = layers.data(name='y', shape=[1], data_type='int64')
+label = layers.data(name='y', shape=[1], dtype='int64')
cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(x=cost)

(Some files were not shown because too many files have changed in this diff.)
