Fix, test=develop

inference-pre-release-gpu
shippingwang 6 years ago
parent 5631fc0817
commit 9322d34032

@ -208,7 +208,7 @@ paddle.fluid.layers.bilinear_tensor_product ArgSpec(args=['x', 'y', 'size', 'act
paddle.fluid.layers.merge_selected_rows ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.get_tensor_from_selected_rows ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.lstm ArgSpec(args=['input', 'init_h', 'init_c', 'max_len', 'hidden_size', 'num_layers', 'dropout_prob', 'is_bidirec', 'is_test', 'name', 'default_initializer', 'seed'], varargs=None, keywords=None, defaults=(0.0, False, False, None, None, -1))
paddle.fluid.layers.shuffle_channel ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(1, None))
paddle.fluid.layers.shuffle_channel ArgSpec(args=['x', 'group', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.py_func ArgSpec(args=['func', 'x', 'out', 'backward_func', 'skip_vars_in_backward_input'], varargs=None, keywords=None, defaults=(None, None))
paddle.fluid.layers.psroi_pool ArgSpec(args=['input', 'rois', 'output_channels', 'spatial_scale', 'pooled_height', 'pooled_width', 'name'], varargs=None, keywords=None, defaults=(None,))
paddle.fluid.layers.huber_loss ArgSpec(args=['input', 'label', 'delta'], varargs=None, keywords=None, defaults=None)

@ -29,15 +29,13 @@ class ShuffleChannelOp : public framework::OperatorWithKernel {
ctx->SetOutputDim("Out", input_dims);
}
protected:
 /// Selects the kernel for this op: dtype follows input tensor "X",
 /// and the kernel runs on the device of the current execution context.
 /// (The previous implementation, which wrapped the dtype in the deprecated
 /// framework::ToDataType(), was removed rather than kept as dead
 /// commented-out code.)
 framework::OpKernelType GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const override {
   return framework::OpKernelType(ctx.Input<framework::Tensor>("X")->type(),
                                  ctx.device_context());
 }
};
class ShuffleChannelOpMaker : public framework::OpProtoAndCheckerMaker {
@ -89,16 +87,13 @@ class ShuffleChannelGradOp : public framework::OperatorWithKernel {
ctx->SetOutputDim(framework::GradVarName("X"), input_dims);
}
protected:
 /// Selects the kernel for the gradient op: dtype follows input tensor "X",
 /// and the kernel runs on the device of the current execution context.
 /// (The old implementation was left behind in a /* */ block and was also
 /// garbled — it repeated `framework::ToDataType(` twice with unbalanced
 /// parentheses — so the dead comment block is removed outright.)
 framework::OpKernelType GetExpectedKernelType(
     const framework::ExecutionContext& ctx) const override {
   return framework::OpKernelType(ctx.Input<framework::Tensor>("X")->type(),
                                  ctx.device_context());
 }
};
} // namespace operators

@ -50,7 +50,6 @@ class ShuffleChannelOpKernel : public framework::OpKernel<T> {
}
}
}
return;
}
};

@ -9335,13 +9335,13 @@ def get_tensor_from_selected_rows(x, name=None):
return out
def shuffle_channel(x, group=1, name=None):
def shuffle_channel(x, group, name=None):
"""
**Shuffle Channel Operator**
This operator obtains the group convolutional layer with channels shuffled.
First, divide the input channels in each group into several subgroups,
then, feed each group in the next layer with different subgroups.
Shuffle channel operation makes it possible to build more powerful structures
Channel shuffling operation makes it possible to build more powerful structures
with multiple group convolutional layers.
Args:

Loading…
Cancel
Save