revert-15470-feature/imperative
JiabinYang 6 years ago
parent 3be8ffad2f
commit ba981604fd

@@ -555,18 +555,17 @@ Tensor* ExecutionContext::LegacyOutput<Tensor>(const std::string& name) const {
 template <>
 std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
     const std::string& name) const {
-  auto names = op().Outputs(name);
+  auto it = ctx_.outputs.find(name);
+  if (it == ctx_.outputs.end()) {
+    return {};
+  }
+  const std::vector<Variable*>& vars = it->second;
   std::vector<Tensor*> res;
-  res.reserve(names.size());
-  std::transform(names.begin(), names.end(), std::back_inserter(res),
-                 [&](const std::string& sub_name) -> Tensor* {
-                   auto var = scope_.FindVar(sub_name);
-                   if (var == nullptr) return nullptr;
-                   PADDLE_ENFORCE(
-                       var->IsType<LoDTensor>(),
-                       "%s should be LoDTensor, but the received type is %s",
-                       sub_name, ToTypeName(var->Type()));
-                   return var->GetMutable<LoDTensor>();
+  res.reserve(vars.size());
+  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
+                 [&](Variable* var) -> Tensor* {
+                   return var == nullptr ? nullptr
+                                         : var->GetMutable<LoDTensor>();
                  });
   return res;
 }
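In the hunk above, MultiOutput<Tensor> stops resolving each output name through scope_.FindVar (and drops the per-name LoDTensor type check) and instead reads the pre-resolved Variable* list straight out of ctx_.outputs, mapping missing variables to nullptr. A minimal Python sketch of the new lookup behavior, for illustration only; ctx_outputs and get_tensor are hypothetical stand-ins for the C++ members, not Paddle API:

    # `ctx_outputs`: dict from output-slot name to a list of possibly-None
    # variables (the role of ctx_.outputs); `get_tensor` stands in for
    # Variable::GetMutable<LoDTensor>().
    def multi_output(ctx_outputs, name):
        vars_for_name = ctx_outputs.get(name)
        if vars_for_name is None:  # slot absent: empty result
            return []
        # Missing (None) variables map to None instead of raising.
        return [None if v is None else v.get_tensor() for v in vars_for_name]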

@@ -22,13 +22,7 @@ from . import layers
 from ..framework import Variable, OpProtoHolder
 from ..param_attr import ParamAttr
 from ..initializer import Normal, Constant
-__all__ = [
-    'Conv2D',
-    'Pool2D',
-    'FC',
-    'BatchNorm',
-    'EMBEDDING'
-]
+__all__ = ['Conv2D', 'Pool2D', 'FC', 'BatchNorm', 'EMBEDDING']
 
 
 class Conv2D(layers.Layer):
@@ -419,8 +413,6 @@ class BatchNorm(layers.Layer):
         # Currently, we don't support inplace in imperative mode
         return self._helper.append_activation(batch_norm_out)
-            outputs={'Out': [bias_out]},
class EMBEDDING(layers.Layer):
@@ -438,7 +430,7 @@ class EMBEDDING(layers.Layer):
         self._is_distributed = is_distributed
         self._padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
-            size[0] + padding_idx)
+                size[0] + padding_idx)
         self._param_attr = param_attr
         self._dtype = dtype
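The nested conditional above packs three padding_idx cases into one expression: None disables padding (stored as -1), a non-negative index is kept as-is, and a negative index counts back from the vocabulary size. A standalone sketch of the same rule; normalize_padding_idx is a hypothetical helper, not part of the layer:

    def normalize_padding_idx(padding_idx, vocab_size):
        # None -> -1 (padding disabled); negatives count back from vocab_size.
        if padding_idx is None:
            return -1
        return padding_idx if padding_idx >= 0 else vocab_size + padding_idx

    assert normalize_padding_idx(None, 100) == -1  # padding disabled
    assert normalize_padding_idx(5, 100) == 5      # kept as-is
    assert normalize_padding_idx(-1, 100) == 99    # -1 means the last row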

@@ -338,7 +338,6 @@ class TestImperative(unittest.TestCase):
         dy_grad_i2h = simple_rnn._cell._i2h_w._gradient()
 
         with new_program_scope():
-            print("im here")
             inp = fluid.layers.data(
                 name="inp", shape=[1, 4, 3], append_batch_size=False)
             simple_rnn = SimpleRNN()

@@ -0,0 +1,45 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import paddle.fluid as fluid
+from paddle.fluid.imperative.base import to_variable
+import numpy as np
+
+
+class Split_test(fluid.imperative.Layer):
+    def __init__(self):
+        super(Split_test, self).__init__()
+
+    def _build_once(self, input):
+        pass
+
+    def forward(self, input):
+        out = fluid.layers.split(input, num_or_sections=4, dim=-1)
+        return out
+
+
+class TestImperativeSplit(unittest.TestCase):
+    def test_split(self):
+        with fluid.imperative.guard():
+            inp = to_variable(np.arange(160).reshape(4, 40).astype('float32'))
+            st = Split_test()
+            out = st(inp)
+            print(out)
+
+
+if __name__ == '__main__':
+    unittest.main()
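The new test only prints the split result. A tighter check, sketched under the assumption that an imperative Variable of this era exposes _numpy() (as other imperative tests in the tree do): splitting a (4, 40) input into 4 sections along dim=-1 should yield four (4, 10) pieces.

    class TestImperativeSplitShapes(unittest.TestCase):
        def test_split_shapes(self):
            with fluid.imperative.guard():
                inp = to_variable(
                    np.arange(160).reshape(4, 40).astype('float32'))
                outs = Split_test()(inp)
                self.assertEqual(len(outs), 4)
                for piece in outs:
                    # _numpy() assumed to materialize the Variable's data
                    self.assertEqual(piece._numpy().shape, (4, 10))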