parent
463f88a75c
commit
8643dbc233
@ -0,0 +1,100 @@
|
||||
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "paddle/fluid/inference/anakin/convert/affine_channel.h"
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
using anakin::graph::GraphGlobalMem;
|
||||
using anakin::AK_FLOAT;
|
||||
using anakin::Precision;
|
||||
using anakin::saber::NV;
|
||||
using anakin::saber::X86;
|
||||
using anakin::saber::Shape;
|
||||
using anakin::PBlock;
|
||||
using anakin::PTuple;
|
||||
|
||||
namespace paddle {
|
||||
namespace inference {
|
||||
namespace anakin {
|
||||
|
||||
void AffineChannelOpConverter::operator()(
|
||||
const framework::proto::OpDesc &op, const framework::BlockDesc &block_desc,
|
||||
const framework::Scope &scope, bool test_mode) {
|
||||
framework::OpDesc op_desc(op, nullptr);
|
||||
PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1);
|
||||
PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1);
|
||||
|
||||
auto op_name = op_desc.Type() + ":" + op_desc.Output("Out").front();
|
||||
|
||||
auto input_name = op_desc.Input("X").front();
|
||||
auto output_name = op_desc.Output("Out").front();
|
||||
|
||||
// Copy the Scale to CPUPlace and get the pointer.
|
||||
auto *scale_v = scope.FindVar(op_desc.Input("Scale").front());
|
||||
PADDLE_ENFORCE_NOT_NULL(scale_v);
|
||||
auto *scale_t = scale_v->GetMutable<framework::LoDTensor>();
|
||||
std::unique_ptr<framework::LoDTensor> scale_tensor(
|
||||
new framework::LoDTensor());
|
||||
scale_tensor->Resize(scale_t->dims());
|
||||
TensorCopySync((*scale_t), platform::CPUPlace(), scale_tensor.get());
|
||||
|
||||
// Copy the Bias to CPUPlace and get the pointer.
|
||||
auto *bias_v = scope.FindVar(op_desc.Input("Bias").front());
|
||||
PADDLE_ENFORCE_NOT_NULL(bias_v);
|
||||
auto *bias_t = bias_v->GetMutable<framework::LoDTensor>();
|
||||
std::unique_ptr<framework::LoDTensor> bias_tensor(new framework::LoDTensor());
|
||||
bias_tensor->Resize(bias_t->dims());
|
||||
TensorCopySync((*bias_t), platform::CPUPlace(), bias_tensor.get());
|
||||
|
||||
engine_->AddOp(op_name, "AffineChannel", {input_name}, {output_name});
|
||||
|
||||
// Generate the Scale parameter of Anakin.
|
||||
auto scale_shape = framework::vectorize2int(scale_t->dims());
|
||||
while (scale_shape.size() < 4) {
|
||||
scale_shape.insert(scale_shape.begin(), 1);
|
||||
}
|
||||
Shape anakin_scale_shape(scale_shape);
|
||||
auto *weight1 = GraphGlobalMem<NV>::Global().template new_block<AK_FLOAT>(
|
||||
anakin_scale_shape);
|
||||
float *scale_cpu_data =
|
||||
static_cast<float *>(weight1->h_tensor().mutable_data());
|
||||
std::copy_n(scale_tensor->data<float>(), scale_tensor->numel(),
|
||||
scale_cpu_data);
|
||||
weight1->d_tensor().set_shape(anakin_scale_shape);
|
||||
weight1->d_tensor().copy_from(weight1->h_tensor());
|
||||
engine_->AddOpAttr(op_name, "weight_1", *weight1);
|
||||
|
||||
// Generate the Bias parameter of Anakin.
|
||||
auto bias_shape = framework::vectorize2int(bias_t->dims());
|
||||
while (bias_shape.size() < 4) {
|
||||
bias_shape.insert(bias_shape.begin(), 1);
|
||||
}
|
||||
Shape anakin_bias_shape(bias_shape);
|
||||
auto *weight2 = GraphGlobalMem<NV>::Global().template new_block<AK_FLOAT>(
|
||||
anakin_bias_shape);
|
||||
float *bias_cpu_data =
|
||||
static_cast<float *>(weight2->h_tensor().mutable_data());
|
||||
std::copy_n(bias_tensor->data<float>(), bias_tensor->numel(), bias_cpu_data);
|
||||
weight2->d_tensor().set_shape(anakin_bias_shape);
|
||||
weight2->d_tensor().copy_from(weight2->h_tensor());
|
||||
engine_->AddOpAttr(op_name, "weight_2", *weight2);
|
||||
}
|
||||
|
||||
} // namespace anakin
|
||||
} // namespace inference
|
||||
} // namespace paddle
|
||||
|
||||
REGISTER_ANAKIN_OP_CONVERTER(affine_channel, AffineChannelOpConverter);
|
@ -0,0 +1,39 @@
|
||||
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace inference {
|
||||
namespace anakin {
|
||||
|
||||
class AffineChannelOpConverter : public AnakinOpConverter {
|
||||
public:
|
||||
AffineChannelOpConverter() = default;
|
||||
|
||||
virtual void operator()(const framework::proto::OpDesc &op,
|
||||
const framework::BlockDesc &block_desc,
|
||||
const framework::Scope &scope,
|
||||
bool test_mode) override;
|
||||
virtual ~AffineChannelOpConverter() {}
|
||||
|
||||
private:
|
||||
};
|
||||
|
||||
} // namespace anakin
|
||||
} // namespace inference
|
||||
} // namespace paddle
|
@ -0,0 +1,59 @@
|
||||
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "paddle/fluid/inference/anakin/convert/roi_align.h"
|
||||
#include <algorithm>
|
||||
#include <map>
|
||||
|
||||
using anakin::graph::GraphGlobalMem;
|
||||
using anakin::AK_FLOAT;
|
||||
using anakin::saber::NV;
|
||||
using anakin::saber::Shape;
|
||||
|
||||
namespace paddle {
|
||||
namespace inference {
|
||||
namespace anakin {
|
||||
|
||||
// Converts a Paddle `roi_align` op into the Anakin "RoiAlign" op, forwarding
// the pooling attributes from the Paddle op description unchanged.
void RoiAlignOpConverter::operator()(const framework::proto::OpDesc &op,
                                     const framework::BlockDesc &block_desc,
                                     const framework::Scope &scope,
                                     bool test_mode) {
  framework::OpDesc op_desc(op, nullptr);
  // Exactly one feature-map input, one ROI input and one output expected.
  PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1);
  PADDLE_ENFORCE_EQ(op_desc.Input("ROIs").size(), 1);
  PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1);

  const auto x_name = op_desc.Input("X").front();
  const auto rois_name = op_desc.Input("ROIs").front();
  const auto out_name = op_desc.Output("Out").front();
  const auto op_name = op_desc.Type() + ":" + out_name;

  engine_->AddOp(op_name, "RoiAlign", {x_name, rois_name}, {out_name});
  engine_->AddOpAttr(op_name, "spatial_scale",
                     boost::get<float>(op_desc.GetAttr("spatial_scale")));
  engine_->AddOpAttr(op_name, "pooled_height",
                     boost::get<int>(op_desc.GetAttr("pooled_height")));
  engine_->AddOpAttr(op_name, "pooled_width",
                     boost::get<int>(op_desc.GetAttr("pooled_width")));
  engine_->AddOpAttr(op_name, "sampling_ratio",
                     boost::get<int>(op_desc.GetAttr("sampling_ratio")));
}
|
||||
|
||||
} // namespace anakin
|
||||
} // namespace inference
|
||||
} // namespace paddle
|
||||
|
||||
REGISTER_ANAKIN_OP_CONVERTER(roi_align, RoiAlignOpConverter);
|
@ -0,0 +1,38 @@
|
||||
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <map>
|
||||
#include <string>
|
||||
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace inference {
|
||||
namespace anakin {
|
||||
|
||||
class RoiAlignOpConverter : public AnakinOpConverter {
|
||||
public:
|
||||
RoiAlignOpConverter() = default;
|
||||
|
||||
virtual void operator()(const framework::proto::OpDesc &op,
|
||||
const framework::BlockDesc &block_desc,
|
||||
const framework::Scope &scope,
|
||||
bool test_mode) override;
|
||||
virtual ~RoiAlignOpConverter() {}
|
||||
};
|
||||
|
||||
} // namespace anakin
|
||||
} // namespace inference
|
||||
} // namespace paddle
|
@ -0,0 +1,55 @@
|
||||
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. */
|
||||
|
||||
#include <gtest/gtest.h>
|
||||
#include "paddle/fluid/inference/anakin/convert/affine_channel.h"
|
||||
#include "paddle/fluid/inference/anakin/convert/op_converter.h"
|
||||
#include "paddle/fluid/inference/anakin/convert/ut_helper.h"
|
||||
|
||||
namespace paddle {
|
||||
namespace inference {
|
||||
namespace anakin {
|
||||
|
||||
// End-to-end check that the affine_channel converter produces the same
// result through Anakin as the native Paddle op.
TEST(affine_channel, native) {
  // "scale" and "bias" are persistable parameters, not feed inputs.
  std::unordered_set<std::string> param_names({"scale", "bias"});

  framework::Scope scope;
  AnakinConvertValidation validator(param_names, &scope);

  // One 3-channel NCHW feature map; scale/bias carry one value per channel.
  validator.DeclInputVar("x", {1, 3, 5, 2});
  validator.DeclOutputVar("out", {1, 3, 5, 2});
  validator.DeclParamVar("scale", {1, 3, 1, 1});
  validator.DeclParamVar("bias", {1, 3, 1, 1});

  // Build the affine_channel op description to convert.
  framework::OpDesc affine_desc;
  affine_desc.SetType("affine_channel");
  affine_desc.SetInput("X", {"x"});
  affine_desc.SetInput("Bias", {"bias"});
  affine_desc.SetInput("Scale", {"scale"});
  affine_desc.SetOutput("Out", {"out"});

  // Layout must be explicitly specified here as NCHW.
  affine_desc.SetAttr("data_layout", std::string("NCHW"));

  validator.SetOp(*affine_desc.Proto());
  validator.Execute(1);
}
|
||||
|
||||
} // namespace anakin
|
||||
} // namespace inference
|
||||
} // namespace paddle
|
||||
|
||||
USE_OP(affine_channel);
|
||||
USE_ANAKIN_CONVERTER(affine_channel);
|
Loading…
Reference in new issue