Fix CPPLint issues in some tests in fluid/framework (#10068)

* Fix CPPLint in data_device_transform_test

* Fix compilation error

* Fix compilation error

* Fix CPPLint errors in data_layout_transform_test

* Fix CPPLint errors in data_type_transform_test

* Fix CPPLint errors in data_type_transform_test.cu

* Fix compilation error

* Fix CPPLint issues in threadpool_test

* Fix CPPLint issues in op_registry_test

* Fix CPPLint issues in operator_test

* Fix compilation error

* test
Branch: wangkuiyi-patch-2
Author: Abhinav Arora, committed by GitHub
Parent: 12ae354a10 · Commit: 6402b59a7c
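
Every hunk below follows the same pattern: cpplint's build/namespaces check rejects `using namespace` directives in .cc/.cu files, so the tests now spell out fully qualified names at each use site. A minimal before/after sketch of the pattern (hypothetical snippet, not taken from this diff):

    #include "paddle/fluid/framework/tensor.h"

    // Flagged by cpplint [build/namespaces]:
    //   using namespace paddle::framework;
    //   Tensor t;

    // Accepted: qualify the name where it is used.
    paddle::framework::Tensor t;

The threadpool change additionally addresses cpplint's runtime/references check, which disallows mutable (non-const) reference parameters; see the sketch after that diff.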

@@ -103,9 +103,7 @@ static void BuildVar(const std::string& param_name,
 }
 
 TEST(Operator, CPUtoGPU) {
-  using namespace paddle::framework;
-  using namespace paddle::platform;
-  InitDevices(true);
+  paddle::framework::InitDevices(true);
 
   paddle::framework::Scope scope;
   paddle::platform::CPUPlace cpu_place;
@@ -118,8 +116,9 @@ TEST(Operator, CPUtoGPU) {
   auto cpu_op = paddle::framework::OpRegistry::CreateOp(cpu_op_desc);
 
   // prepare input
-  auto* in_t = scope.Var("IN1")->GetMutable<LoDTensor>();
-  auto* src_ptr = in_t->mutable_data<float>({2, 3}, CPUPlace());
+  auto* in_t = scope.Var("IN1")->GetMutable<paddle::framework::LoDTensor>();
+  auto* src_ptr =
+      in_t->mutable_data<float>({2, 3}, paddle::platform::CPUPlace());
   for (int i = 0; i < 2 * 3; ++i) {
     src_ptr[i] = static_cast<float>(i);
   }
@@ -128,7 +127,7 @@ TEST(Operator, CPUtoGPU) {
 
   auto* output = scope.Var("OUT1");
   cpu_op->Run(scope, cpu_place);
-  auto* output_ptr = output->Get<LoDTensor>().data<float>();
+  auto* output_ptr = output->Get<paddle::framework::LoDTensor>().data<float>();
   for (int i = 0; i < 2 * 3; ++i) {
     ASSERT_EQ(output_ptr[i], static_cast<float>(i) * 2);
   }
@@ -153,12 +152,14 @@ TEST(Operator, CPUtoGPU) {
   VLOG(3) << "after gpu_op run";
 
   // auto* output2_ptr = output2->Get<LoDTensor>().data<float>();
-  DeviceContextPool& pool = DeviceContextPool::Instance();
+  paddle::platform::DeviceContextPool& pool =
+      paddle::platform::DeviceContextPool::Instance();
   auto dev_ctx = pool.Get(cuda_place);
 
   paddle::framework::Tensor output_tensor;
-  TensorCopy(output2->Get<LoDTensor>(), paddle::platform::CPUPlace(), *dev_ctx,
-             &output_tensor);
+  paddle::framework::TensorCopy(output2->Get<paddle::framework::LoDTensor>(),
+                                paddle::platform::CPUPlace(), *dev_ctx,
+                                &output_tensor);
 
   dev_ctx->Wait();
   float* output2_ptr = output_tensor.data<float>();
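
In the hunk above, dev_ctx->Wait() matters because a copy issued through a CUDA device context is asynchronous; the host pointer is only safe to read after the stream has drained. The same reasoning in raw CUDA runtime calls (a sketch, not Paddle's DeviceContext internals):

    #include <cuda_runtime.h>
    #include <cstdio>

    int main() {
      float host_in[6] = {0, 1, 2, 3, 4, 5};
      float host_out[6] = {};
      float* dev = nullptr;
      cudaStream_t stream;
      cudaStreamCreate(&stream);
      cudaMalloc(&dev, sizeof(host_in));
      cudaMemcpyAsync(dev, host_in, sizeof(host_in), cudaMemcpyHostToDevice,
                      stream);
      cudaMemcpyAsync(host_out, dev, sizeof(host_in), cudaMemcpyDeviceToHost,
                      stream);
      // Reading host_out before this sync would race the async copy; this is
      // the role dev_ctx->Wait() plays in the test above.
      cudaStreamSynchronize(stream);
      std::printf("host_out[5] = %f\n", host_out[5]);
      cudaFree(dev);
      cudaStreamDestroy(stream);
      return 0;
    }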

@@ -18,27 +18,28 @@
 #include "paddle/fluid/platform/device_context.h"
 
 TEST(DataTransform, DataLayoutFunction) {
-  using namespace paddle::framework;
-  using namespace paddle::platform;
-
-  auto place = CPUPlace();
-  Tensor in = Tensor();
-  Tensor out = Tensor();
-  in.mutable_data<double>(make_ddim({2, 3, 1, 2}), place);
-  in.set_layout(DataLayout::kNHWC);
-
-  auto kernel_nhwc = OpKernelType(proto::VarType::FP32, place,
-                                  DataLayout::kNHWC, LibraryType::kPlain);
-  auto kernel_ncwh = OpKernelType(proto::VarType::FP32, place,
-                                  DataLayout::kNCHW, LibraryType::kPlain);
+  auto place = paddle::platform::CPUPlace();
+  paddle::framework::Tensor in = paddle::framework::Tensor();
+  paddle::framework::Tensor out = paddle::framework::Tensor();
+  in.mutable_data<double>(paddle::framework::make_ddim({2, 3, 1, 2}), place);
+  in.set_layout(paddle::framework::DataLayout::kNHWC);
+
+  auto kernel_nhwc = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP32, place,
+      paddle::framework::DataLayout::kNHWC,
+      paddle::framework::LibraryType::kPlain);
+  auto kernel_ncwh = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP32, place,
+      paddle::framework::DataLayout::kNCHW,
+      paddle::framework::LibraryType::kPlain);
 
-  TransDataLayout(kernel_nhwc, kernel_ncwh, in, &out);
+  paddle::framework::TransDataLayout(kernel_nhwc, kernel_ncwh, in, &out);
 
-  EXPECT_TRUE(out.layout() == DataLayout::kNCHW);
-  EXPECT_TRUE(out.dims() == make_ddim({2, 2, 3, 1}));
+  EXPECT_TRUE(out.layout() == paddle::framework::DataLayout::kNCHW);
+  EXPECT_TRUE(out.dims() == paddle::framework::make_ddim({2, 2, 3, 1}));
 
   TransDataLayout(kernel_ncwh, kernel_nhwc, in, &out);
 
-  EXPECT_TRUE(in.layout() == DataLayout::kNHWC);
-  EXPECT_TRUE(in.dims() == make_ddim({2, 3, 1, 2}));
+  EXPECT_TRUE(in.layout() == paddle::framework::DataLayout::kNHWC);
+  EXPECT_TRUE(in.dims() == paddle::framework::make_ddim({2, 3, 1, 2}));
 }
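
The dim expectations in this test follow from the NHWC-to-NCHW axis permutation {0, 3, 1, 2}: the input dims {2, 3, 1, 2} read as (N, H, W, C) become (N, C, H, W) = {2, 2, 3, 1}. A standalone sketch of that arithmetic (plain C++, no Paddle dependencies):

    #include <array>
    #include <cassert>

    int main() {
      const std::array<int, 4> nhwc = {2, 3, 1, 2};  // (N, H, W, C) as in the test
      const std::array<int, 4> perm = {0, 3, 1, 2};  // NHWC -> NCHW axis order
      std::array<int, 4> nchw;
      for (int i = 0; i < 4; ++i) nchw[i] = nhwc[perm[i]];
      // Matches the EXPECT_TRUE on out.dims() above.
      assert((nchw == std::array<int, 4>{2, 2, 3, 1}));
      return 0;
    }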

@@ -17,43 +17,58 @@ limitations under the License. */
 #include "gtest/gtest.h"
 
 TEST(DataTypeTransform, CPUTransform) {
-  using namespace paddle::framework;
-  using namespace paddle::platform;
-
-  auto place = CPUPlace();
-
-  auto kernel_fp16 = OpKernelType(proto::VarType::FP16, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_fp32 = OpKernelType(proto::VarType::FP32, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_fp64 = OpKernelType(proto::VarType::FP64, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_int32 = OpKernelType(proto::VarType::INT32, place,
-                                   DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_int64 = OpKernelType(proto::VarType::INT64, place,
-                                   DataLayout::kAnyLayout, LibraryType::kPlain);
-  auto kernel_bool = OpKernelType(proto::VarType::BOOL, place,
-                                  DataLayout::kAnyLayout, LibraryType::kPlain);
+  auto place = paddle::platform::CPUPlace();
+
+  auto kernel_fp16 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP16, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_fp32 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP32, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_fp64 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::FP64, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_int32 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::INT32, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_int64 = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::INT64, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
+
+  auto kernel_bool = paddle::framework::OpKernelType(
+      paddle::framework::proto::VarType::BOOL, place,
+      paddle::framework::DataLayout::kAnyLayout,
+      paddle::framework::LibraryType::kPlain);
 
   // data type transform from float32
   {
-    Tensor in;
-    Tensor out;
+    paddle::framework::Tensor in;
+    paddle::framework::Tensor out;
 
-    float* ptr = in.mutable_data<float>(make_ddim({2, 3}), place);
+    float* ptr =
+        in.mutable_data<float>(paddle::framework::make_ddim({2, 3}), place);
     int data_number = 2 * 3;
 
     for (int i = 0; i < data_number; ++i) {
       ptr[i] = i / 3;
     }
 
-    TransDataType(kernel_fp32, kernel_fp64, in, &out);
+    paddle::framework::TransDataType(kernel_fp32, kernel_fp64, in, &out);
     double* out_data_double = out.data<double>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_double[i], static_cast<double>(i / 3));
     }
 
-    TransDataType(kernel_fp32, kernel_int32, in, &out);
+    paddle::framework::TransDataType(kernel_fp32, kernel_int32, in, &out);
     int* out_data_int = out.data<int>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_int[i], static_cast<int>(i / 3));
@@ -62,10 +77,11 @@ TEST(DataTypeTransform, CPUTransform) {
 
   // data type transform from/to float16
   {
-    Tensor in;
-    Tensor out;
+    paddle::framework::Tensor in;
+    paddle::framework::Tensor out;
 
-    float16* ptr = in.mutable_data<float16>(make_ddim({2, 3}), place);
+    paddle::platform::float16* ptr = in.mutable_data<paddle::platform::float16>(
+        paddle::framework::make_ddim({2, 3}), place);
     int data_number = 2 * 3;
 
     for (int i = 0; i < data_number; ++i) {
@@ -73,94 +89,104 @@ TEST(DataTypeTransform, CPUTransform) {
     }
 
     // transform from float16 to other data types
-    TransDataType(kernel_fp16, kernel_fp32, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_fp32, in, &out);
     float* out_data_float = out.data<float>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_float[i], static_cast<float>(ptr[i]));
     }
 
-    TransDataType(kernel_fp16, kernel_fp64, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_fp64, in, &out);
     double* out_data_double = out.data<double>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_double[i], static_cast<double>(ptr[i]));
     }
 
-    TransDataType(kernel_fp16, kernel_int32, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_int32, in, &out);
     int* out_data_int = out.data<int>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_int[i], static_cast<int>(ptr[i]));
     }
 
-    TransDataType(kernel_fp16, kernel_int64, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_int64, in, &out);
     int64_t* out_data_int64 = out.data<int64_t>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_int64[i], static_cast<int64_t>(ptr[i]));
     }
 
-    TransDataType(kernel_fp16, kernel_bool, in, &out);
+    paddle::framework::TransDataType(kernel_fp16, kernel_bool, in, &out);
     bool* out_data_bool = out.data<bool>();
     for (int i = 0; i < data_number; ++i) {
       EXPECT_EQ(out_data_bool[i], static_cast<bool>(ptr[i]));
     }
 
     // transform float to float16
-    float* in_data_float = in.mutable_data<float>(make_ddim({2, 3}), place);
+    float* in_data_float =
+        in.mutable_data<float>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_float[i] = i;
     }
 
-    TransDataType(kernel_fp32, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_fp32, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_float[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_float[i]).x);
     }
 
     // transform double to float16
-    double* in_data_double = in.mutable_data<double>(make_ddim({2, 3}), place);
+    double* in_data_double =
+        in.mutable_data<double>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_double[i] = i;
     }
 
-    TransDataType(kernel_fp64, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_fp64, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_double[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_double[i]).x);
     }
 
     // transform int to float16
-    int* in_data_int = in.mutable_data<int>(make_ddim({2, 3}), place);
+    int* in_data_int =
+        in.mutable_data<int>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_int[i] = i;
     }
 
-    TransDataType(kernel_int32, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_int32, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_int[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_int[i]).x);
     }
 
     // transform int64 to float16
-    int64_t* in_data_int64 = in.mutable_data<int64_t>(make_ddim({2, 3}), place);
+    int64_t* in_data_int64 =
+        in.mutable_data<int64_t>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_int64[i] = i;
     }
 
-    TransDataType(kernel_int64, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_int64, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_int64[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_int64[i]).x);
     }
 
     // transform bool to float16
-    bool* in_data_bool = in.mutable_data<bool>(make_ddim({2, 3}), place);
+    bool* in_data_bool =
+        in.mutable_data<bool>(paddle::framework::make_ddim({2, 3}), place);
     for (int i = 0; i < data_number; ++i) {
       in_data_bool[i] = i;
     }
 
-    TransDataType(kernel_bool, kernel_fp16, in, &out);
-    ptr = out.data<float16>();
+    paddle::framework::TransDataType(kernel_bool, kernel_fp16, in, &out);
+    ptr = out.data<paddle::platform::float16>();
     for (int i = 0; i < data_number; ++i) {
-      EXPECT_EQ(ptr[i].x, static_cast<float16>(in_data_bool[i]).x);
+      EXPECT_EQ(ptr[i].x,
+                static_cast<paddle::platform::float16>(in_data_bool[i]).x);
     }
   }
 }
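
The float16 assertions above compare the raw `.x` bit patterns rather than the values themselves, since paddle::platform::float16 stores its IEEE binary16 representation in a public `x` field. A self-contained sketch of the same idea (the `Half` type here is an assumption for illustration, not Paddle's class):

    #include <cassert>
    #include <cstdint>

    // Stand-in for a half-precision type that exposes its raw bits,
    // the way paddle::platform::float16 exposes .x (illustrative only).
    struct Half {
      uint16_t x;
    };

    int main() {
      const Half one{0x3C00};  // 1.0 in IEEE binary16
      const Half also_one{0x3C00};
      // Bit-pattern equality sidesteps any need for a float16 operator==,
      // mirroring EXPECT_EQ(ptr[i].x, ...) in the test.
      assert(one.x == also_one.x);
      return 0;
    }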

File diff suppressed because it is too large.

@@ -202,8 +202,9 @@ class CosineOpComplete : public paddle::framework::CosineOp {
 };
 
 TEST(OperatorRegistrar, Test) {
-  using namespace paddle::framework;
-  OperatorRegistrar<CosineOpComplete, CosineOpProtoAndCheckerMaker> reg("cos");
+  paddle::framework::OperatorRegistrar<
+      CosineOpComplete, paddle::framework::CosineOpProtoAndCheckerMaker>
+      reg("cos");
 }
 
 namespace paddle {

@@ -226,10 +226,8 @@ REGISTER_OP_CPU_KERNEL(op_multi_inputs_with_kernel,
 
 // test with multi inputs
 TEST(OpKernel, multi_inputs) {
-  using namespace paddle::framework;
-
   paddle::framework::InitDevices(true);
-  proto::OpDesc op_desc;
+  paddle::framework::proto::OpDesc op_desc;
 
   op_desc.set_type("op_multi_inputs_with_kernel");
   BuildVar("xs", {"x0", "x1", "x2"}, op_desc.add_inputs());
@@ -243,12 +241,12 @@ TEST(OpKernel, multi_inputs) {
 
   paddle::platform::CPUPlace cpu_place;
   paddle::framework::Scope scope;
-  scope.Var("x0")->GetMutable<LoDTensor>();
-  scope.Var("x1")->GetMutable<LoDTensor>();
-  scope.Var("x2")->GetMutable<LoDTensor>();
-  scope.Var("k0")->GetMutable<LoDTensor>();
-  scope.Var("y0")->GetMutable<LoDTensor>();
-  scope.Var("y1")->GetMutable<LoDTensor>();
+  scope.Var("x0")->GetMutable<paddle::framework::LoDTensor>();
+  scope.Var("x1")->GetMutable<paddle::framework::LoDTensor>();
+  scope.Var("x2")->GetMutable<paddle::framework::LoDTensor>();
+  scope.Var("k0")->GetMutable<paddle::framework::LoDTensor>();
+  scope.Var("y0")->GetMutable<paddle::framework::LoDTensor>();
+  scope.Var("y1")->GetMutable<paddle::framework::LoDTensor>();
 
   auto op = paddle::framework::OpRegistry::CreateOp(op_desc);
   op->Run(scope, cpu_place);

@@ -15,14 +15,14 @@ limitations under the License. */
 #include <gtest/gtest.h>
 #include <atomic>
 
-#include "threadpool.h"
+#include "paddle/fluid/framework/threadpool.h"
 
 namespace framework = paddle::framework;
 
-void do_sum(framework::ThreadPool* pool, std::atomic<int>& sum, int cnt) {
+void do_sum(framework::ThreadPool* pool, std::atomic<int>* sum, int cnt) {
   std::vector<std::future<void>> fs;
   for (int i = 0; i < cnt; ++i) {
-    fs.push_back(framework::Async([&sum]() { sum.fetch_add(1); }));
+    fs.push_back(framework::Async([sum]() { sum->fetch_add(1); }));
   }
 }
@@ -46,7 +46,7 @@ TEST(ThreadPool, ConcurrentRun) {
   int n = 50;
   // sum = (n * (n + 1)) / 2
   for (int i = 1; i <= n; ++i) {
-    std::thread t(do_sum, pool, std::ref(sum), i);
+    std::thread t(do_sum, pool, &sum, i);
     threads.push_back(std::move(t));
   }
   for (auto& t : threads) {
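
The switch from `std::atomic<int>&` to `std::atomic<int>*` satisfies cpplint's runtime/references check (mutable parameters should be pointers, not non-const references), and it also drops the `std::ref` wrapper that std::thread otherwise needs to avoid copying the argument. A minimal sketch of the pattern, independent of Paddle's ThreadPool:

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // Mutable out-param passed by pointer, per cpplint runtime/references.
    void add_n(std::atomic<int>* sum, int n) {
      for (int i = 0; i < n; ++i) sum->fetch_add(1);
    }

    int main() {
      std::atomic<int> sum(0);
      std::vector<std::thread> threads;
      for (int i = 1; i <= 5; ++i) {
        // A raw pointer copies cleanly into the thread's argument storage;
        // a reference parameter would have required std::ref(sum) here.
        threads.emplace_back(add_n, &sum, i);
      }
      for (auto& t : threads) t.join();
      std::printf("sum = %d\n", sum.load());  // (5 * 6) / 2 = 15
      return 0;
    }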
