@@ -27,8 +27,20 @@ namespace p = paddle::platform;
 // test data amount
 const f::DDim kDims = {20, 20};
 
-class BroadcastTester : public ::testing::Test {
- public:
+struct TestBroadcastOpHandle {
+  std::vector<std::unique_ptr<p::DeviceContext>> ctxs_;
+  std::vector<Scope*> local_scopes_;
+  Scope g_scope_;
+  std::unique_ptr<OpHandleBase> op_handle_;
+  std::vector<std::unique_ptr<VarHandleBase>> vars_;
+  std::vector<p::Place> gpu_list_;
+
+  void WaitAll() {
+    for (size_t j = 0; j < ctxs_.size(); ++j) {
+      ctxs_[j]->Wait();
+    }
+  }
+
   void InitCtxOnGpu(bool use_gpu) {
     if (use_gpu) {
 #ifdef PADDLE_WITH_CUDA
@@ -57,61 +69,56 @@ class BroadcastTester : public ::testing::Test {
     }
   }
 
-  void BroadcastInitOp(int input_scope_idx) {
+  void InitBroadcastOp(size_t input_scope_idx) {
     for (size_t j = 0; j < gpu_list_.size(); ++j) {
-      local_scope_.push_back(&g_scope_.NewScope());
-      local_scope_[j]->Var("out");
+      local_scopes_.push_back(&(g_scope_.NewScope()));
+      local_scopes_[j]->Var("out");
     }
-    local_scope_[input_scope_idx]->Var("input");
+    local_scopes_[input_scope_idx]->Var("input");
 
-    bc_op_handle_ = new f::details::BroadcastOpHandle(local_scope_, gpu_list_);
+    op_handle_.reset(new BroadcastOpHandle(local_scopes_, gpu_list_));
 
-    f::details::VarHandle* in_var_handle = new f::details::VarHandle();
+    vars_.emplace_back(new VarHandle());
+    VarHandle* in_var_handle = static_cast<VarHandle*>(vars_.back().get());
     in_var_handle->place_ = gpu_list_[input_scope_idx];
     in_var_handle->name_ = "input";
     in_var_handle->version_ = 1;
     in_var_handle->scope_idx_ = input_scope_idx;
     in_var_handle->generated_op_ = nullptr;
-    bc_op_handle_->AddInput(in_var_handle);
+    op_handle_->AddInput(in_var_handle);
+
+    // add dummy var
+    vars_.emplace_back(new DummyVarHandle());
+    DummyVarHandle* dummy_var_handle =
+        static_cast<DummyVarHandle*>(vars_.back().get());
+    dummy_var_handle->generated_op_ = nullptr;
+    op_handle_->AddInput(dummy_var_handle);
 
     for (size_t j = 0; j < gpu_list_.size(); ++j) {
-      bc_op_handle_->dev_ctxes_[gpu_list_[j]] = ctxs_[j];
-      f::details::VarHandle* out_var_handle = new f::details::VarHandle();
+      op_handle_->dev_ctxes_[gpu_list_[j]] = ctxs_[j].get();
+      vars_.emplace_back(new VarHandle());
+      VarHandle* out_var_handle = static_cast<VarHandle*>(vars_.back().get());
       out_var_handle->place_ = gpu_list_[j];
       out_var_handle->name_ = "out";
       out_var_handle->version_ = 2;
       out_var_handle->scope_idx_ = j;
-      bc_op_handle_->AddOutput(out_var_handle);
-    }
-  }
-  void BroadcastOpDestroy() {
-    for (auto in : bc_op_handle_->inputs_) {
-      delete in;
-    }
-    for (auto out : bc_op_handle_->outputs_) {
-      delete out;
+      op_handle_->AddOutput(out_var_handle);
     }
-    delete bc_op_handle_;
-    for (size_t j = 0; j < ctxs_.size(); ++j) {
-      delete ctxs_[j];
-    }
-  }
 
-  void WaitAll() {
-    for (size_t j = 0; j < ctxs_.size(); ++j) {
-      ctxs_[j]->Wait();
-    }
+    // add dummy var
+    vars_.emplace_back(new DummyVarHandle());
+    DummyVarHandle* out_dummy_var_handle =
+        static_cast<DummyVarHandle*>(vars_.back().get());
+    out_dummy_var_handle->generated_op_ = nullptr;
+    op_handle_->AddOutput(out_dummy_var_handle);
   }
 
-  void TestBroadcastLodTensor() {
-    int input_scope_idx = 0;
-    BroadcastInitOp(input_scope_idx);
-
-    auto in_var = local_scope_[input_scope_idx]->Var("input");
+  void TestBroadcastLodTensor(size_t input_scope_idx) {
+    auto in_var = local_scopes_[input_scope_idx]->Var("input");
     auto in_lod_tensor = in_var->GetMutable<f::LoDTensor>();
     in_lod_tensor->mutable_data<float>(kDims, gpu_list_[input_scope_idx]);
 
-    std::vector<float> send_vector(f::product(kDims), input_scope_idx + 12);
+    std::vector<float> send_vector(static_cast<size_t>(f::product(kDims)));
     for (size_t k = 0; k < send_vector.size(); ++k) {
       send_vector[k] = k;
     }
@@ -120,13 +127,13 @@ class BroadcastTester : public ::testing::Test {
         send_vector, *(ctxs_[input_scope_idx]), in_lod_tensor);
     in_lod_tensor->set_lod(lod);
 
-    bc_op_handle_->Run(false);
+    op_handle_->Run(false);
 
     WaitAll();
 
     p::CPUPlace cpu_place;
     for (size_t j = 0; j < gpu_list_.size(); ++j) {
-      auto out_var = local_scope_[j]->Var("out");
+      auto out_var = local_scopes_[j]->Var("out");
       auto out_tensor = out_var->Get<f::LoDTensor>();
       PADDLE_ENFORCE_EQ(out_tensor.lod(), lod, "lod is not equal.");
 
@@ -134,42 +141,37 @@ class BroadcastTester : public ::testing::Test {
       f::TensorCopy(out_tensor, cpu_place, *(ctxs_[j]), &result_tensor);
       float* ct = result_tensor.mutable_data<float>(cpu_place);
 
-      for (int64_t j = 0; j < f::product(kDims); ++j) {
-        ASSERT_NEAR(ct[j], send_vector[j], 1e-5);
+      for (int64_t i = 0; i < f::product(kDims); ++i) {
+        ASSERT_NEAR(ct[i], send_vector[i], 1e-5);
       }
     }
-
-    BroadcastOpDestroy();
   }
 
-  void TestBroadcastSelectedRows() {
-    int input_scope_idx = 0;
-    BroadcastInitOp(input_scope_idx);
-
-    auto in_var = local_scope_[input_scope_idx]->Var("input");
+  void TestBroadcastSelectedRows(size_t input_scope_idx) {
+    auto in_var = local_scopes_[input_scope_idx]->Var("input");
     auto in_selected_rows = in_var->GetMutable<f::SelectedRows>();
     auto value = in_selected_rows->mutable_value();
     value->mutable_data<float>(kDims, gpu_list_[input_scope_idx]);
-    int height = kDims[0] * 2;
+    int height = static_cast<int>(kDims[0]) * 2;
     std::vector<int64_t> rows{0, 1, 2, 3, 3, 0, 14, 7, 3, 1,
                               2, 4, 6, 3, 1, 1, 1, 1, 3, 7};
     in_selected_rows->set_height(height);
     in_selected_rows->set_rows(rows);
 
-    std::vector<float> send_vector(f::product(kDims));
+    std::vector<float> send_vector(static_cast<size_t>(f::product(kDims)));
     for (size_t k = 0; k < send_vector.size(); ++k) {
       send_vector[k] = k;
     }
     paddle::framework::TensorFromVector<float>(
         send_vector, *(ctxs_[input_scope_idx]), value);
 
-    bc_op_handle_->Run(false);
+    op_handle_->Run(false);
 
     WaitAll();
 
     p::CPUPlace cpu_place;
     for (size_t j = 0; j < gpu_list_.size(); ++j) {
-      auto out_var = local_scope_[j]->Var("out");
+      auto out_var = local_scopes_[j]->Var("out");
       auto& out_select_rows = out_var->Get<f::SelectedRows>();
       auto rt = out_select_rows.value();
 
@@ -183,41 +185,44 @@ class BroadcastTester : public ::testing::Test {
       f::TensorCopy(rt, cpu_place, *(ctxs_[j]), &result_tensor);
       float* ct = result_tensor.data<float>();
 
-      for (int64_t j = 0; j < f::product(kDims); ++j) {
-        ASSERT_NEAR(ct[j], send_vector[j], 1e-5);
+      for (int64_t i = 0; i < f::product(kDims); ++i) {
+        ASSERT_NEAR(ct[i], send_vector[i], 1e-5);
       }
     }
-
-    BroadcastOpDestroy();
   }
-
- public:
-  f::Scope g_scope_;
-  std::vector<p::DeviceContext*> ctxs_;
-  std::vector<f::Scope*> local_scope_;
-  std::vector<p::Place> gpu_list_;
-  f::details::BroadcastOpHandle* bc_op_handle_;
 };
 
-TEST_F(BroadcastTester, TestCPUBroadcastTestLodTensor) {
-  InitCtxOnGpu(false);
-  TestBroadcastLodTensor();
+TEST(BroadcastTester, TestCPUBroadcastTestLodTensor) {
+  TestBroadcastOpHandle test_op;
+  size_t input_scope_idx = 0;
+  test_op.InitCtxOnGpu(false);
+  test_op.InitBroadcastOp(input_scope_idx);
+  test_op.TestBroadcastLodTensor(input_scope_idx);
 }
 
-TEST_F(BroadcastTester, TestCPUBroadcastTestSelectedRows) {
-  InitCtxOnGpu(false);
-  TestBroadcastSelectedRows();
+TEST(BroadcastTester, TestCPUBroadcastTestSelectedRows) {
+  TestBroadcastOpHandle test_op;
+  size_t input_scope_idx = 0;
+  test_op.InitCtxOnGpu(false);
+  test_op.InitBroadcastOp(input_scope_idx);
+  test_op.TestBroadcastSelectedRows(input_scope_idx);
 }
 
 #ifdef PADDLE_WITH_CUDA
-TEST_F(BroadcastTester, TestGPUBroadcastTestLodTensor) {
-  InitCtxOnGpu(true);
-  TestBroadcastLodTensor();
+TEST(BroadcastTester, TestGPUBroadcastTestLodTensor) {
+  TestBroadcastOpHandle test_op;
+  size_t input_scope_idx = 0;
+  test_op.InitCtxOnGpu(true);
+  test_op.InitBroadcastOp(input_scope_idx);
+  test_op.TestBroadcastLodTensor(input_scope_idx);
 }
 
-TEST_F(BroadcastTester, TestGPUBroadcastTestSelectedRows) {
-  InitCtxOnGpu(true);
-  TestBroadcastSelectedRows();
+TEST(BroadcastTester, TestGPUBroadcastTestSelectedRows) {
+  TestBroadcastOpHandle test_op;
+  size_t input_scope_idx = 0;
+  test_op.InitCtxOnGpu(true);
+  test_op.InitBroadcastOp(input_scope_idx);
+  test_op.TestBroadcastSelectedRows(input_scope_idx);
 }
 #endif
 