Support SparseMatrixArg unit test using Daoyuan's new Function Test.

avx_docs
xutianbing 8 years ago
parent 316bf75afd
commit 077f936aa5

@@ -33,7 +33,6 @@ SparseMatrixArg::SparseMatrixArg(const CpuSparseMatrix& sparse, ArgType argType)
     : BufferArg(sparse, argType),
       row_(reinterpret_cast<void*>(sparse.getRows()), VALUE_TYPE_INT32),
       col_(reinterpret_cast<void*>(sparse.getCols()), VALUE_TYPE_INT32),
-      /// todo(tianbing), make sure how to get NNZ
       nnz_(sparse.getElementCnt()),
       format_(sparse.getFormat()),
       type_(sparse.getValueType()) {
@@ -44,7 +43,6 @@ SparseMatrixArg::SparseMatrixArg(const GpuSparseMatrix& sparse, ArgType argType)
     : BufferArg(sparse, argType),
       row_(reinterpret_cast<void*>(sparse.getRows()), VALUE_TYPE_INT32),
       col_(reinterpret_cast<void*>(sparse.getCols()), VALUE_TYPE_INT32),
-      /// todo(tianbing), make sure how to get NNZ
       nnz_(sparse.getElementCnt()),
       format_(sparse.getFormat()),
       type_(sparse.getValueType()) {

@@ -71,17 +71,24 @@ public:
 public:
   BufferArg(ValueType valueType,
             const TensorShape& shape,
-            ArgType argType = UNSPECIFIED)
+            ArgType argType = UNSPECIFIED,
+            bool trans = false)
       : buf_(nullptr),
         valueType_(valueType),
         shape_(shape),
-        argType_(argType) {}
+        argType_(argType),
+        trans_(trans) {}

   BufferArg(void* buf,
             ValueType valueType,
             const TensorShape& shape,
-            ArgType argType = UNSPECIFIED)
-      : buf_(buf), valueType_(valueType), shape_(shape), argType_(argType) {}
+            ArgType argType = UNSPECIFIED,
+            bool trans = false)
+      : buf_(buf),
+        valueType_(valueType),
+        shape_(shape),
+        argType_(argType),
+        trans_(trans) {}

   BufferArg(void* buf, ValueType valueType)
       : buf_(buf), valueType_(valueType) {}
@@ -162,6 +169,7 @@ public:
   ValueType valueType() const { return valueType_; }
   BufferType bufferType() const { return bufferType_; }
   const TensorShape& shape() const { return shape_; }
+  bool isTransposed() const { return trans_; }
   bool isSparseArg() const { return TENSOR_SPARSE == bufferType_; }
   bool isSequenceArg() const { return TENSOR_SEQUENCE_DATA == bufferType_; }
@@ -175,6 +183,7 @@ protected:
   BufferType bufferType_{TENSOR_UNKNOWN};
   ArgType argType_{UNSPECIFIED};
   bool trans_{false};
+  // todo(tianbing), add deviceType_
   // leading dimensions. The size is dims_.size()
   // Dims lds_;
 };
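With these changes a BufferArg can carry a transpose flag alongside its buffer, value type, and shape, and expose it via isTransposed(). A minimal usage sketch follows; the buffer pointer, the getSomeBuffer() helper, the sizes, and the paddle:: qualifiers are illustrative assumptions, not part of this commit:

    // Hypothetical: describe a 128x256 float buffer that a Function
    // implementation should treat as transposed.
    void* buf = getSomeBuffer();  // assumed helper, not from this commit
    paddle::TensorShape shape{128, 256};
    paddle::BufferArg arg(buf,
                          paddle::VALUE_TYPE_FLOAT,
                          shape,
                          paddle::UNSPECIFIED,
                          /*trans=*/true);
    CHECK(arg.isTransposed());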
@ -267,8 +276,9 @@ public:
size_t nnz, size_t nnz,
SparseFormat format, SparseFormat format,
SparseValueType type, SparseValueType type,
ArgType argType = UNSPECIFIED) ArgType argType = UNSPECIFIED,
: BufferArg(buf, valueType, shape, argType), bool trans = false)
: BufferArg(buf, valueType, shape, argType, trans),
row_(row), row_(row),
col_(col), col_(col),
nnz_(nnz), nnz_(nnz),
@@ -286,6 +296,33 @@ public:
     }
   }

+  SparseMatrixArg(ValueType valueType,
+                  const TensorShape& shape,
+                  size_t nnz,
+                  SparseFormat format,
+                  SparseValueType type,
+                  ArgType argType = UNSPECIFIED,
+                  bool trans = false)
+      : BufferArg(valueType, shape, argType, trans),
+        /// len of row_ : height + 1 (CSR), buf_ == nullptr
+        row_(format == SPARSE_CSR
+                 ? BufferArg(VALUE_TYPE_INT32, TensorShape{shape[0] + 1})
+                 : BufferArg(VALUE_TYPE_INT32, TensorShape{nnz})),
+        /// len of col_ : width + 1 (CSC), buf_ == nullptr
+        col_(format == SPARSE_CSR
+                 ? BufferArg(VALUE_TYPE_INT32, TensorShape{nnz})
+                 : BufferArg(VALUE_TYPE_INT32, TensorShape{shape[1] + 1})),
+        nnz_(nnz),
+        format_(format),
+        type_(type) {
+    bufferType_ = TENSOR_SPARSE;
+    /// todo(tianbing)
+    /// valueType and shape_.ndims() == 2 need to check before
+    /// this constructor to make sure row_ and col_ are right
+    CHECK((valueType == VALUE_TYPE_FLOAT) || (valueType == VALUE_TYPE_DOUBLE));
+    CHECK_EQ(shape_.ndims(), (size_t)2);
+  }
+
   SparseMatrixArg(const CpuSparseMatrix& sparse, ArgType argType = UNSPECIFIED);
   SparseMatrixArg(const GpuSparseMatrix& sparse, ArgType argType = UNSPECIFIED);
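The constructor added above lets a test describe a sparse argument by shape, nnz, and format alone; row_ and col_ are sized to height + 1 / nnz for CSR (or nnz / width + 1 for CSC) with the data buffer left null. A rough sketch of how it might be called, where the dimensions and nnz are made up for illustration:

    // Hypothetical: a 100x200 CSR matrix with 500 non-zero float entries.
    // row_ gets length 101 (height + 1) and col_ gets length 500 (nnz).
    paddle::SparseMatrixArg sparseIn(paddle::VALUE_TYPE_FLOAT,
                                     paddle::TensorShape{100, 200},
                                     /*nnz=*/500,
                                     paddle::SPARSE_CSR,
                                     paddle::FLOAT_VALUE);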

@@ -13,6 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "Function.h"
+#include "paddle/math/Matrix.h"
+#include "paddle/math/SparseMatrix.h"
 #include "paddle/math/Vector.h"
 #include "paddle/math/tests/TensorCheck.h"
 #include "paddle/testing/TestUtil.h"
@@ -62,29 +64,41 @@ public:
     cpuMemory_.emplace_back(std::make_shared<CpuMemoryHandle>(size));
     gpuMemory_.emplace_back(std::make_shared<GpuMemoryHandle>(size));
-    cpuInputs_.emplace_back(std::make_shared<BufferArg>(
-        cpuMemory_.back()->getBuf(), input.valueType(), input.shape()));
-    gpuInputs_.emplace_back(std::make_shared<BufferArg>(
-        gpuMemory_.back()->getBuf(), input.valueType(), input.shape()));
+    cpuInputs_.emplace_back(
+        std::make_shared<BufferArg>(cpuMemory_.back()->getBuf(),
+                                    input.valueType(),
+                                    input.shape(),
+                                    UNSPECIFIED,
+                                    input.isTransposed()));
+    gpuInputs_.emplace_back(
+        std::make_shared<BufferArg>(gpuMemory_.back()->getBuf(),
+                                    input.valueType(),
+                                    input.shape(),
+                                    UNSPECIFIED,
+                                    input.isTransposed()));
   }

   // output need only contains shape, do not contains data.
-  void addOutputs(const BufferArg& output) {
+  void addOutputs(const BufferArg& output, ArgType argType = ASSIGN_TO) {
     size_t size =
         output.shape().getElements() * sizeOfValuType(output.valueType());
     cpuMemory_.emplace_back(std::make_shared<CpuMemoryHandle>(size));
     gpuMemory_.emplace_back(std::make_shared<GpuMemoryHandle>(size));
-    cpuOutputs_.emplace_back(
-        std::make_shared<BufferArg>(cpuMemory_.back()->getBuf(),
-                                    output.valueType(),
-                                    output.shape(),
-                                    ASSIGN_TO));
-    gpuOutputs_.emplace_back(
-        std::make_shared<BufferArg>(gpuMemory_.back()->getBuf(),
-                                    output.valueType(),
-                                    output.shape(),
-                                    ASSIGN_TO));
+    cpuOutputs_.emplace_back(std::make_shared<BufferArg>(
+        cpuMemory_.back()->getBuf(),
+        output.valueType(),
+        output.shape(),
+        // todo(tianbing), argType = output.getArgType(), but default ASSIGN_TO
+        argType,
+        output.isTransposed()));
+    gpuOutputs_.emplace_back(std::make_shared<BufferArg>(
+        gpuMemory_.back()->getBuf(),
+        output.valueType(),
+        output.shape(),
+        // todo(tianbing), argType = output.getArgType(), but default ASSIGN_TO
+        argType,
+        output.isTransposed()));
   }

   void addInputs(const SequenceArg& input) {
@@ -107,10 +121,36 @@ public:
     // TODO: need be implemented.
   }

+  void addInputs(const SparseMatrixArg& input) {
+    cpuSparse_ = std::make_shared<CpuSparseMatrix>(input.shape()[0],
+                                                   input.shape()[1],
+                                                   input.nnz(),
+                                                   input.dataType(),
+                                                   input.dataFormat(),
+                                                   input.isTransposed());
+
+    gpuSparse_ = std::make_shared<GpuSparseMatrix>(input.shape()[0],
+                                                   input.shape()[1],
+                                                   input.nnz(),
+                                                   input.dataType(),
+                                                   input.dataFormat(),
+                                                   input.isTransposed());
+
+    /// init sparse matrix
+    hl_stream_t stream(HPPL_STREAM_1);
+    cpuSparse_->randomizeUniform();
+    gpuSparse_->copyFrom(*cpuSparse_, stream);
+    hl_stream_synchronize(stream);
+
+    cpuInputs_.emplace_back(std::make_shared<SparseMatrixArg>(*cpuSparse_));
+    gpuInputs_.emplace_back(std::make_shared<SparseMatrixArg>(*gpuSparse_));
+  }
+
   void run() {
     // prepare cpu/gpu arguments
     initInputs();
+    initOutputs();

     // function calculate
     auto callFunction = [](FunctionBase* function,
                            std::vector<BufferArgPtr>& inputs,
@@ -129,7 +169,7 @@ public:
     callFunction(cpuFunc_.get(), cpuInputs_, cpuOutputs_);
     callFunction(gpuFunc_.get(), gpuInputs_, gpuOutputs_);

-    // check outputs and inouts
+    // check outputs
     compareOutputs();
   }
@@ -140,6 +180,10 @@ public:
 protected:
   void initInputs() {
     for (size_t i = 0; i < cpuInputs_.size(); i++) {
+      if (cpuInputs_[i]->isSparseArg()) {
+        continue;  /// sparse matrix already init
+      }
+
       initArg(*cpuInputs_[i]);

       // TODO: Need a BufferCopy used to copy from one BufferArg to another.
@ -152,6 +196,25 @@ protected:
} }
} }
void initOutputs() {
for (size_t i = 0; i < cpuOutputs_.size(); i++) {
if (cpuOutputs_[i]->isSparseArg()) {
LOG(INFO) << "output sparse matrix already init";
continue;
}
initArg(*cpuOutputs_[i]);
// TODO: Need a BufferCopy used to copy from one BufferArg to another.
CpuVector cpuVector(cpuOutputs_[i]->shape().getElements(),
(real*)cpuOutputs_[i]->data());
GpuVector gpuVector(gpuOutputs_[i]->shape().getElements(),
(real*)gpuOutputs_[i]->data());
gpuVector.copyFrom(cpuVector);
}
}
void compareOutputs() { void compareOutputs() {
for (size_t i = 0; i < cpuOutputs_.size(); i++) { for (size_t i = 0; i < cpuOutputs_.size(); i++) {
// TODO, Need a BufferCheck used to compare the two buffers. // TODO, Need a BufferCheck used to compare the two buffers.
@@ -159,7 +222,6 @@ protected:
       auto gpu = gpuOutputs_[i];
       CpuVector cpuVector(cpu->shape().getElements(), (real*)cpu->data());
       GpuVector gpuVector(cpu->shape().getElements(), (real*)gpu->data());
-
       autotest::TensorCheckErr(cpuVector, gpuVector);
     }
   }
@@ -195,6 +257,8 @@ protected:
   std::vector<BufferArgPtr> cpuOutputs_;
   std::vector<BufferArgPtr> gpuInputs_;
   std::vector<BufferArgPtr> gpuOutputs_;
+  std::shared_ptr<CpuSparseMatrix> cpuSparse_;
+  std::shared_ptr<GpuSparseMatrix> gpuSparse_;
 };

 }  // namespace paddle
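Taken together, the test-helper changes above mean a comparison test can feed a sparse input (randomized on CPU, copied to GPU) and still get per-element CPU/GPU output checks. A hedged sketch of what such a test might look like, assuming the helper is the FunctionCompare class declared in this header and that a Function registered as "MulOp" exists with these shapes; the name, shapes, and nnz are assumptions, not shown in this diff:

    // Hypothetical test: sparse * dense matrix multiplication, CPU vs. GPU.
    paddle::FunctionCompare test("MulOp",        // assumed registered Function name
                                 paddle::FuncConfig());
    // Function-specific parameters, if any, would be set via FuncConfig::set.

    // Sparse input described only by shape/nnz/format; its data is
    // randomized inside addInputs(const SparseMatrixArg&).
    test.addInputs(paddle::SparseMatrixArg(paddle::VALUE_TYPE_FLOAT,
                                           paddle::TensorShape{100, 200},
                                           /*nnz=*/500,
                                           paddle::SPARSE_CSR,
                                           paddle::FLOAT_VALUE));
    // Dense input and output only need a value type and shape.
    test.addInputs(paddle::BufferArg(paddle::VALUE_TYPE_FLOAT,
                                     paddle::TensorShape{200, 300}));
    test.addOutputs(paddle::BufferArg(paddle::VALUE_TYPE_FLOAT,
                                      paddle::TensorShape{100, 300}));
    test.run();  // runs both kernels and calls compareOutputs()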

@@ -15,6 +15,8 @@ limitations under the License. */
 #pragma once

 #include "Function.h"
+/// todo(tianbing), delete it
+#include <iostream>
 #include "paddle/math/Matrix.h"
 #include "paddle/math/SparseMatrix.h"

(One file's diff is not shown because it is too large.)