|
|
|
@ -44,6 +44,7 @@ limitations under the License. */
|
|
|
|
|
#include "paddle/fluid/operators/activation_op.h"
|
|
|
|
|
#include "paddle/fluid/operators/py_func_op.h"
|
|
|
|
|
#include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h"
|
|
|
|
|
#include "paddle/fluid/platform/cpu_helper.h"
|
|
|
|
|
#include "paddle/fluid/platform/cpu_info.h"
|
|
|
|
|
#include "paddle/fluid/platform/enforce.h"
|
|
|
|
|
#include "paddle/fluid/platform/init.h"
|
|
|
|
@ -164,6 +165,8 @@ PYBIND11_MODULE(core_noavx, m) {
|
|
|
|
|
|
|
|
|
|
BindException(&m);
|
|
|
|
|
|
|
|
|
|
m.def("set_num_threads", &platform::SetNumThreads);
|
|
|
|
|
|
|
|
|
|
m.def(
|
|
|
|
|
"_append_python_callable_object_and_return_id",
|
|
|
|
|
[](py::object py_obj) -> size_t {
|
|
|
|
@ -283,8 +286,8 @@ PYBIND11_MODULE(core_noavx, m) {
|
|
|
|
|
LoD is short for Level of Details and is usually used for varied sequence
|
|
|
|
|
length. You can skip the following comment if you don't need optional LoD.
|
|
|
|
|
|
|
|
|
|
For example, a LoDTensor X can look like the example below. It contains
|
|
|
|
|
2 sequences. The first has length 2 and the second has length 3, as
|
|
|
|
|
For example, a LoDTensor X can look like the example below. It contains
|
|
|
|
|
2 sequences. The first has length 2 and the second has length 3, as
|
|
|
|
|
described by x.lod.
|
|
|
|
|
|
|
|
|
|
The first tensor dimension 5=2+3 is calculated from LoD if it's available.
|
|
|
|
@ -292,7 +295,7 @@ PYBIND11_MODULE(core_noavx, m) {
|
|
|
|
|
columns, hence [5, 2].
|
|
|
|
|
|
|
|
|
|
x.lod = [[2, 3]]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
x.data = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
|
|
|
|
|
|
|
|
|
|
x.shape = [5, 2]
|
|
|
|
@ -1002,7 +1005,7 @@ All parameter, weight, gradient are variables in Paddle.
|
|
|
|
|
|
|
|
|
|
Examples:
|
|
|
|
|
.. code-block:: python
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import paddle.fluid as fluid
|
|
|
|
|
|
|
|
|
|
arr = fluid.LoDTensorArray()
|
|
|
|
@ -1482,14 +1485,14 @@ All parameter, weight, gradient are variables in Paddle.
|
|
|
|
|
"memory_optimize",
|
|
|
|
|
[](const BuildStrategy &self) { return self.memory_optimize_; },
|
|
|
|
|
[](BuildStrategy &self, bool b) { self.memory_optimize_ = b; },
|
|
|
|
|
R"DOC(The type is BOOL, memory optimize aims to save total memory
|
|
|
|
|
R"DOC(The type is BOOL, memory optimize aims to save total memory
|
|
|
|
|
consumption, set to True to enable it.
|
|
|
|
|
|
|
|
|
|
Memory Optimize is our experimental feature, some variables
|
|
|
|
|
|
|
|
|
|
Memory Optimize is our experimental feature, some variables
|
|
|
|
|
may be reused/removed by optimize strategy. If you need to
|
|
|
|
|
fetch some variable values when using this feature, please
|
|
|
|
|
set the persistable property of the variables to True.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
Default False)DOC")
|
|
|
|
|
.def_property(
|
|
|
|
|
"is_distribution",
|
|
|
|
|