Decouple ir::Tensor class from python

pull/2245/head
He Wei 5 years ago
parent 363a232cbc
commit 43e0967024

@ -27,6 +27,7 @@
#include "utils/symbolic.h" #include "utils/symbolic.h"
#include "ir/meta_func_graph.h" #include "ir/meta_func_graph.h"
#include "ir/param_value_py.h" #include "ir/param_value_py.h"
#include "ir/tensor_py.h"
#include "pipeline/parse/python_adapter.h" #include "pipeline/parse/python_adapter.h"
#include "pipeline/parse/resolve.h" #include "pipeline/parse/resolve.h"
#include "operator/composite/composite.h" #include "operator/composite/composite.h"
@ -39,6 +40,8 @@
#include "utils/context/ms_context.h" #include "utils/context/ms_context.h"
#include "operator/ops.h" #include "operator/ops.h"
using mindspore::tensor::TensorPy;
namespace mindspore { namespace mindspore {
// max number of elements in sequence // max number of elements in sequence
const int NUM_MAX_SEQUENCE_ELEMS = 0x00FFFFFF; const int NUM_MAX_SEQUENCE_ELEMS = 0x00FFFFFF;
@ -399,7 +402,7 @@ std::string AnfExporter::GetValueText(const FuncGraphPtr &func_graph, const Valu
oss << value->DumpText(); oss << value->DumpText();
} else if (value->isa<tensor::Tensor>()) { } else if (value->isa<tensor::Tensor>()) {
auto tensor_ptr = dyn_cast<tensor::Tensor>(value); auto tensor_ptr = dyn_cast<tensor::Tensor>(value);
oss << value->DumpText() << "@" << DumpObject(tensor_ptr->data(), "T"); oss << value->DumpText() << "@" << DumpObject(TensorPy::AsNumpy(*tensor_ptr), "T");
} else if (value->isa<parse::Symbol>() || value->isa<None>() || value->isa<NullObj>()) { } else if (value->isa<parse::Symbol>() || value->isa<None>() || value->isa<NullObj>()) {
oss << value->DumpText(); oss << value->DumpText();
} else if (value->isa<ValueSequeue>()) { } else if (value->isa<ValueSequeue>()) {
@ -1813,7 +1816,7 @@ class IrParser {
if (tensor_data == nullptr) { if (tensor_data == nullptr) {
return TOK_ERROR; return TOK_ERROR;
} }
*val_ptr = std::make_shared<tensor::Tensor>(tensor_data, TypeIdToType(type)); *val_ptr = TensorPy::MakeTensor(tensor_data, TypeIdToType(type));
return lexer_.GetNextToken(); return lexer_.GetNextToken();
} }

@ -117,7 +117,7 @@ void DebugServices::check_watchpoints(std::vector<std::string> *name, std::vecto
continue; continue;
} }
float *start_addr = reinterpret_cast<float *>(tensor_ptr->data_c(false)); float *start_addr = reinterpret_cast<float *>(tensor_ptr->data_c());
unsigned int num_elements = (tensor_ptr->data().nbytes()) / sizeof(float); unsigned int num_elements = (tensor_ptr->data().nbytes()) / sizeof(float);
std::unordered_map<unsigned int, watchpoint_t>::iterator it_w_table_check; std::unordered_map<unsigned int, watchpoint_t>::iterator it_w_table_check;
@ -144,7 +144,7 @@ void DebugServices::check_watchpoints(std::vector<std::string> *name, std::vecto
name->push_back(name_no_slot); name->push_back(name_no_slot);
slot->push_back(std::to_string(tensor_list[i]->GetSlot())); slot->push_back(std::to_string(tensor_list[i]->GetSlot()));
data_ptr->push_back(reinterpret_cast<char *>(tensor_ptr->data_c(false))); data_ptr->push_back(reinterpret_cast<char *>(tensor_ptr->data_c()));
data_size->push_back(tensor_ptr->data().nbytes()); data_size->push_back(tensor_ptr->data().nbytes());
int condition_item = -1; int condition_item = -1;
@ -182,7 +182,7 @@ void DebugServices::read_nodes_tensors(std::vector<std::string> name, std::vecto
continue; continue;
} }
ret_name->push_back(std::get<0>(result)); ret_name->push_back(std::get<0>(result));
data_ptr->push_back(reinterpret_cast<char *>(std::get<1>(result)->GetTensor()->data_c(false))); data_ptr->push_back(reinterpret_cast<char *>(std::get<1>(result)->GetTensor()->data_c()));
data_size->push_back(std::get<1>(result)->GetTensor()->data().nbytes()); data_size->push_back(std::get<1>(result)->GetTensor()->data().nbytes());
dtype->push_back(std::get<1>(result)->GetTensor()->Dtype()); dtype->push_back(std::get<1>(result)->GetTensor()->Dtype());
shape->push_back(std::get<1>(result)->GetTensor()->shape()); shape->push_back(std::get<1>(result)->GetTensor()->shape());

@ -329,12 +329,12 @@ bool AscendDeviceAddress::DumpMemToFile(bool trans_flag, const std::string &file
MS_LOG(INFO) << "E2E Dump path is " << path; MS_LOG(INFO) << "E2E Dump path is " << path;
mindspore::tensor::TensorPtr out_tensor = std::make_shared<tensor::Tensor>(host_type, host_shape); mindspore::tensor::TensorPtr out_tensor = std::make_shared<tensor::Tensor>(host_type, host_shape);
size_t host_size = out_tensor->data().nbytes(); size_t host_size = out_tensor->data().nbytes();
ret = SyncDeviceToHost(host_shape, host_size, host_type, out_tensor->data_c(true)); ret = SyncDeviceToHost(host_shape, host_size, host_type, out_tensor->data_c());
if (!ret) { if (!ret) {
MS_LOG(ERROR) << "Copy device mem to host failed"; MS_LOG(ERROR) << "Copy device mem to host failed";
return ret; return ret;
} }
ret = mindspore::Dump::DumpToFile(path, out_tensor->data_c(false), host_size); ret = mindspore::Dump::DumpToFile(path, out_tensor->data_c(), host_size);
} else { } else {
auto host_tmp = std::vector<uint8_t>(size_); auto host_tmp = std::vector<uint8_t>(size_);
auto ret_rt_memcpy = rtMemcpy(host_tmp.data(), size_, ptr_, size_, RT_MEMCPY_DEVICE_TO_HOST); auto ret_rt_memcpy = rtMemcpy(host_tmp.data(), size_, ptr_, size_, RT_MEMCPY_DEVICE_TO_HOST);
@ -364,7 +364,7 @@ bool AscendDeviceAddress::LoadMemToHost(bool trans_flag, const std::string &tens
MS_LOG(INFO) << "E2E tensor name is " << tensor_name; MS_LOG(INFO) << "E2E tensor name is " << tensor_name;
mindspore::tensor::TensorPtr out_tensor = std::make_shared<tensor::Tensor>(host_type, host_shape); mindspore::tensor::TensorPtr out_tensor = std::make_shared<tensor::Tensor>(host_type, host_shape);
size_t host_size = out_tensor->data().nbytes(); size_t host_size = out_tensor->data().nbytes();
ret = SyncDeviceToHost(host_shape, host_size, host_type, out_tensor->data_c(true)); ret = SyncDeviceToHost(host_shape, host_size, host_type, out_tensor->data_c());
if (!ret) { if (!ret) {
MS_LOG(ERROR) << "Copy device mem to host failed"; MS_LOG(ERROR) << "Copy device mem to host failed";
return ret; return ret;
@ -379,7 +379,7 @@ bool AscendDeviceAddress::LoadMemToHost(bool trans_flag, const std::string &tens
} else { } else {
mindspore::tensor::TensorPtr out_tensor = std::make_shared<tensor::Tensor>(type_id_, host_shape); mindspore::tensor::TensorPtr out_tensor = std::make_shared<tensor::Tensor>(type_id_, host_shape);
size_t host_size = out_tensor->data().nbytes(); size_t host_size = out_tensor->data().nbytes();
auto ret_rt_memcpy = rtMemcpy(out_tensor->data_c(true), host_size, ptr_, host_size, RT_MEMCPY_DEVICE_TO_HOST); auto ret_rt_memcpy = rtMemcpy(out_tensor->data_c(), host_size, ptr_, host_size, RT_MEMCPY_DEVICE_TO_HOST);
auto tensor_data = std::make_shared<mindspore::TensorData>(); auto tensor_data = std::make_shared<mindspore::TensorData>();
tensor_data->SetName(tensor_name); tensor_data->SetName(tensor_name);

@ -80,11 +80,11 @@ void CPUKernelRuntime::AssignValueNodeAddress(session::KernelGraph *kernel_graph
size_t tensor_size = std::accumulate(data_shape.begin(), data_shape.end(), type_size, std::multiplies<size_t>()); size_t tensor_size = std::accumulate(data_shape.begin(), data_shape.end(), type_size, std::multiplies<size_t>());
DeviceAddressPtr address = CreateDeviceAddress(nullptr, tensor_size, kOpFormat_DEFAULT, kNumberTypeFloat32); DeviceAddressPtr address = CreateDeviceAddress(nullptr, tensor_size, kOpFormat_DEFAULT, kNumberTypeFloat32);
if (tensor->data_type() == kNumberTypeFloat32 || tensor->data_type() == kNumberTypeInt32) { if (tensor->data_type() == kNumberTypeFloat32 || tensor->data_type() == kNumberTypeInt32) {
address->ptr_ = tensor->data_c(false); address->ptr_ = tensor->data_c();
} else { } else {
address->ptr_ = resource_manager_.MemMalloc(tensor_size); address->ptr_ = resource_manager_.MemMalloc(tensor_size);
if (!address->SyncHostToDevice(data_shape, LongToSize(tensor->data().nbytes()), tensor->data_type(), if (!address->SyncHostToDevice(data_shape, LongToSize(tensor->data().nbytes()), tensor->data_type(),
tensor->data_c(false))) { tensor->data_c())) {
MS_LOG(EXCEPTION) << "Value node sync host to device failed!"; MS_LOG(EXCEPTION) << "Value node sync host to device failed!";
} }
} }
@ -177,7 +177,7 @@ BaseRef CPUKernelRuntime::CreatTensorForOutput(const session::KernelWithIndex &k
tensor->set_device_address(address); tensor->set_device_address(address);
need_sync_outputs->emplace_back(tensor); need_sync_outputs->emplace_back(tensor);
} else { } else {
address->ptr_ = tensor->data_c(true); address->ptr_ = tensor->data_c();
address->ref_count_ = INIT_NODE_REF; address->ref_count_ = INIT_NODE_REF;
(void)bound_addresses->insert(address); (void)bound_addresses->insert(address);
} }
@ -220,11 +220,11 @@ void CPUKernelRuntime::BindInputOutput(const session::KernelGraph *kernel_graph,
size_t tensor_size = size_t tensor_size =
std::accumulate(data_shape.begin(), data_shape.end(), sizeof(float), std::multiplies<size_t>()); std::accumulate(data_shape.begin(), data_shape.end(), sizeof(float), std::multiplies<size_t>());
if (tensor->data_type() == kNumberTypeFloat32 || tensor->data_type() == kNumberTypeInt32) { if (tensor->data_type() == kNumberTypeFloat32 || tensor->data_type() == kNumberTypeInt32) {
address->ptr_ = tensor->data_c(false); address->ptr_ = tensor->data_c();
} else { } else {
address->ptr_ = resource_manager_.MemMalloc(tensor_size); address->ptr_ = resource_manager_.MemMalloc(tensor_size);
if (!address->SyncHostToDevice(data_shape, LongToSize(tensor->data().nbytes()), tensor->data_type(), if (!address->SyncHostToDevice(data_shape, LongToSize(tensor->data().nbytes()), tensor->data_type(),
tensor->data_c(false))) { tensor->data_c())) {
MS_LOG(EXCEPTION) << "Parameter node sync host to device failed!"; MS_LOG(EXCEPTION) << "Parameter node sync host to device failed!";
} }
tensor->set_dirty(true); tensor->set_dirty(true);

@ -390,7 +390,7 @@ bool KernelAdjust::StepLoadCtrlInputs(const std::shared_ptr<session::KernelGraph
tensor->set_device_address(device_address); tensor->set_device_address(device_address);
if (!device_address->SyncHostToDevice(trans::GetRuntimePaddingShape(pk_node, 0), if (!device_address->SyncHostToDevice(trans::GetRuntimePaddingShape(pk_node, 0),
LongToSize(tensor->data().nbytes()), tensor->data_type(), LongToSize(tensor->data().nbytes()), tensor->data_type(),
tensor->data_c(false))) { tensor->data_c())) {
MS_LOG(INFO) << "SyncHostToDevice failed."; MS_LOG(INFO) << "SyncHostToDevice failed.";
return false; return false;
} }
@ -407,14 +407,14 @@ void KernelAdjust::LoadSwitchInputs(std::vector<tensor::TensorPtr> *inputs) {
tensor::TensorPtr loop_count_tensor = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp); tensor::TensorPtr loop_count_tensor = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp);
MS_EXCEPTION_IF_NULL(loop_count_tensor); MS_EXCEPTION_IF_NULL(loop_count_tensor);
int32_t *val = nullptr; int32_t *val = nullptr;
val = static_cast<int32_t *>(loop_count_tensor->data_c(true)); val = static_cast<int32_t *>(loop_count_tensor->data_c());
MS_EXCEPTION_IF_NULL(val); MS_EXCEPTION_IF_NULL(val);
*val = 0; *val = 0;
inputs->push_back(loop_count_tensor); inputs->push_back(loop_count_tensor);
tensor::TensorPtr iter_loop_tensor = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp); tensor::TensorPtr iter_loop_tensor = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp);
MS_EXCEPTION_IF_NULL(iter_loop_tensor); MS_EXCEPTION_IF_NULL(iter_loop_tensor);
val = static_cast<int32_t *>(iter_loop_tensor->data_c(true)); val = static_cast<int32_t *>(iter_loop_tensor->data_c());
MS_EXCEPTION_IF_NULL(val); MS_EXCEPTION_IF_NULL(val);
*val = SizeToInt(LongToSize(ConfigManager::GetInstance().iter_num())); *val = SizeToInt(LongToSize(ConfigManager::GetInstance().iter_num()));
MS_LOG(INFO) << "iter_loop_tensor = " << *val; MS_LOG(INFO) << "iter_loop_tensor = " << *val;
@ -422,14 +422,14 @@ void KernelAdjust::LoadSwitchInputs(std::vector<tensor::TensorPtr> *inputs) {
tensor::TensorPtr zero_tensor = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp); tensor::TensorPtr zero_tensor = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp);
MS_EXCEPTION_IF_NULL(zero_tensor); MS_EXCEPTION_IF_NULL(zero_tensor);
val = static_cast<int32_t *>(zero_tensor->data_c(true)); val = static_cast<int32_t *>(zero_tensor->data_c());
MS_EXCEPTION_IF_NULL(val); MS_EXCEPTION_IF_NULL(val);
*val = 0; *val = 0;
inputs->push_back(zero_tensor); inputs->push_back(zero_tensor);
tensor::TensorPtr one_tensor = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp); tensor::TensorPtr one_tensor = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp);
MS_EXCEPTION_IF_NULL(one_tensor); MS_EXCEPTION_IF_NULL(one_tensor);
val = static_cast<int32_t *>(one_tensor->data_c(true)); val = static_cast<int32_t *>(one_tensor->data_c());
MS_EXCEPTION_IF_NULL(val); MS_EXCEPTION_IF_NULL(val);
*val = 1; *val = 1;
inputs->push_back(one_tensor); inputs->push_back(one_tensor);

@ -543,7 +543,7 @@ void KernelRuntime::AssignValueNodeTensor(const ValueNodePtr &value_node, const
} }
AnfAlgo::SetOutputAddr(address, output_idx, value_node.get()); AnfAlgo::SetOutputAddr(address, output_idx, value_node.get());
if (!address->SyncHostToDevice(trans::GetRuntimePaddingShape(value_node, 0), tensor_size, tensor->data_type(), if (!address->SyncHostToDevice(trans::GetRuntimePaddingShape(value_node, 0), tensor_size, tensor->data_type(),
tensor->data_c(false))) { tensor->data_c())) {
MS_EXCEPTION(NotExistsError) << "ValueNode SyncHostToDevice fail!" << value_node->DebugString() << "node format is" MS_EXCEPTION(NotExistsError) << "ValueNode SyncHostToDevice fail!" << value_node->DebugString() << "node format is"
<< AnfAlgo::GetOutputFormat(value_node, output_idx) << "node dtype is " << AnfAlgo::GetOutputFormat(value_node, output_idx) << "node dtype is "
<< AnfAlgo::GetOutputInferDataType(value_node, output_idx); << AnfAlgo::GetOutputInferDataType(value_node, output_idx);

@ -115,7 +115,7 @@ class MetaTensor : public Value {
// order it represents. // order it represents.
// //
// return A const vector<int> which represents the shape of the tensor. // return A const vector<int> which represents the shape of the tensor.
std::vector<int> shape() const { return shape_; } const std::vector<int> &shape() const { return shape_; }
// brief Sets the shape of a tensor. // brief Sets the shape of a tensor.
// //

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

@ -0,0 +1,114 @@
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_IR_TENSOR_PY_H_
#define MINDSPORE_CCSRC_IR_TENSOR_PY_H_
#include <memory>
#include <string>
#include <vector>
#include "pybind11/pybind11.h"
#include "pybind11/numpy.h"
#include "ir/tensor.h"
namespace py = pybind11;
namespace pybind11 {
namespace detail {
// Similar to enums in `pybind11/numpy.h`. Determined by doing:
// python3 -c 'import numpy as np; print(np.dtype(np.float16).num)'
constexpr int NPY_FLOAT16 = 23;
// Generic pybind11 type caster converting between a C++ scalar type T and a
// numpy scalar on the Python side. `name` is set to "PleaseOverride" by the
// PYBIND11_TYPE_CASTER macro; concrete uses are expected to shadow it (see
// type_caster<float16> in this header).
template <typename T>
struct npy_scalar_caster {
  PYBIND11_TYPE_CASTER(T, _("PleaseOverride"));
  using Array = array_t<T>;
  // Load a Python object into `this->value`.
  // Accepts either a numpy scalar of dtype T or a 0-d array of T; with
  // convert=true, anything Array::ensure can coerce is also accepted.
  bool load(handle src, bool convert) {
    // Taken from Eigen casters. Permits either scalar dtype or scalar array.
    handle type = dtype::of<T>().attr("type");
    if (!convert && !isinstance<Array>(src) && !isinstance(src, type)) return false;
    Array tmp = Array::ensure(src);
    // A 0-d numpy array has size() == 1; both checks together reject 1-d
    // single-element arrays like np.array([x]).
    if (tmp && tmp.size() == 1 && tmp.ndim() == 0) {
      this->value = *tmp.data();
      return true;
    }
    return false;
  }
  // Convert a C++ scalar to a numpy scalar: build a 1-element array, write
  // the value, resize to 0-d, then index with an empty tuple to extract the
  // numpy scalar object.
  static handle cast(T src, return_value_policy, handle) {
    Array tmp({1});
    tmp.mutable_at(0) = src;
    tmp.resize({});
    // You could also just return the array if you want a scalar array.
    object scalar = tmp[tuple()];
    return scalar.release();
  }
};
// Teach pybind11's numpy layer how to describe float16, which it does not
// support natively: look the dtype up by its numpy type number (NPY_FLOAT16,
// defined above as 23).
template <>
struct npy_format_descriptor<float16> {
  static constexpr auto name = "float16";
  // Return the numpy dtype object for float16.
  // PyArray_DescrFromType_ returns a new reference to the descriptor; we
  // borrow here and let pybind11::dtype manage it.
  static pybind11::dtype dtype() {
    handle ptr = npy_api::get().PyArray_DescrFromType_(NPY_FLOAT16);
    return reinterpret_borrow<pybind11::dtype>(ptr);
  }
  // NOTE(review): removed the original `virtual ~npy_format_descriptor()`.
  // Format descriptors are traits used only through static members; the
  // virtual destructor added a vtable pointer for no benefit, and no other
  // npy_format_descriptor specialization is polymorphic.
};
// Enable float16 <-> Python conversion by reusing the generic scalar caster;
// `name` shadows the inherited "PleaseOverride" placeholder.
template <>
struct type_caster<float16> : public npy_scalar_caster<float16> {
  static constexpr auto name = "float16";
};
} // namespace detail
} // namespace pybind11
using mindspore::device::DeviceAddress;
using DeviceAddressPtr = std::shared_ptr<mindspore::device::DeviceAddress>;
// brief mindspore namespace.
//
// mindspore namespace is the top level namespace of the MindSpore project.
// Other namespaces should be sub namespaces of the mindspore namespace in the ME project.
namespace mindspore {
// brief mindspore::tensor namespace
//
// A sub namespace in ME to support tensor related definition.
namespace tensor {
// Tensor python wrapper and adapter class.
//
// Stateless adapter that keeps all pybind11/numpy dependencies out of the
// core ir::Tensor class by exposing only static conversion helpers.
class TensorPy {
 public:
  // brief Create Tensor from a numpy array object.
  //
  // param input [py::array] Data value of the tensor.
  // param data_type [TypePtr] Data type of the tensor; nullptr presumably
  //       means "infer from the array dtype" -- confirm in tensor_py.cc.
  static TensorPtr MakeTensor(const py::array &input, const TypePtr &data_type = nullptr);
  // brief Convert a tensor to a numpy array; by its name this syncs device
  //       data to host first (implementation not visible here -- confirm).
  static py::array SyncAsNumpy(const Tensor &tensor);
  // brief Convert a tensor's host data to a numpy array (no sync implied).
  static py::array AsNumpy(const Tensor &tensor);
  // brief Get the tensor's shape as a python tuple.
  static py::tuple GetPyTupleShape(const Tensor &tensor);
};
} // namespace tensor
} // namespace mindspore
#endif // MINDSPORE_CCSRC_IR_TENSOR_PY_H_

@ -23,6 +23,7 @@
#include <algorithm> #include <algorithm>
#include <functional> #include <functional>
#include "ir/tensor_py.h"
#include "ir/param_value_py.h" #include "ir/param_value_py.h"
#include "debug/anf_ir_utils.h" #include "debug/anf_ir_utils.h"
#include "operator/ops.h" #include "operator/ops.h"
@ -257,7 +258,7 @@ void IrExportBuilder::SetTensorToAttributeProto(const ValuePtr &value, onnx::Att
attr_proto->set_type(onnx::AttributeProto_AttributeType_TENSOR); attr_proto->set_type(onnx::AttributeProto_AttributeType_TENSOR);
onnx::TensorProto *tensor_proto = attr_proto->mutable_t(); onnx::TensorProto *tensor_proto = attr_proto->mutable_t();
auto data = value->cast<tensor::TensorPtr>(); auto data = value->cast<tensor::TensorPtr>();
tensor_proto->set_raw_data(data->data().request(true).ptr, static_cast<size_t>(data->data().nbytes())); tensor_proto->set_raw_data(data->data_c(), static_cast<size_t>(data->data().nbytes()));
auto dtype = data->data_type(); auto dtype = data->data_type();
auto shape = data->shape_c(); auto shape = data->shape_c();
tensor_proto->set_data_type(GetOnnxDataType(dtype)); tensor_proto->set_data_type(GetOnnxDataType(dtype));

@ -27,6 +27,7 @@
#include "proto/onnx.pb.h" #include "proto/onnx.pb.h"
#include "operator/ops.h" #include "operator/ops.h"
#include "ir/param_value_py.h" #include "ir/param_value_py.h"
#include "ir/tensor_py.h"
namespace mindspore { namespace mindspore {
enum OpMergeMode { enum OpMergeMode {
@ -1190,7 +1191,7 @@ void OnnxExporter::SetNodeAttribute(const ValuePtr &value, onnx::NodeProto *cons
attr_proto->set_type(onnx::AttributeProto_AttributeType_TENSOR); attr_proto->set_type(onnx::AttributeProto_AttributeType_TENSOR);
onnx::TensorProto *tensor_proto = attr_proto->mutable_t(); onnx::TensorProto *tensor_proto = attr_proto->mutable_t();
auto data = dyn_cast<tensor::Tensor>(value); auto data = dyn_cast<tensor::Tensor>(value);
tensor_proto->set_raw_data(data->data().request(true).ptr, static_cast<size_t>(data->data().nbytes())); tensor_proto->set_raw_data(data->data_c(), static_cast<size_t>(data->data().nbytes()));
auto dtype = data->data_type(); auto dtype = data->data_type();
auto shape = data->shape_c(); auto shape = data->shape_c();

@ -21,6 +21,9 @@
#include "pipeline/static_analysis/param_validator.h" #include "pipeline/static_analysis/param_validator.h"
#include "operator/ops.h" #include "operator/ops.h"
#include "utils/convert_utils.h" #include "utils/convert_utils.h"
#include "ir/tensor_py.h"
using mindspore::tensor::TensorPy;
namespace mindspore { namespace mindspore {
namespace abstract { namespace abstract {
@ -554,7 +557,7 @@ AbstractBasePtr InferImplTuple2Array(const AnalysisEnginePtr &, const PrimitiveP
py::tuple data_tuple = ValuePtrToPyData(input->BuildValue()); py::tuple data_tuple = ValuePtrToPyData(input->BuildValue());
py::array data = py::array(data_tuple); py::array data = py::array(data_tuple);
auto tensor = std::make_shared<tensor::Tensor>(data); auto tensor = TensorPy::MakeTensor(data);
auto ret = tensor->ToAbstract(); auto ret = tensor->ToAbstract();
ret->set_value(tensor); ret->set_value(tensor);
MS_LOG(DEBUG) << "Tuple2arry result AbstractTensor: " << ret->ToString(); MS_LOG(DEBUG) << "Tuple2arry result AbstractTensor: " << ret->ToString();

@ -153,7 +153,7 @@ class TensorMultiplyBase : public AnfVisitor {
} }
tensor::TensorPtr tensor_ptr = dyn_cast<tensor::Tensor>(value); tensor::TensorPtr tensor_ptr = dyn_cast<tensor::Tensor>(value);
return tensor_ptr->data_c(writable); return tensor_ptr->data_c();
} }
// Make a new tensor (when possible) with the same shape as of `node` // Make a new tensor (when possible) with the same shape as of `node`
@ -171,7 +171,7 @@ class TensorMultiplyBase : public AnfVisitor {
auto new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_type_ptr->type_id(), tensor_shape); auto new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_type_ptr->type_id(), tensor_shape);
size_t mem_size = GetTypeByte(tensor_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum()); size_t mem_size = GetTypeByte(tensor_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum());
char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c(true)); char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
if (x == nullptr) { if (x == nullptr) {
std::memset(data, 0, mem_size); std::memset(data, 0, mem_size);
@ -546,7 +546,7 @@ class ConstantDuplicateMul : public AnfVisitor {
auto new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_3_type_ptr->type_id(), tensor_out_shape); auto new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_3_type_ptr->type_id(), tensor_out_shape);
size_t mem_size = GetTypeByte(tensor_3_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum()); size_t mem_size = GetTypeByte(tensor_3_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum());
char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c(true)); char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
memcpy(data, data_out, mem_size); memcpy(data, data_out, mem_size);
auto new_vnode = NewValueNode(new_tensor_ptr); auto new_vnode = NewValueNode(new_tensor_ptr);

@ -191,7 +191,7 @@ inline void ResetSharedOp() {
tensor::TensorPtr ConstData() { tensor::TensorPtr ConstData() {
std::vector<int> shp = {1}; std::vector<int> shp = {1};
tensor::TensorPtr const_data = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp); tensor::TensorPtr const_data = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp);
auto *val = static_cast<int32_t *>(const_data->data_c(true)); auto *val = static_cast<int32_t *>(const_data->data_c());
*val = 0; *val = 0;
return const_data; return const_data;
} }
@ -267,7 +267,7 @@ CNodePtr GenerateSwitchControlDependNode(const FuncGraphPtr &graph, const AnfNod
auto PrimSquare = prim::GetPythonOps("square", "mindspore.ops.functional")->cast<PrimitivePtr>(); auto PrimSquare = prim::GetPythonOps("square", "mindspore.ops.functional")->cast<PrimitivePtr>();
std::vector<int> shp = {1}; std::vector<int> shp = {1};
tensor::TensorPtr const_data = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp); tensor::TensorPtr const_data = std::make_shared<tensor::Tensor>(kInt32->type_id(), shp);
auto *val = static_cast<int32_t *>(const_data->data_c(true)); auto *val = static_cast<int32_t *>(const_data->data_c());
*val = 0; *val = 0;
// for the control_depend netoutput node , add two const data to merge the flow ,one for depended node with same // for the control_depend netoutput node , add two const data to merge the flow ,one for depended node with same
// switch the other use the opposite // switch the other use the opposite

@ -178,7 +178,7 @@ class ZeroLikeFillZero : public AnfVisitor {
tensor::TensorPtr new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_type_ptr->type_id(), tensor_shape); tensor::TensorPtr new_tensor_ptr = std::make_shared<tensor::Tensor>(tensor_type_ptr->type_id(), tensor_shape);
size_t mem_size = GetTypeByte(tensor_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum()); size_t mem_size = GetTypeByte(tensor_type_ptr) * IntToSize(new_tensor_ptr->ElementsNum());
char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c(true)); char *data = reinterpret_cast<char *>(new_tensor_ptr->data_c());
(void)memset_s(data, mem_size, 0, mem_size); (void)memset_s(data, mem_size, 0, mem_size);
auto new_cnode = NewValueNode(new_tensor_ptr); auto new_cnode = NewValueNode(new_tensor_ptr);

@ -71,7 +71,7 @@ class SpecializeTransform {
continue; continue;
} }
if (value_args[i] != nullptr) { if (value_args[i] != nullptr) {
auto const_tensor = *value_args[i]; auto &const_tensor = *value_args[i];
auto const_tensor_ptr = std::make_shared<tensor::Tensor>(const_tensor); auto const_tensor_ptr = std::make_shared<tensor::Tensor>(const_tensor);
AnfNodePtr arg = NewValueNode(const_tensor_ptr); AnfNodePtr arg = NewValueNode(const_tensor_ptr);
(void)mng->Replace(params[i], arg); (void)mng->Replace(params[i], arg);

@ -210,8 +210,8 @@ OperatorVector CreateSubOp(int32_t sub_value) {
OperatorName operator_name = SUB; OperatorName operator_name = SUB;
OperatorAttrs operator_attrs; OperatorAttrs operator_attrs;
py::tuple tuple = py::make_tuple(sub_value); std::vector<int64_t> tensor_data = {sub_value};
mindspore::tensor::TensorPtr tensor_ptr = std::make_shared<mindspore::tensor::Tensor>(tuple, kInt32); mindspore::tensor::TensorPtr tensor_ptr = std::make_shared<mindspore::tensor::Tensor>(tensor_data, kInt32);
ValuePtr op_param_value = MakeValue(tensor_ptr); ValuePtr op_param_value = MakeValue(tensor_ptr);
Attr op1_param = std::make_pair("", op_param_value); Attr op1_param = std::make_pair("", op_param_value);

@ -204,8 +204,8 @@ ForwardOp CreatReduceMeanForwardOp(const std::vector<Group> &forward_group, cons
OperatorName operator1_name = REAL_DIV; OperatorName operator1_name = REAL_DIV;
std::vector<Device> device_list = forward_group[0].GetDevicesList(); std::vector<Device> device_list = forward_group[0].GetDevicesList();
auto divisor = static_cast<float>(device_list.size()); auto divisor = static_cast<float>(device_list.size());
py::tuple tuple = py::make_tuple(divisor); std::vector<double> tensor_data = {divisor};
mindspore::tensor::TensorPtr tensor_ptr = std::make_shared<mindspore::tensor::Tensor>(tuple, dtype); mindspore::tensor::TensorPtr tensor_ptr = std::make_shared<mindspore::tensor::Tensor>(tensor_data, dtype);
ValuePtr op1_param_value = MakeValue(tensor_ptr); ValuePtr op1_param_value = MakeValue(tensor_ptr);
Attr op1_param = std::make_pair("divisor", op1_param_value); Attr op1_param = std::make_pair("divisor", op1_param_value);
OperatorParams operator1_params = {std::make_pair(op1_param, 2)}; OperatorParams operator1_params = {std::make_pair(op1_param, 2)};

@ -156,11 +156,11 @@ void ConvertObjectToTensors(const py::dict &dict, TensorOrderMap *const tensors)
if (py::isinstance<py::float_>(item.second.attr("default_input"))) { if (py::isinstance<py::float_>(item.second.attr("default_input"))) {
// convert float to tensor with shape([1]) // convert float to tensor with shape([1])
tensor = std::make_shared<Tensor>(kNumberTypeFloat32, std::vector<int>({1})); tensor = std::make_shared<Tensor>(kNumberTypeFloat32, std::vector<int>({1}));
*(static_cast<float *>(tensor->data_c(true))) = py::cast<float>(item.second.attr("default_input")); *(static_cast<float *>(tensor->data_c())) = py::cast<float>(item.second.attr("default_input"));
} else if (py::isinstance<py::int_>(item.second.attr("default_input"))) { } else if (py::isinstance<py::int_>(item.second.attr("default_input"))) {
// convert int to tensor with shape([1]) // convert int to tensor with shape([1])
tensor = std::make_shared<Tensor>(kNumberTypeInt32, std::vector<int>({1})); tensor = std::make_shared<Tensor>(kNumberTypeInt32, std::vector<int>({1}));
*(static_cast<float *>(tensor->data_c(true))) = py::cast<float>(item.second.attr("default_input")); *(static_cast<float *>(tensor->data_c())) = py::cast<float>(item.second.attr("default_input"));
} else if (py::hasattr(item.second.attr("default_input"), PYTHON_TENSOR_FLAG)) { } else if (py::hasattr(item.second.attr("default_input"), PYTHON_TENSOR_FLAG)) {
// cast tensor // cast tensor
tensor = py::cast<std::shared_ptr<Tensor>>(item.second.attr("default_input")); tensor = py::cast<std::shared_ptr<Tensor>>(item.second.attr("default_input"));
@ -330,7 +330,7 @@ py::object ExtractGeneralCnodeRet(const AbstractBasePtr &cnode_data, const py::t
MS_LOG(EXCEPTION) << "The shape of the tensor derived is not Shape, is " << shape->ToString(); MS_LOG(EXCEPTION) << "The shape of the tensor derived is not Shape, is " << shape->ToString();
} }
auto shape_me = shape->cast<abstract::ShapePtr>()->shape(); auto shape_me = shape->cast<abstract::ShapePtr>()->shape();
auto shape_ge = py::cast<Tensor>(data[*count]).shape(); auto shape_ge = py::cast<Tensor &>(data[*count]).shape();
if (shape_ge != shape_me) { if (shape_ge != shape_me) {
MS_LOG(EXCEPTION) << "The shape of the " << *count << "th tensor returned: " << shape_ge MS_LOG(EXCEPTION) << "The shape of the " << *count << "th tensor returned: " << shape_ge
<< " is not the same as the shape of the tensor derived: " << shape_me; << " is not the same as the shape of the tensor derived: " << shape_me;

@ -44,7 +44,7 @@ tensor::TensorPtr CreateTensor(const AnfNodePtr &node) {
indices_tensor->set_device_info(device_info); indices_tensor->set_device_info(device_info);
// 2 set value of tensor // 2 set value of tensor
auto data_ptr = indices_tensor->data_c(true); auto data_ptr = indices_tensor->data_c();
MS_EXCEPTION_IF_NULL(data_ptr); MS_EXCEPTION_IF_NULL(data_ptr);
std::vector<Eigen::half> half_data; std::vector<Eigen::half> half_data;
for (size_t i = 0; i < last_dim; ++i) { for (size_t i = 0; i < last_dim; ++i) {

@ -348,7 +348,7 @@ tensor::TensorPtr CreateTensorWithValueTuple(const ValueTuplePtr &value_tuple_pt
MS_EXCEPTION_IF_NULL(tensor); MS_EXCEPTION_IF_NULL(tensor);
tensor::DeviceInfo device_info{kOpFormat_DEFAULT, type_ptr}; tensor::DeviceInfo device_info{kOpFormat_DEFAULT, type_ptr};
tensor->set_device_info(device_info); tensor->set_device_info(device_info);
auto data_ptr = tensor->data_c(true); auto data_ptr = tensor->data_c();
MS_EXCEPTION_IF_NULL(data_ptr); MS_EXCEPTION_IF_NULL(data_ptr);
auto elem_num = values.size() * data_length; auto elem_num = values.size() * data_length;
auto ret_code = memcpy_s(data_ptr, static_cast<size_t>(tensor->data().nbytes()), values.data(), elem_num); auto ret_code = memcpy_s(data_ptr, static_cast<size_t>(tensor->data().nbytes()), values.data(), elem_num);

@ -538,7 +538,7 @@ bool Kernel2Ms::KernelInput2MS(const std::vector<TensorPtr> &input_tensors) {
auto match_idx = match_to_rel_idxs[j]; auto match_idx = match_to_rel_idxs[j];
auto real_tensor = input_tensors[match_idx]; auto real_tensor = input_tensors[match_idx];
auto real_size = LongToSize(real_tensor->data().nbytes()); auto real_size = LongToSize(real_tensor->data().nbytes());
auto real_data = real_tensor->data_c(false); auto real_data = real_tensor->data_c();
MS_EXCEPTION_IF_NULL(real_data); MS_EXCEPTION_IF_NULL(real_data);
if (sub_ms_graph_->allTensors[cache_idx] != nullptr) { if (sub_ms_graph_->allTensors[cache_idx] != nullptr) {
sub_ms_graph_->allTensors[cache_idx]->data.resize(real_size); sub_ms_graph_->allTensors[cache_idx]->data.resize(real_size);

@ -22,6 +22,7 @@
#include <unordered_set> #include <unordered_set>
#include <algorithm> #include <algorithm>
#include "ir/tensor_py.h"
#include "ir/param_value_py.h" #include "ir/param_value_py.h"
#include "utils/any.h" #include "utils/any.h"
#include "utils/utils.h" #include "utils/utils.h"
@ -51,6 +52,8 @@
#include "pynative/pynative_execute_ge.h" #include "pynative/pynative_execute_ge.h"
#endif #endif
using mindspore::tensor::TensorPy;
const char SINGLE_OP_GRAPH[] = "single_op_graph"; const char SINGLE_OP_GRAPH[] = "single_op_graph";
// primitive unable to infer value for constant input in PyNative mode // primitive unable to infer value for constant input in PyNative mode
const std::set<std::string> vm_operators = {"make_ref", "HookBackward", "stop_gradient"}; const std::set<std::string> vm_operators = {"make_ref", "HookBackward", "stop_gradient"};
@ -171,7 +174,8 @@ py::tuple ConvertInputs(const PrimitivePyPtr &prim, const py::list &args, py::tu
py_args[i] = std::make_shared<tensor::Tensor>(py::cast<py::int_>(py_args[i]), tensor_ptr->Dtype()); py_args[i] = std::make_shared<tensor::Tensor>(py::cast<py::int_>(py_args[i]), tensor_ptr->Dtype());
(*out_args_list)[i] = py_args[i]; (*out_args_list)[i] = py_args[i];
} else { } else {
py_args[i] = std::make_shared<tensor::Tensor>(py::cast<py::float_>(py_args[i]), tensor_ptr->Dtype()); double arg_value = py::cast<py::float_>(py_args[i]);
py_args[i] = std::make_shared<tensor::Tensor>(arg_value, tensor_ptr->Dtype());
(*out_args_list)[i] = py_args[i]; (*out_args_list)[i] = py_args[i];
} }
continue; continue;
@ -262,7 +266,7 @@ py::object RunOpInVM(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *stat
result[i] = py::getattr(input, "data"); result[i] = py::getattr(input, "data");
} else { } else {
auto tensor = py::cast<tensor::TensorPtr>(op_inputs[i]); auto tensor = py::cast<tensor::TensorPtr>(op_inputs[i]);
auto new_tensor = std::make_shared<tensor::Tensor>(tensor->data()); auto new_tensor = std::make_shared<tensor::Tensor>(tensor->data_type(), tensor->shape(), tensor->data_ptr());
result[i] = new_tensor; result[i] = new_tensor;
} }
} }
@ -366,13 +370,14 @@ void ConvertPyObjectToTensor(const py::object &input_object, const PrimitivePtr
if (py::isinstance<tensor::Tensor>(input_object)) { if (py::isinstance<tensor::Tensor>(input_object)) {
tensor_ptr = py::cast<tensor::TensorPtr>(input_object); tensor_ptr = py::cast<tensor::TensorPtr>(input_object);
} else if (py::isinstance<py::float_>(input_object)) { } else if (py::isinstance<py::float_>(input_object)) {
tensor_ptr = std::make_shared<tensor::Tensor>(py::cast<py::float_>(input_object), kFloat32); double input_value = py::cast<py::float_>(input_object);
tensor_ptr = std::make_shared<tensor::Tensor>(input_value, kFloat32);
*tensor_mask = kValueNodeTensorMask; *tensor_mask = kValueNodeTensorMask;
} else if (py::isinstance<py::int_>(input_object)) { } else if (py::isinstance<py::int_>(input_object)) {
tensor_ptr = std::make_shared<tensor::Tensor>(py::cast<py::int_>(input_object), kInt32); tensor_ptr = std::make_shared<tensor::Tensor>(py::cast<py::int_>(input_object), kInt32);
*tensor_mask = kValueNodeTensorMask; *tensor_mask = kValueNodeTensorMask;
} else if (py::isinstance<py::array>(input_object)) { } else if (py::isinstance<py::array>(input_object)) {
tensor_ptr = std::make_shared<tensor::Tensor>(py::cast<py::array>(input_object), nullptr); tensor_ptr = TensorPy::MakeTensor(py::cast<py::array>(input_object), nullptr);
} else if (py::isinstance<py::list>(input_object)) { } else if (py::isinstance<py::list>(input_object)) {
auto list_inputs = py::cast<py::list>(input_object); auto list_inputs = py::cast<py::list>(input_object);
py::tuple tuple_inputs(list_inputs.size()); py::tuple tuple_inputs(list_inputs.size());

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save