@@ -351,13 +351,13 @@ py::object RunOpInVM(const OpExecInfoPtr &op_exec_info, PynativeStatusCode *stat
     for (size_t i = 0; i < op_inputs.size(); i++) {
       py::object input = op_inputs[i];
       if (py::hasattr(input, "__parameter__")) {
-        result[i] = py::getattr(input, "data");
-      } else {
-        auto tensor = py::cast<tensor::TensorPtr>(input);
-        auto new_tensor = std::make_shared<tensor::Tensor>(tensor->data_type(), tensor->shape(), tensor->data_ptr());
-        new_tensor->set_device_address(tensor->device_address());
-        new_tensor->set_dirty(tensor->is_dirty());
-        result[i] = new_tensor;
+        input = py::getattr(input, "data");
       }
+      auto tensor = py::cast<tensor::TensorPtr>(input);
+      auto new_tensor = std::make_shared<tensor::Tensor>(tensor->data_type(), tensor->shape(), tensor->data_ptr());
+      new_tensor->set_device_address(tensor->device_address());
+      new_tensor->set_dirty(tensor->is_dirty());
+      result[i] = new_tensor;
     }
     *status = PYNATIVE_SUCCESS;