From: @yeyunpeng2020
Reviewed-by: @zhanghaibo5, @hangangqiang
Signed-off-by: @hangangqiang
pull/13183/MERGE
mindspore-ci-bot 4 years ago committed by Gitee
commit 3f479d0273

@ -6,6 +6,7 @@ import com.mindspore.lite.Model;
import com.mindspore.lite.DataType;
import com.mindspore.lite.Version;
import com.mindspore.lite.config.MSConfig;
import com.mindspore.lite.config.DeviceType;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

@ -74,6 +74,10 @@ public class MSTensor {
this.tensorPtr = 0;
}
/**
 * Returns the name of this tensor as stored in the underlying native MSTensor.
 *
 * @return the tensor name reported by the native layer.
 */
public String tensorName() {
  return tensorName(this.tensorPtr);
}
/**
 * Returns the raw native tensor handle held by this wrapper, for use by
 * other JNI-backed classes in this package.
 *
 * @return the native MSTensor pointer value.
 */
protected long getMSTensorPtr() {
  return this.tensorPtr;
}
@ -99,4 +103,6 @@ public class MSTensor {
private native int elementsNum(long tensorPtr);
private native void free(long tensorPtr);
private native String tensorName(long tensorPtr);
}

@ -65,11 +65,6 @@ extern "C" JNIEXPORT jbyteArray JNICALL Java_com_mindspore_lite_MSTensor_getByte
return env->NewByteArray(0);
}
if (ms_tensor_ptr->data_type() != mindspore::kNumberTypeUInt8) {
MS_LOGE("data type is error : %d", ms_tensor_ptr->data_type());
return env->NewByteArray(0);
}
auto local_element_num = ms_tensor_ptr->ElementsNum();
auto ret = env->NewByteArray(local_element_num);
env->SetByteArrayRegion(ret, 0, local_element_num, local_data);
@ -183,7 +178,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_com_mindspore_lite_MSTensor_setByteBu
jlong tensor_ptr,
jobject buffer) {
auto *p_data = reinterpret_cast<jbyte *>(env->GetDirectBufferAddress(buffer)); // get buffer pointer
jlong data_len = env->GetDirectBufferCapacity(buffer); // get buffer capacity
jlong data_len = env->GetDirectBufferCapacity(buffer); // get buffer capacity
if (p_data == nullptr) {
MS_LOGE("GetDirectBufferAddress return null");
return false;
@ -239,3 +234,15 @@ extern "C" JNIEXPORT void JNICALL Java_com_mindspore_lite_MSTensor_free(JNIEnv *
auto *ms_tensor_ptr = static_cast<mindspore::tensor::MSTensor *>(pointer);
delete (ms_tensor_ptr);
}
// JNI bridge for MSTensor.tensorName(): converts the Java-held handle back
// into a native MSTensor and returns its name as a new JVM string.
// Returns nullptr when the handle from Java is null.
extern "C" JNIEXPORT jstring JNICALL Java_com_mindspore_lite_MSTensor_tensorName(JNIEnv *env, jobject thiz,
                                                                                jlong tensor_ptr) {
  auto *tensor = reinterpret_cast<mindspore::tensor::MSTensor *>(tensor_ptr);
  if (tensor == nullptr) {
    MS_LOGE("Tensor pointer from java is nullptr");
    return nullptr;
  }
  return env->NewStringUTF(tensor->tensor_name().c_str());
}

@ -13,15 +13,15 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_NNACL_RNADOM_STANDARD_NORMAL_PARAMETER_H_
#define MINDSPORE_LITE_NNACL_RNADOM_STANDARD_NORMAL_PARAMETER_H_
#ifndef MINDSPORE_LITE_NNACL_RNADOM_PARAMETER_H_
#define MINDSPORE_LITE_NNACL_RNADOM_PARAMETER_H_
#include "nnacl/op_base.h"
typedef struct RandomStandardNormalParam {
typedef struct RandomParam {
OpParameter op_parameter_;
int seed_;
int seed2_;
} RandomStandardNormalParam;
} RandomParam;
#endif // MINDSPORE_LITE_NNACL_RNADOM_STANDARD_NORMAL_PARAMETER_H_

@ -15,19 +15,18 @@
*/
#include "src/ops/populate/populate_register.h"
#include "nnacl/random_standard_normal_parameter.h"
#include "nnacl/random_parameter.h"
namespace mindspore {
namespace lite {
namespace {
OpParameter *PopulateRandomStandardNormalParameter(const void *prim) {
RandomStandardNormalParam *random_parameter =
reinterpret_cast<RandomStandardNormalParam *>(malloc(sizeof(RandomStandardNormalParam)));
RandomParam *random_parameter = reinterpret_cast<RandomParam *>(malloc(sizeof(RandomParam)));
if (random_parameter == nullptr) {
MS_LOG(ERROR) << "malloc RandomStandardNormal parameter failed.";
return nullptr;
}
memset(random_parameter, 0, sizeof(RandomStandardNormalParam));
memset(random_parameter, 0, sizeof(RandomParam));
auto *primitive = static_cast<const schema::Primitive *>(prim);
random_parameter->op_parameter_.type_ = primitive->value_type();
auto param = primitive->value_as_RandomStandardNormal();

@ -15,9 +15,25 @@
*/
#include "src/ops/populate/populate_register.h"
#include "src/ops/populate/default_populate.h"
#include "nnacl/random_parameter.h"
namespace mindspore {
namespace lite {
// Builds the nnacl RandomParam for a RandomStandardNormal primitive.
// Allocates the parameter struct, copies the primitive type and the two
// RNG seeds from the flatbuffer, and returns it as a generic OpParameter.
// Returns nullptr (with an error log) on allocation failure or when the
// primitive does not actually carry a RandomStandardNormal payload.
OpParameter *PopulateRandomStandardNormalParameter(const void *prim) {
  auto *random_parameter = reinterpret_cast<RandomParam *>(malloc(sizeof(RandomParam)));
  if (random_parameter == nullptr) {
    MS_LOG(ERROR) << "malloc Random parameter failed.";
    return nullptr;
  }
  memset(random_parameter, 0, sizeof(RandomParam));
  auto *primitive = static_cast<const schema::Primitive *>(prim);
  random_parameter->op_parameter_.type_ = primitive->value_type();
  auto param = primitive->value_as_RandomStandardNormal();
  // value_as_* returns nullptr when the union holds a different type; a
  // malformed model must not crash the converter or leak the allocation.
  if (param == nullptr) {
    MS_LOG(ERROR) << "value_as_RandomStandardNormal return nullptr.";
    free(random_parameter);
    return nullptr;
  }
  random_parameter->seed_ = param->seed();
  random_parameter->seed2_ = param->seed2();
  return reinterpret_cast<OpParameter *>(random_parameter);
}
Registry g_uniformRealParameterRegistry(schema::PrimitiveType_UniformReal, DefaultPopulateParameter, SCHEMA_CUR);
} // namespace lite
} // namespace mindspore

@ -19,7 +19,7 @@
#include <vector>
#include "src/lite_kernel.h"
#include "nnacl/random_standard_normal_parameter.h"
#include "nnacl/random_parameter.h"
using mindspore::lite::InnerContext;
@ -29,7 +29,7 @@ class RandomStandardNormalCPUKernel : public LiteKernel {
RandomStandardNormalCPUKernel(OpParameter *parameter, const std::vector<lite::Tensor *> &inputs,
const std::vector<lite::Tensor *> &outputs, const InnerContext *ctx)
: LiteKernel(parameter, inputs, outputs, ctx) {
param_ = reinterpret_cast<RandomStandardNormalParam *>(parameter);
param_ = reinterpret_cast<RandomParam *>(parameter);
}
~RandomStandardNormalCPUKernel() override = default;
@ -38,7 +38,7 @@ class RandomStandardNormalCPUKernel : public LiteKernel {
int Run() override;
protected:
RandomStandardNormalParam *param_ = nullptr;
RandomParam *param_ = nullptr;
};
} // namespace mindspore::kernel

@ -39,6 +39,8 @@ ops::PrimitiveC *TFActivationParser::Parse(const tensorflow::NodeDef &tf_op,
prim->set_activation_type(mindspore::ActivationType::TANH);
} else if (tf_op.op() == "Selu") {
prim->set_activation_type(mindspore::ActivationType::SELU);
} else if (tf_op.op() == "Softplus") {
prim->set_activation_type(mindspore::ActivationType::SOFTPLUS);
} else {
MS_LOG(ERROR) << "unsupported activation type:" << tf_op.op();
return nullptr;
@ -80,5 +82,6 @@ TFNodeRegistrar g_tfSigmoidParser("Sigmoid", new TFActivationParser());
TFNodeRegistrar g_tfTanhParser("Tanh", new TFActivationParser());
TFNodeRegistrar g_tfSeLUParser("Selu", new TFActivationParser());
TFNodeRegistrar g_tfLeakyReluParser("LeakyRelu", new TFLeakyReluParser());
TFNodeRegistrar g_tfSoftplusParser("Softplus", new TFActivationParser());
} // namespace lite
} // namespace mindspore

@ -0,0 +1,40 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tools/converter/parser/tf/tf_neg_parser.h"
#include <string>
#include <memory>
#include <map>
#include <vector>
#include "tools/converter/parser/tf/tf_node_parser_registry.h"
#include "ops/neg.h"
namespace mindspore {
namespace lite {
// Parses a TensorFlow "Neg" node into a MindSpore Neg primitive.
// Forwards every input edge name of the TF node into `inputs` and writes the
// node's input count into `output_size` (matching the original behavior).
// Ownership of the returned primitive passes to the caller.
ops::PrimitiveC *TFNegParser::Parse(const tensorflow::NodeDef &tf_op,
                                    const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
                                    std::vector<std::string> *inputs, int *output_size) {
  auto neg_prim = std::make_unique<ops::Neg>();
  const int input_count = tf_op.input_size();
  *output_size = input_count;
  for (int idx = 0; idx < input_count; ++idx) {
    inputs->emplace_back(tf_op.input(idx));
  }
  return neg_prim.release();
}
TFNodeRegistrar g_tfNegParser("Neg", new TFNegParser());
} // namespace lite
} // namespace mindspore

@ -0,0 +1,38 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_NEG_PARSER_H_
#define MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_NEG_PARSER_H_
#include <string>
#include <memory>
#include <map>
#include <vector>
#include "tools/converter/parser/tf/tf_node_parser.h"
namespace mindspore {
namespace lite {
// Node parser that converts a TensorFlow "Neg" NodeDef into a MindSpore
// primitive; registered with the TF parser registry in the matching .cc file.
class TFNegParser : public TFNodeParser {
 public:
  TFNegParser() = default;
  ~TFNegParser() override = default;
  // Builds the primitive for `tf_op`, appending its input edge names to
  // `inputs` and writing the produced output count to `output_size`.
  ops::PrimitiveC *Parse(const tensorflow::NodeDef &tf_op,
                         const std::map<string, const tensorflow::NodeDef *> &tf_node_map,
                         std::vector<std::string> *inputs, int *output_size) override;
};
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_TOOLS_CONVERTER_PARSER_TF_TF_NEG_PARSER_H_
Loading…
Cancel
Save