!56 Synchronization code423 to ms-incubator

Merge pull request !56 from changzherui/syn-code423
pull/915/head
mindspore-ci-bot 5 years ago committed by Gitee
commit 6844ea633d

@ -52,7 +52,7 @@ ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 2
Cpp11BracedListStyle: true
DerivePointerAlignment: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
@ -94,7 +94,7 @@ PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
PointerAlignment: Right
RawStringFormats:
- Language: Cpp
Delimiters:

@ -1,8 +1,6 @@
cmake_minimum_required(VERSION 3.14)
project (MindSpore)
include(${CMAKE_SOURCE_DIR}/cmake/options.cmake)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/modules/")
if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")

@ -179,7 +179,7 @@ Check out how MindSpore Open Governance [works](https://gitee.com/mindspore/comm
- [MindSpore Slack](https://join.slack.com/t/mindspore/shared_invite/enQtOTcwMTIxMDI3NjM0LTNkMWM2MzI5NjIyZWU5ZWQ5M2EwMTQ5MWNiYzMxOGM4OWFhZjI4M2E5OGI2YTg3ODU1ODE2Njg1MThiNWI3YmQ) - Communication platform for developers.
- IRC channel at `#mindspore` (only for meeting minutes logging purpose)
- Video Conferencing: meet.jit.si
- Video Conferencing: https://meet.jit.si
- Mailing-list: https://mailweb.mindspore.cn/postorius/lists
## Contributing

@ -31,6 +31,7 @@ cd %CD%/mindspore
cmake -DCMAKE_BUILD_TYPE=Release -DENABLE_CPU=ON -DENABLE_MINDDATA=ON -DUSE_GLOG=ON -G "CodeBlocks - MinGW Makefiles" ../..
IF NOT %errorlevel% == 0 (
echo "cmake fail."
goto run_fail
)
@ -40,6 +41,7 @@ IF "%1%" == "" (
cmake --build . --target package -- -j%1%
)
IF NOT %errorlevel% == 0 (
echo "build fail."
goto run_fail
)
@ -49,6 +51,6 @@ goto run_eof
:run_fail
cd %BASEPATH%
echo "build fail."
set errorlevel=1
:run_eof

@ -23,30 +23,30 @@ export BUILD_PATH="${BASEPATH}/build/"
usage()
{
echo "Usage:"
echo "bash build.sh [-d] [-r] [-v] [-c on|off] [-t on|off] [-g on|off] [-h] [-b ge|cpu] [-m infer|train] \\"
echo " [-a on|off] [-g on|off] [-p on|off] [-i] [-L] [-R] [-D on|off] [-j[n]] [-e gpu|d|cpu] \\"
echo "bash build.sh [-d] [-r] [-v] [-c on|off] [-t on|off] [-g on|off] [-h] [-b ge] [-m infer|train] \\"
echo " [-a on|off] [-Q on|off] [-p on|off] [-i] [-L] [-R] [-D on|off] [-j[n]] [-e gpu|d|cpu] \\"
echo " [-P on|off] [-z [on|off]] [-M on|off] [-V 9.2|10.1] [-I] [-K]"
echo ""
echo "Options:"
echo " -d Debug mode"
echo " -r Release mode, default mode"
echo " -v Display build command"
echo " -c Enable code coverage switch, default off"
echo " -t Run testcases switch, default on"
echo " -c Enable code coverage, default off"
echo " -t Run testcases, default on"
echo " -g Use glog to output log, default on"
echo " -h Print usage"
echo " -b Select other backend, available: \\"
echo " ge:graph engine, cpu"
echo " -m Select mode, available: infer, train, default is infer "
echo " ge:graph engine"
echo " -m Select graph engine backend mode, available: infer, train, default is infer"
echo " -a Enable ASAN, default off"
echo " -p Enable pipeline profile, default off"
echo " -p Enable pipeline profile, print to stdout, default off"
echo " -R Enable pipeline profile, record to json, default off"
echo " -i Enable increment building, default off"
echo " -L Enable load ANF-IR as input of 'infer', default off"
echo " -R Enable the time_line record, default off"
echo " -j[n] Set the threads when building (Default: -j8)"
echo " -e Use gpu, d or cpu"
echo " -P Enable dump anf graph to file in ProtoBuffer format, default on"
echo " -Q Enable dump end to end, default off"
echo " -Q Enable dump memory, default off"
echo " -D Enable dumping of function graph ir, default on"
echo " -z Compile dataset & mindrecord, default on"
echo " -M Enable MPI and NCCL for GPU training, default on"

@ -64,7 +64,7 @@ set(_ge_tmp_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
string(REPLACE " -Wall" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE " -Werror" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
# force __FILE__ to show relative path of file, from source directory
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__FILE__='\"$(subst ${CMAKE_SOURCE_DIR}/,,$(abspath $<))\"' -Wno-builtin-macro-redefined")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__FILE__='\"$(subst $(realpath ${CMAKE_SOURCE_DIR})/,,$(abspath $<))\"' -Wno-builtin-macro-redefined")
add_subdirectory(${GE_SOURCE_DIR}/src/common/graph)
if(ENABLE_D)
add_subdirectory(${GE_SOURCE_DIR}/src/ge/common)

@ -1,16 +1,15 @@
set(incubator_tvm_gpu_CFLAGS "-pipe -Wall -fPIC -fstack-protector-all -D_FORTIFY_SOURCE=2 -O2")
set(incubator_tvm_gpu_CXXFLAGS "-std=c++11 -pipe -Wall -fPIC -fstack-protector-all -D_FORTIFY_SOURCE=2 -O2")
set(USE_CUDA "ON")
set(incubator_tvm_gpu_CXXFLAGS "-D_FORTIFY_SOURCE=2 -O2")
set(incubator_tvm_gpu_CFLAGS "-D_FORTIFY_SOURCE=2 -O2")
mindspore_add_pkg(incubator_tvm_gpu
VER 0.6.0
LIBS tvm
URL https://github.com/apache/incubator-tvm/archive/v0.6.0.tar.gz
MD5 9cbbd32545a776023acabbba270449fe
CUSTOM_CMAKE ${CMAKE_SOURCE_DIR}/third_party/patch/incubator-tvm/
SUBMODULES ${dlpack_DIRPATH} ${dmlc-core_DIRPATH} ${rang_DIRPATH}
SOURCEMODULES topi/python/topi python/tvm
PATCHES ${CMAKE_SOURCE_DIR}/third_party/patch/incubator-tvm/find_library.patch
${CMAKE_SOURCE_DIR}/third_party/patch/incubator-tvm/include.patch
${CMAKE_SOURCE_DIR}/third_party/patch/incubator-tvm/src_pass.patch
CMAKE_OPTION -DBUILD_TESTING=OFF -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DBUILD_SHARED_LIBS=ON)
include_directories(${incubator_tvm_gpu_INC})
add_library(mindspore::tvm ALIAS incubator_tvm_gpu::tvm)
${CMAKE_SOURCE_DIR}/third_party/patch/incubator-tvm/include.patch
${CMAKE_SOURCE_DIR}/third_party/patch/incubator-tvm/src_pass.patch
CMAKE_OPTION " ")
add_library(mindspore::tvm ALIAS incubator_tvm_gpu::tvm)

@ -205,7 +205,7 @@ set(MS_FIND_NO_DEFAULT_PATH ${MS_FIND_NO_DEFAULT_PATH} PARENT_SCOPE)
function(mindspore_add_pkg pkg_name )
set(options )
set(oneValueArgs URL MD5 GIT_REPOSITORY GIT_TAG VER EXE DIR HEAD_ONLY CMAKE_PATH RELEASE LIB_PATH)
set(oneValueArgs URL MD5 GIT_REPOSITORY GIT_TAG VER EXE DIR HEAD_ONLY CMAKE_PATH RELEASE LIB_PATH CUSTOM_CMAKE)
set(multiValueArgs CMAKE_OPTION LIBS PRE_CONFIGURE_COMMAND CONFIGURE_COMMAND BUILD_OPTION INSTALL_INCS INSTALL_LIBS PATCHES SUBMODULES SOURCEMODULES)
cmake_parse_arguments(PKG "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
@ -281,10 +281,6 @@ function(mindspore_add_pkg pkg_name )
file(GLOB ${pkg_name}_INSTALL_SUBMODULE ${_SUBMODULE_FILE}/*)
file(COPY ${${pkg_name}_INSTALL_SUBMODULE} DESTINATION ${${pkg_name}_SOURCE_DIR}/3rdparty/${_SUBMODENAME})
endforeach (_SUBMODULE_FILE)
foreach(_SOURCE_DIR ${PKG_SOURCEMODULES})
file(GLOB ${pkg_name}_INSTALL_SOURCE ${${pkg_name}_SOURCE_DIR}/${_SOURCE_DIR}/*)
file(COPY ${${pkg_name}_INSTALL_SOURCE} DESTINATION ${${pkg_name}_BASE_DIR}/${_SOURCE_DIR}/)
endforeach (_SUBMODULE_FILE)
else()
set(${pkg_name}_SOURCE_DIR ${PKG_DIR})
endif ()
@ -304,12 +300,20 @@ function(mindspore_add_pkg pkg_name )
message(FATAL_ERROR "Failed patch: ${_LF_PATCH_FILE}")
endif()
endforeach(_PATCH_FILE)
foreach(_SOURCE_DIR ${PKG_SOURCEMODULES})
file(GLOB ${pkg_name}_INSTALL_SOURCE ${${pkg_name}_SOURCE_DIR}/${_SOURCE_DIR}/*)
file(COPY ${${pkg_name}_INSTALL_SOURCE} DESTINATION ${${pkg_name}_BASE_DIR}/${_SOURCE_DIR}/)
endforeach (_SUBMODULE_FILE)
file(LOCK ${${pkg_name}_BASE_DIR} DIRECTORY GUARD FUNCTION RESULT_VARIABLE ${pkg_name}_LOCK_RET TIMEOUT 600)
if(NOT ${pkg_name}_LOCK_RET EQUAL "0")
message(FATAL_ERROR "error! when try lock ${${pkg_name}_BASE_DIR} : ${${pkg_name}_LOCK_RET}")
endif()
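# if CUSTOM_CMAKE is given, copy the provided CMakeLists.txt into the package source directory before configuring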
if (PKG_CUSTOM_CMAKE)
file(GLOB ${pkg_name}_cmake ${PKG_CUSTOM_CMAKE}/CMakeLists.txt)
file(COPY ${${pkg_name}_cmake} DESTINATION ${${pkg_name}_SOURCE_DIR})
endif ()
if(${pkg_name}_SOURCE_DIR)
if (PKG_HEAD_ONLY)
file(GLOB ${pkg_name}_SOURCE_SUBDIRS ${${pkg_name}_SOURCE_DIR}/*)

@ -0,0 +1,58 @@
# AlexNet Example
## Description
Training AlexNet with the CIFAR-10 dataset in MindSpore.
This is a simple tutorial for training AlexNet in MindSpore.
## Requirements
- Install [MindSpore](https://www.mindspore.cn/install/en).
- Download the CIFAR-10 dataset at <http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz>. The directory structure is as follows:
```
├─cifar-10-batches-bin
└─cifar-10-verify-bin
```
## Running the example
```python
# train AlexNet, hyperparameter setting in config.py
python train.py --data_path cifar-10-batches-bin
```
You will see the loss value for each step, similar to the following:
```bash
epoch: 1 step: 1, loss is 2.2791853
...
epoch: 1 step: 1536, loss is 1.9366643
epoch: 1 step: 1537, loss is 1.6983616
epoch: 1 step: 1538, loss is 1.0221305
...
```
Then, test AlexNet with the trained model checkpoint:
```python
# test AlexNet; after 1 epoch of training the accuracy is up to 51.1%, after 10 epochs up to 81.2%
python eval.py --data_path cifar-10-verify-bin --mode test --ckpt_path checkpoint_alexnet-1_1562.ckpt
```
## Note
There are some optional arguments:
```bash
-h, --help show this help message and exit
--device_target {Ascend,GPU}
device on which the code will run (default: Ascend)
--data_path DATA_PATH
path where the dataset is saved
--dataset_sink_mode DATASET_SINK_MODE
whether to enable dataset sink mode (True or False)
```
You can run ```python train.py -h``` or ```python eval.py -h``` to get more information.

@ -0,0 +1,46 @@
# MindRecord generating guidelines
<!-- TOC -->
- [MindRecord generating guidelines](#mindrecord-generating-guidelines)
- [Create work space](#create-work-space)
- [Implement data generator](#implement-data-generator)
- [Run data generator](#run-data-generator)
<!-- /TOC -->
## Create work space
Assume the dataset is named 'xyz'.
* Create work space from template
```shell
cd ${your_mindspore_home}/example/convert_to_mindrecord
cp -r template xyz
```
## Implement data generator
Edit the dictionary data generator.
* Edit file
```shell
cd ${your_mindspore_home}/example/convert_to_mindrecord
vi xyz/mr_api.py
```
Two APIs, 'mindrecord_task_number' and 'mindrecord_dict_data', must be implemented:
- 'mindrecord_task_number()' returns the number of tasks. Return 1 if the data rows are generated serially; return N if the generator can be split into N tasks that run in parallel.
- 'mindrecord_dict_data(task_id)' yields dictionary data row by row. 'task_id' ranges from 0 to N-1, where N is the return value of mindrecord_task_number().
Tips for parallel runs:
- For ImageNet, one directory can be a task.
- For TFRecord with multiple files, each file can be a task.
- For TFRecord with a single file, the data can still be split into N tasks: task_id=K picks a data row only if (count % N == K), as sketched below.
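For the single-file case, a minimal sketch of such an mr_api.py split is shown below; `NUM_TASKS`, `read_rows`, and the generated rows are hypothetical placeholders for illustration only, not part of the MindSpore API:
```python
# Hypothetical sketch: split one sequential source into N parallel tasks.
NUM_TASKS = 4

def read_rows():
    """Placeholder generator yielding raw rows from a single source file."""
    for i in range(1000):
        yield {"label": i % 10}

def mindrecord_task_number():
    """Return the number of parallel tasks."""
    return NUM_TASKS

def mindrecord_dict_data(task_id):
    """Yield only the rows whose running count satisfies count % NUM_TASKS == task_id."""
    for count, row in enumerate(read_rows()):
        if count % NUM_TASKS == task_id:
            yield row
```
With such a split, writer.py (shown later in this diff) can run the N tasks in parallel workers, each writing its own share of the rows.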
## Run data generator
* Run the Python script
```shell
cd ${your_mindspore_home}/example/convert_to_mindrecord
python writer.py --mindrecord_script imagenet [...]
```

@ -0,0 +1,122 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
User-defined API for MindRecord writer.
Two APIs must be implemented:
1. mindrecord_task_number()
# Return the number of parallel tasks; return 1 if there is no parallelism.
2. mindrecord_dict_data(task_id)
# Yield the data rows for one task.
# task_id ranges over 0..N-1, where N is the return value of mindrecord_task_number().
"""
import argparse
import os
import pickle
######## mindrecord_schema begin ##########
mindrecord_schema = {"label": {"type": "int64"},
"data": {"type": "bytes"},
"file_name": {"type": "string"}}
######## mindrecord_schema end ##########
######## Frozen code begin ##########
with open('mr_argument.pickle', 'rb') as mindrecord_argument_file_handle:
ARG_LIST = pickle.load(mindrecord_argument_file_handle)
######## Frozen code end ##########
parser = argparse.ArgumentParser(description='Mind record imagenet example')
parser.add_argument('--label_file', type=str, default="", help='label file')
parser.add_argument('--image_dir', type=str, default="", help='images directory')
######## Frozen code begin ##########
args = parser.parse_args(ARG_LIST)
print(args)
######## Frozen code end ##########
def _user_defined_private_func():
"""
Internal function for tasks list
Return:
tasks list
"""
if not os.path.exists(args.label_file):
raise IOError("map file {} not exists".format(args.label_file))
label_dict = {}
with open(args.label_file) as file_handle:
line = file_handle.readline()
while line:
labels = line.split(" ")
label_dict[labels[1]] = labels[0]
line = file_handle.readline()
# get all the dirs such as n02087046, n02094114, n02109525
dir_paths = {}
for item in label_dict:
real_path = os.path.join(args.image_dir, label_dict[item])
if not os.path.isdir(real_path):
print("{} dir is not exist".format(real_path))
continue
dir_paths[item] = real_path
if not dir_paths:
print("not valid image dir in {}".format(args.image_dir))
return {}, {}
dir_list = []
for label in dir_paths:
dir_list.append(label)
return dir_list, dir_paths
dir_list_global, dir_paths_global = _user_defined_private_func()
def mindrecord_task_number():
"""
Get task size.
Return:
number of tasks
"""
return len(dir_list_global)
def mindrecord_dict_data(task_id):
"""
Get data dict.
Yields:
data (dict): data row which is dict.
"""
# get the filename, label and image binary as a dict
label = dir_list_global[task_id]
for item in os.listdir(dir_paths_global[label]):
file_name = os.path.join(dir_paths_global[label], item)
if not item.endswith("JPEG") and not item.endswith(
"jpg") and not item.endswith("jpeg"):
print("{} file is not suffix with JPEG/jpg, skip it.".format(file_name))
continue
data = {}
data["file_name"] = str(file_name)
data["label"] = int(label)
# get the image data
image_file = open(file_name, "rb")
image_bytes = image_file.read()
image_file.close()
data["data"] = image_bytes
yield data

@ -0,0 +1,8 @@
#!/bin/bash
rm /tmp/imagenet/mr/*
python writer.py --mindrecord_script imagenet \
--mindrecord_file "/tmp/imagenet/mr/m" \
--mindrecord_partitions 16 \
--label_file "/tmp/imagenet/label.txt" \
--image_dir "/tmp/imagenet/jpeg"

@ -0,0 +1,6 @@
#!/bin/bash
rm /tmp/template/*
python writer.py --mindrecord_script template \
--mindrecord_file "/tmp/template/m" \
--mindrecord_partitions 4

@ -0,0 +1,73 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
User-defined API for MindRecord writer.
Two APIs must be implemented:
1. mindrecord_task_number()
# Return the number of parallel tasks; return 1 if there is no parallelism.
2. mindrecord_dict_data(task_id)
# Yield the data rows for one task.
# task_id ranges over 0..N-1, where N is the return value of mindrecord_task_number().
"""
import argparse
import pickle
# ## Parse argument
with open('mr_argument.pickle', 'rb') as mindrecord_argument_file_handle: # Do NOT change this line
ARG_LIST = pickle.load(mindrecord_argument_file_handle) # Do NOT change this line
parser = argparse.ArgumentParser(description='Mind record api template') # Do NOT change this line
# ## Your arguments below
# parser.add_argument(...)
args = parser.parse_args(ARG_LIST) # Do NOT change this line
print(args) # Do NOT change this line
# ## Default mindrecord vars. Leave them commented out unless a default value has to be changed.
# mindrecord_index_fields = ['label']
# mindrecord_header_size = 1 << 24
# mindrecord_page_size = 1 << 25
# define global vars here if necessary
# ####### Your code below ##########
mindrecord_schema = {"label": {"type": "int32"}}
def mindrecord_task_number():
"""
Get task size.
Return:
number of tasks
"""
return 1
def mindrecord_dict_data(task_id):
"""
Get data dict.
Yields:
data (dict): data row which is dict.
"""
print("task is {}".format(task_id))
for i in range(256):
data = {}
data['label'] = i
yield data

@ -0,0 +1,152 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
######################## write mindrecord example ########################
Write MindRecord files from dictionary data:
python writer.py --mindrecord_script /YourScriptPath ...
"""
import argparse
import os
import pickle
import time
from importlib import import_module
from multiprocessing import Pool
from mindspore.mindrecord import FileWriter
def _exec_task(task_id, parallel_writer=True):
"""
Execute task with specified task id
"""
print("exec task {}, parallel: {} ...".format(task_id, parallel_writer))
imagenet_iter = mindrecord_dict_data(task_id)
batch_size = 2048
transform_count = 0
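# pull rows from the generator in batches of batch_size and write each batch;
# the last partial batch is flushed when the generator is exhausted (StopIteration)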
while True:
data_list = []
try:
for _ in range(batch_size):
data_list.append(imagenet_iter.__next__())
transform_count += 1
writer.write_raw_data(data_list, parallel_writer=parallel_writer)
print("transformed {} record...".format(transform_count))
except StopIteration:
if data_list:
writer.write_raw_data(data_list, parallel_writer=parallel_writer)
print("transformed {} record...".format(transform_count))
break
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Mind record writer')
parser.add_argument('--mindrecord_script', type=str, default="template",
help='path where script is saved')
parser.add_argument('--mindrecord_file', type=str, default="/tmp/mindrecord",
help='written file name prefix')
parser.add_argument('--mindrecord_partitions', type=int, default=1,
help='number of written files')
parser.add_argument('--mindrecord_workers', type=int, default=8,
help='number of parallel workers')
args, other_args = parser.parse_known_args()
print(args)
print(other_args)
with open('mr_argument.pickle', 'wb') as file_handle:
pickle.dump(other_args, file_handle)
try:
mr_api = import_module(args.mindrecord_script + '.mr_api')
except ModuleNotFoundError:
raise RuntimeError("Unknown module path: {}".format(args.mindrecord_script + '.mr_api'))
num_tasks = mr_api.mindrecord_task_number()
print("Write mindrecord ...")
mindrecord_dict_data = mr_api.mindrecord_dict_data
# get number of files
writer = FileWriter(args.mindrecord_file, args.mindrecord_partitions)
start_time = time.time()
# set the header size
try:
header_size = mr_api.mindrecord_header_size
writer.set_header_size(header_size)
except AttributeError:
print("Default header size: {}".format(1 << 24))
# set the page size
try:
page_size = mr_api.mindrecord_page_size
writer.set_page_size(page_size)
except AttributeError:
print("Default page size: {}".format(1 << 25))
# get schema
try:
mindrecord_schema = mr_api.mindrecord_schema
except AttributeError:
raise RuntimeError("mindrecord_schema is not defined in mr_api.py.")
# create the schema
writer.add_schema(mindrecord_schema, "mindrecord_schema")
# add the index
try:
index_fields = mr_api.mindrecord_index_fields
writer.add_index(index_fields)
except AttributeError:
print("Default index fields: all simple fields are indexes.")
writer.open_and_set_header()
task_list = list(range(num_tasks))
# set number of workers
num_workers = args.mindrecord_workers
if num_tasks < 1:
num_tasks = 1
if num_workers > num_tasks:
num_workers = num_tasks
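# on Windows run every task serially; otherwise use a process pool when there is more than one task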
if os.name == 'nt':
for window_task_id in task_list:
_exec_task(window_task_id, False)
elif num_tasks > 1:
with Pool(num_workers) as p:
p.map(_exec_task, task_list)
else:
_exec_task(0, False)
ret = writer.commit()
os.remove("{}".format("mr_argument.pickle"))
end_time = time.time()
print("--------------------------------------------")
print("END. Total time: {}".format(end_time - start_time))
print("--------------------------------------------")

@ -0,0 +1,63 @@
# LeNet Example
## Description
Training LeNet with the MNIST dataset in MindSpore.
This is a simple, basic tutorial for constructing a network in MindSpore.
## Requirements
- Install [MindSpore](https://www.mindspore.cn/install/en).
- Download the MNIST dataset at <http://yann.lecun.com/exdb/mnist/>. The directory structure is as follows:
```
└─MNIST_Data
├─test
│ t10k-images.idx3-ubyte
│ t10k-labels.idx1-ubyte
└─train
train-images.idx3-ubyte
train-labels.idx1-ubyte
```
## Running the example
```python
# train LeNet, hyperparameter setting in config.py
python train.py --data_path MNIST_Data
```
You will see the loss value for each step, similar to the following:
```bash
epoch: 1 step: 1, loss is 2.3040335
...
epoch: 1 step: 1739, loss is 0.06952668
epoch: 1 step: 1740, loss is 0.05038793
epoch: 1 step: 1741, loss is 0.05018193
...
```
Then, test LeNet with the trained model checkpoint:
```python
# test LeNet, after 1 epoch training, the accuracy is up to 96.5%
python eval.py --data_path MNIST_Data --mode test --ckpt_path checkpoint_lenet-1_1875.ckpt
```
## Note
There are some optional arguments:
```bash
-h, --help show this help message and exit
--device_target {Ascend,GPU,CPU}
device on which the code will run (default: Ascend)
--data_path DATA_PATH
path where the dataset is saved
--dataset_sink_mode DATASET_SINK_MODE
whether to enable dataset sink mode (True or False)
```
You can run ```python train.py -h``` or ```python eval.py -h``` to get more information.

@ -1 +1 @@
Subproject commit 70bb745b459ff9a0e7fc1008d15fe4b510f03da7
Subproject commit 43a715bc461fd70b7837051a2f47f0a1b19c5859

@ -26,7 +26,12 @@ from .squeeze_grad import SqueezeGrad, gpu_schedule_SqueezeGrad
from .mean import SimpleMean, gpu_schedule_SimpleMean
from .mean_grad import SimpleMeanGrad, gpu_schedule_SimpleMeanGrad
from .mul import Mul, gpu_schedule_Mul
from .hsigmoid import Hsigmoid, gpu_schedule_Hsigmoid
from .hsigmoid_grad import HsigmoidGrad, gpu_schedule_HsigmoidGrad
from .hswish import Hswish, gpu_schedule_Hswish
from .hswish_grad import HswishGrad, gpu_schedule_HswishGrad
from .hsigmoid import HSigmoid, gpu_schedule_HSigmoid
from .hsigmoid_grad import HSigmoidGrad, gpu_schedule_HSigmoidGrad
from .hswish import HSwish, gpu_schedule_HSwish
from .hswish_grad import HSwishGrad, gpu_schedule_HSwishGrad
from .logical_or import LogicalOr, gpu_schedule_LogicalOr
from .logical_not import LogicalNot, gpu_schedule_LogicalNot
from .logical_and import LogicalAnd, gpu_schedule_LogicalAnd
from .sub import Sub, gpu_schedule_Sub
from .less_equal import LessEqual, gpu_schedule_LessEqual

@ -33,9 +33,9 @@ def topi_nn_hsigmoid(x):
(x(*i) + 3) / 6)))
def Hsigmoid(x):
def HSigmoid(x):
"""
Hsigmoid
HSigmoid
Args:
x:
@ -45,9 +45,9 @@ def Hsigmoid(x):
return topi_nn_hsigmoid(x)
def gpu_schedule_Hsigmoid(outs):
def gpu_schedule_HSigmoid(outs):
"""
gpu schedule Hsigmoid
gpu schedule HSigmoid
Args:
outs:

@ -12,14 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hsigmoid grad"""
"""HSigmoid grad"""
import _akg.topi as topi
import _akg.tvm as tvm
def HsigmoidGrad(y_grad, x):
def HSigmoidGrad(y_grad, x):
"""
HsigmoidGrad
HSigmoidGrad
Args:
y_grad:
x:
@ -32,7 +32,7 @@ def HsigmoidGrad(y_grad, x):
y_grad(*i) / 6)))
def gpu_schedule_HsigmoidGrad(outs):
def gpu_schedule_HSigmoidGrad(outs):
"""
gpu schedule ReLU6Grad
Args:

@ -33,9 +33,9 @@ def topi_nn_hswish(x):
x(*i) * (x(*i) + 3) / 6)))
def Hswish(x):
def HSwish(x):
"""
Hswish
HSwish
Args:
x:
@ -45,9 +45,9 @@ def Hswish(x):
return topi_nn_hswish(x)
def gpu_schedule_Hswish(outs):
def gpu_schedule_HSwish(outs):
"""
gpu schedule Hswish
gpu schedule HSwish
Args:
outs:

@ -12,14 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""HswishGrad"""
"""HSwishGrad"""
import _akg.topi as topi
import _akg.tvm as tvm
def HswishGrad(y_grad, x):
def HSwishGrad(y_grad, x):
"""
HswishGrad
HSwishGrad
Args:
y_grad:
x:
@ -34,9 +34,9 @@ def HswishGrad(y_grad, x):
return res6
def gpu_schedule_HswishGrad(outs):
def gpu_schedule_HSwishGrad(outs):
"""
gpu schedule HswishGrad
gpu schedule HSwishGrad
Args:
outs:

@ -0,0 +1,40 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""less_equal"""
import _akg.tvm
from _akg.ops.math import less_equal
from _akg.topi.generic import schedule_elemwise
def LessEqual(x, y):
"""LessEqual."""
return less_equal.less_equal(x, y)
def gpu_schedule_LessEqual(outs):
"""
GPU schedule for LessEqual.
Args:
outs (tvm.tensor.Tensor): Outputs of compute.
Returns:
sch (schedule.Schedule): The created schedule.
"""
device = 'cuda'
ctx = _akg.tvm.context(device, 0)
if not ctx.exist:
raise SystemError("Skip because %s is not enabled" % device)
with _akg.tvm.target.create(device):
sch = schedule_elemwise(outs)
return sch

Some files were not shown because too many files have changed in this diff.
