parent 125940314f
commit 2190da9946
@@ -1 +1 @@
-Subproject commit 6ffe9c24319d7297d0feeb10ee2bd8135e24c5c8
+Subproject commit 0a0338fecd54c654c1992af156d41e036569343c
File diff suppressed because it is too large
@@ -0,0 +1,57 @@
/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ADD_ATOMIC_CLEAN_GPU_H_
#define MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ADD_ATOMIC_CLEAN_GPU_H_

#include <memory>
#include <vector>
#include "backend/optimizer/common/optimizer.h"
#include "backend/session/kernel_graph.h"

namespace mindspore {
namespace opt {
class AtomicCleanInsertter : public Pass {
 public:
  AtomicCleanInsertter() : Pass("atomic_clean") {}
  ~AtomicCleanInsertter() override = default;
  bool Run(const FuncGraphPtr &func_graph) override;

 private:
  void ProcessOriginCNode(const AnfNodePtr &composite_node, const AnfNodePtr &new_input,
                          const FuncGraphManagerPtr &mng);
  bool CanActivateAtomicAdd(const AnfNodePtr &anf_node);
  void InsertAtomicClean(const KernelGraphPtr &main_graph, const AnfNodePtr &anf_node, const FuncGraphManagerPtr &mng);
  void AddDepend(const FuncGraphPtr &main_graph, const AnfNodePtr &clean_node, const AnfNodePtr &composite_node,
                 const AnfNodePtr &user_node, int index);
  void AddControlDepend(const FuncGraphPtr &main_graph, const AnfNodePtr &pre_node, const AnfNodePtr &post_node,
                        const FuncGraphManagerPtr &mng);
  void CreateInplaceAssignNodeAndCorrectReturn(const FuncGraphPtr &sub_graph, const AnfNodePtr &new_parameter);
  void CorrectAbstract(const AnfNodePtr &composite_node);
  void CorrectKernelBuildInfo(const AnfNodePtr &composite_node, const AnfNodePtr &new_input);
  CNodePtr CreateAtomicCleanCompositeNode(const KernelGraphPtr &main_graph, TypeId dst_type);
  void ProcessOriginCNodeUser(const KernelGraphPtr &main_graph, const AnfNodePtr &composite_node,
                              const AnfNodePtr &broadcast_to_node, const FuncGraphManagerPtr &mng);

  CNodePtr atomic_add_node_{nullptr};
  size_t reduce_real_output_index_{0};
  size_t real_output_num_{0};
};
using AtomicCleanInsertterPtr = std::shared_ptr<AtomicCleanInsertter>;
}  // namespace opt
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_BACKEND_OPTIMIZER_GRAPH_KERNEL_ADD_ATOMIC_CLEAN_GPU_H_
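Judging by the names in this header (atomic_clean, InsertAtomicClean, CreateAtomicCleanCompositeNode), the pass zero-initializes ("cleans") the output buffer of composite kernels whose reductions are lowered to atomic adds. A minimal NumPy sketch of why the clean step matters; atomic_add_reduce below is a hypothetical stand-in for a GPU kernel that accumulates with atomicAdd, not MindSpore code:

import numpy as np

def atomic_add_reduce(x, out):
    # Stand-in for a device kernel: every element is accumulated into `out`
    # with what would be an atomicAdd on the GPU.
    for v in x.ravel():
        out[...] += v
    return out

np.random.seed(0)
x = np.random.normal(0, 1, [2, 3, 4]).astype(np.float32)

dirty = np.full((), 123.0, dtype=np.float32)   # stale buffer contents
clean = np.zeros((), dtype=np.float32)         # after the inserted clean op

assert not np.allclose(atomic_add_reduce(x, dirty), np.sum(x))
assert np.allclose(atomic_add_reduce(x, clean), np.sum(x), rtol=1.e-4)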
@@ -0,0 +1,124 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import numpy as np
import pytest
import mindspore.context as context
from mindspore import Tensor
from mindspore.nn import Cell
import mindspore.ops.operations as P


class SumOutNet(Cell):
    def __init__(self):
        super(SumOutNet, self).__init__()
        self.square = P.Square()
        self.sum = P.ReduceSum()

    def construct(self, x):
        mul_res = self.square(x)
        return self.sum(mul_res, (0,))


class SingleOutNet(Cell):
    def __init__(self):
        super(SingleOutNet, self).__init__()
        self.add = P.TensorAdd()
        self.mul = P.Mul()
        self.sum = P.ReduceSum()

    def construct(self, x, y):
        mul_res = self.mul(x, y)
        sum_res = self.sum(mul_res, ())
        return self.add(sum_res, x)


class MultiOutNet(Cell):
    def __init__(self):
        super(MultiOutNet, self).__init__()
        self.add = P.TensorAdd()
        self.mul = P.Mul()
        self.sum = P.ReduceSum()

    def construct(self, x, y):
        add_res = self.add(x, y)
        mul_res = self.mul(add_res, add_res)
        sum_res = self.sum(mul_res, ())
        return self.add(add_res, sum_res)


# The helpers below have no test_ prefix, so pytest does not collect them directly;
# the decorated test_*_gpu wrappers at the bottom set the GPU graph-kernel context
# and then call them.
def atomic_add_sum_output():
    np.random.seed(0)
    input_x = np.random.normal(0, 1, [2, 3, 4, 3]).astype(np.float32)

    expect = np.sum(np.square(input_x), axis=(0,))

    net = SumOutNet()
    result = net(Tensor(input_x))

    res = np.allclose(expect, result.asnumpy(), rtol=1.e-4, atol=1.e-7, equal_nan=True)
    assert res


def atomic_add_single_output():
    np.random.seed(0)
    input_x = np.random.normal(0, 1, [2, 2, 2, 256]).astype(np.float32)
    input_y = np.random.normal(0, 1, [2, 2, 2, 256]).astype(np.float32)

    # The full reduction yields a scalar, which broadcasts over input_x.
    expect = np.sum(input_x * input_y) + input_x

    net = SingleOutNet()
    result = net(Tensor(input_x), Tensor(input_y))

    res = np.allclose(expect, result.asnumpy(), rtol=1.e-4, atol=1.e-7, equal_nan=True)
    assert res


def atomic_add_multi_output():
    np.random.seed(0)
    input_x = np.random.normal(0, 1, [2, 2, 2, 256]).astype(np.float32)
    input_y = np.random.normal(0, 1, [2, 2, 2, 256]).astype(np.float32)

    expect = np.sum(np.square(input_x + input_y)) + (input_x + input_y)

    net = MultiOutNet()
    result = net(Tensor(input_x), Tensor(input_y))

    res = np.allclose(expect, result.asnumpy(), rtol=1.e-4, atol=1.e-7, equal_nan=True)
    assert res


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_atomic_add_sum_output_gpu():
    context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="GPU")
    atomic_add_sum_output()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_atomic_add_single_output_gpu():
    context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="GPU")
    atomic_add_single_output()


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_atomic_add_multi_output_gpu():
    context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="GPU")
    atomic_add_multi_output()
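A quick local sanity check for the pass, not part of the test file, is to run the same network with graph kernel fusion disabled and compare outputs. run_both below is a hypothetical helper; set_context and enable_graph_kernel are the same public MindSpore APIs the tests above already use:

import numpy as np
import mindspore.context as context
from mindspore import Tensor

def run_both(net_cls, *inputs):
    # Baseline: graph kernel fusion (and thus the atomic_clean pass) disabled.
    context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=False, device_target="GPU")
    baseline = net_cls()(*inputs).asnumpy()
    # Fused: the atomic_clean pass may rewrite the reduction into an atomic add.
    context.set_context(mode=context.GRAPH_MODE, enable_graph_kernel=True, device_target="GPU")
    fused = net_cls()(*inputs).asnumpy()
    assert np.allclose(baseline, fused, rtol=1.e-4, atol=1.e-7)

np.random.seed(0)
run_both(SumOutNet, Tensor(np.random.normal(0, 1, [2, 3, 4, 3]).astype(np.float32)))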