Add test case for in-place input update in SparseApplyProximalAdagrad and move initialization into ResetResource

pull/9949/head
TFbunny 4 years ago
parent d88aa05859
commit d5a08f1956

@ -32,15 +32,7 @@ namespace kernel {
template <typename T>
class SparseApplyProximalAdagradKernel : public GpuKernel {
public:
SparseApplyProximalAdagradKernel()
: variable_size_(0),
accumulation_size_(0),
learning_rate_size_(0),
l1_regularization_size_(0),
l2_regularization_size_(0),
gradient_size_(0),
indices_size_(0) {}
SparseApplyProximalAdagradKernel() { ResetResource(); }
~SparseApplyProximalAdagradKernel() override = default;
const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; }
@ -121,6 +113,19 @@ class SparseApplyProximalAdagradKernel : public GpuKernel {
output_size_list_.push_back(accumulation_size_);
}
// Resets every cached size and clears the size lists so this kernel object
// can be safely re-initialized (e.g. reused with new input shapes).
// Called from the constructor as well, so all members it touches must be
// assignable before Init() has ever run.
void ResetResource() noexcept override {
variable_size_ = 0;
accumulation_size_ = 0;
learning_rate_size_ = 0;
l1_regularization_size_ = 0;
l2_regularization_size_ = 0;
gradient_size_ = 0;
indices_size_ = 0;
// Drop any sizes registered by a previous InitSizeLists() call.
input_size_list_.clear();
output_size_list_.clear();
workspace_size_list_.clear();
}
private:
size_t variable_size_;
size_t accumulation_size_;

@ -97,6 +97,18 @@ class SparseFtrlGpuKernel : public GpuKernel {
return true;
}
protected:
// Registers the byte sizes of this kernel's device buffers with the GPU
// kernel framework. Input order must match the operator's input signature:
// var, accum, linear, grad, indices.
void InitSizeLists() override {
input_size_list_.push_back(variable_size_);
input_size_list_.push_back(accumulation_size_);
input_size_list_.push_back(linear_size_);
input_size_list_.push_back(gradient_size_);
input_size_list_.push_back(indices_size_);
// Output sizes are zero — presumably the op updates var/accum/linear in
// place rather than writing separate output buffers. TODO(review): confirm.
output_size_list_.push_back(0);
output_size_list_.push_back(0);
output_size_list_.push_back(0);
}
void ResetResource() noexcept override {
variable_size_ = 0;
accumulation_size_ = 0;
@ -115,18 +127,6 @@ class SparseFtrlGpuKernel : public GpuKernel {
workspace_size_list_.clear();
}
protected:
// Registers the byte sizes of this kernel's device buffers with the GPU
// kernel framework. Input order must match the operator's input signature:
// var, accum, linear, grad, indices.
void InitSizeLists() override {
input_size_list_.push_back(variable_size_);
input_size_list_.push_back(accumulation_size_);
input_size_list_.push_back(linear_size_);
input_size_list_.push_back(gradient_size_);
input_size_list_.push_back(indices_size_);
// Output sizes are zero — presumably the op updates var/accum/linear in
// place rather than writing separate output buffers. TODO(review): confirm.
output_size_list_.push_back(0);
output_size_list_.push_back(0);
output_size_list_.push_back(0);
}
private:
size_t variable_size_;
size_t accumulation_size_;

@ -62,6 +62,28 @@ def test_small_shape():
np.testing.assert_array_almost_equal(output1.asnumpy(), expect1)
np.testing.assert_array_almost_equal(output2.asnumpy(), expect2)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_small_shape_input_update():
    """Runs SparseApplyProximalAdagrad on a 3x3 input and checks that the
    `var` and `accum` parameters held by the network were updated in place."""
    variable = Tensor(np.arange(9, dtype=np.float32).reshape(3, 3))
    accumulator = Tensor(np.zeros((3, 3), dtype=np.float32))
    learning_rate = 1.0
    l1_reg = 1.0
    l2_reg = 0.0
    gradient = Tensor(np.full((3, 3), 8.0, dtype=np.float32))
    sparse_indices = Tensor(np.array([1, 0, 2], np.int32))
    network = Net(variable, accumulator, learning_rate, l1_reg, l2_reg)
    network(gradient, sparse_indices)
    # Expected post-update values of the network's parameters.
    expected_var = np.array([[-0.875, 0., 0.875],
                             [1.875, 2.875, 3.875],
                             [4.875, 5.875, 6.875]])
    expected_accum = np.full((3, 3), 64.)
    # Assert against the parameters themselves: the op must mutate its inputs.
    np.testing.assert_array_almost_equal(network.var.data.asnumpy(), expected_var)
    np.testing.assert_array_almost_equal(network.accum.data.asnumpy(), expected_accum)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard

Loading…
Cancel
Save