Clean codex: use std::vector instead of new[]

pull/5640/head
ZPaC 5 years ago
parent 57215ef000
commit 69d527050f

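A note on the pattern this commit targets: a buffer allocated with new[] and released by an explicit delete[] leaks whenever MS_LOG(EXCEPTION) throws between the allocation and the delete[], because stack unwinding skips the delete[]. A std::vector releases its storage in its destructor even on that path. A minimal sketch of the idea, not from this commit (the function name is hypothetical, and std::memcpy plus a plain exception stand in for the memcpy_s and MS_LOG(EXCEPTION) used in the tree):

#include <cstring>
#include <stdexcept>
#include <vector>

void ConvertIndices(const float *src, int *dst, size_t dst_bytes, size_t n) {
  std::vector<int> converted(n);  // RAII-owned; freed on every exit path
  for (size_t i = 0; i < n; ++i) {
    converted[i] = static_cast<int>(src[i]);
  }
  if (dst_bytes < n * sizeof(int)) {
    throw std::runtime_error("memcpy error");  // vector is still released here
  }
  std::memcpy(dst, converted.data(), n * sizeof(int));
}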
mindspore/ccsrc/frontend/parallel/ps/optimizer_info.cc

@@ -103,17 +103,16 @@ void SparseOptimInfo::Accumulate(const Values &values, const Lengths &lengths) {
   float *incr_indice_data = values.data() + indice_offset;
   size_t incr_indice_size = lengths[indices_index];
   size_t incr_indice_data_size = incr_indice_size * sizeof(int);
-  int *converted_indices = new int[incr_indice_size];
+  std::vector<int> converted_indices(incr_indice_size);
   for (size_t i = 0; i < incr_indice_size; i++) {
     converted_indices[i] = static_cast<int>(incr_indice_data[i]);
   }
-  auto ret2 =
-    memcpy_s(accum_indices_data + indices_offset_, incr_indice_data_size, converted_indices, incr_indice_data_size);
+  auto ret2 = memcpy_s(accum_indices_data + indices_offset_, incr_indice_data_size, converted_indices.data(),
+                       incr_indice_data_size);
   if (ret2 != 0) {
     MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret2 << ")";
   }
-  delete[] converted_indices;
   indices_offset_ += lengths[indices_index];
   indices()->size += incr_indice_data_size;
 }
@@ -123,9 +122,9 @@ void SparseOptimInfo::ComputeMean(const std::shared_ptr<std::vector<std::shared_
   size_t indices_size = static_cast<size_t>(indices()->size / sizeof(int));
   int segment_size = gradient()->size / indices()->size;
-  float *new_grad = new float[indices_size * segment_size];
-  int *new_indices = new int[indices_size];
-  mindspore::kernel::SparseGradient<int> unique_sparse_grad({new_grad, new_indices, indices_size});
+  std::vector<float> new_grad(indices_size * segment_size);
+  std::vector<int> new_indices(indices_size);
+  mindspore::kernel::SparseGradient<int> unique_sparse_grad({new_grad.data(), new_indices.data(), indices_size});
   const std::vector<std::shared_ptr<std::vector<size_t>>> &shape_vec = *shapes;
   if (shape_vec.size() < 2 || shape_vec[1] == nullptr) {
@@ -181,9 +180,6 @@ void SparseOptimInfo::ComputeMean(const std::shared_ptr<std::vector<std::shared_
   for (size_t i = 0; i < unique_sparse_grad.indices_size_ * segment_size; i++) {
     grad_data[i] = grad_data[i] / n;
   }
-  delete[] new_grad;
-  delete[] new_indices;
 }

 void SparseOptimInfo::Reset() {

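One nuance of these replacements, worth noting alongside the hunks above: new int[n] leaves elements uninitialized, while std::vector<int>(n) value-initializes every element to zero. That is harmless here because each buffer is fully written before it is read, but it does add one O(n) pass. A tiny self-contained illustration (not from the commit):

#include <vector>

int main() {
  int *raw = new int[4];    // elements indeterminate; reading them is undefined
  std::vector<int> vec(4);  // elements value-initialized to 0
  raw[0] = 1;               // raw storage must be written before any read
  int first = vec[0];       // well-defined: 0
  delete[] raw;
  return first;
}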
mindspore/ccsrc/frontend/parallel/ps/optimizer_info_builder.cc

@@ -17,6 +17,7 @@
 #include "frontend/parallel/ps/optimizer_info_builder.h"
+#include <vector>
 #include <memory>
 #include <functional>

 namespace mindspore {
 namespace parallel {
@@ -139,18 +140,17 @@ OptimizerInfo *SparseAdamOptimInfoBuilder::BuildInputs(const WeightPtr &weight,
     std::accumulate((*indices_shape).begin(), (*indices_shape).end(), sizeof(int), std::multiplies<size_t>());
   AddressPtr indices = std::make_shared<kernel::Address>();
   indices->addr = new int[total_indice_size * worker_num];
-  int *converted_indices = new int[lens[7]];
+  std::vector<int> converted_indices(lens[7]);
   size_t indices_data_size = lens[7] * sizeof(int);
   float *indices_data = reinterpret_cast<float *>(epsilon->addr) + lens[5] + lens[6];
   for (int i = 0; i < lens[7]; i++) {
     converted_indices[i] = static_cast<int>(indices_data[i]);
   }
-  ret = memcpy_s(indices->addr, indices_data_size, converted_indices, indices_data_size);
+  ret = memcpy_s(indices->addr, indices_data_size, converted_indices.data(), indices_data_size);
   if (ret != 0) {
     MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret << ")";
   }
   indices->size = indices_data_size;
-  delete[] converted_indices;
   return new SparseAdamOptimInfo(weight_addr, m, v, beta1_power, beta2_power, learning_rate, beta1, beta2, epsilon,
                                  grad, indices);
@@ -192,18 +192,17 @@ OptimizerInfo *SparseFtrlOptimInfoBuilder::BuildInputs(const WeightPtr &weight,
     std::accumulate((*indices_shape).begin(), (*indices_shape).end(), 1, std::multiplies<size_t>());
   AddressPtr indices = std::make_shared<kernel::Address>();
   indices->addr = new int[total_indice_size * worker_num];
-  int *converted_indices = new int[lens[1]];
+  std::vector<int> converted_indices(lens[1]);
   size_t indices_data_size = lens[1] * sizeof(int);
   float *indices_data = reinterpret_cast<float *>(values.data()) + lens[0];
   for (int i = 0; i < lens[1]; i++) {
     converted_indices[i] = static_cast<int>(indices_data[i]);
   }
-  ret = memcpy_s(indices->addr, indices_data_size, converted_indices, indices_data_size);
+  ret = memcpy_s(indices->addr, indices_data_size, converted_indices.data(), indices_data_size);
   if (ret != 0) {
     MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret << ")";
   }
   indices->size = indices_data_size;
-  delete[] converted_indices;
   return new SparseFtrlOptimInfo(weight_addr, accum, linear, grad, indices);
 }

mindspore/ccsrc/frontend/parallel/ps/optimizer_info_builder.h

@@ -15,6 +15,7 @@
  */

 #ifndef MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_OPTIMIZER_INFO_BUILDER_H_
 #define MINDSPORE_CCSRC_FRONTEND_PARALLEL_PS_OPTIMIZER_INFO_BUILDER_H_

+#include <vector>
 #include <memory>

mindspore/ccsrc/frontend/parallel/ps/util.cc

@@ -171,15 +171,14 @@ void Util::ReduceSparseGradient(float *gradients, int *indices, const size_t ind
                                 const size_t first_dim_size, const size_t outer_dim_size,
                                 mindspore::kernel::SparseGradient<int> *unique_sparse_grad) {
   size_t slice_segment_size = indices_size * segment_size;
-  auto workspace_grad = new float[slice_segment_size];
-  auto workspace_indices = new int[indices_size];
+  std::vector<float> workspace_grad(slice_segment_size);
+  std::vector<int> workspace_indices(indices_size);
   MS_EXCEPTION_IF_NULL(gradients);
   MS_EXCEPTION_IF_NULL(indices);
-  MS_EXCEPTION_IF_NULL(workspace_grad);
-  MS_EXCEPTION_IF_NULL(workspace_indices);
-  mindspore::kernel::SparseGradient<int> workspace_sparse_grad({workspace_grad, workspace_indices, indices_size});
+  mindspore::kernel::SparseGradient<int> workspace_sparse_grad(
+    {workspace_grad.data(), workspace_indices.data(), indices_size});
   mindspore::kernel::SparseGradient<int> input_sparse_grad({gradients, indices, indices_size});
   mindspore::kernel::ReduceSparseGradientParam<int> param;
   param.input_grad_ = &input_sparse_grad;
@@ -189,8 +188,6 @@ void Util::ReduceSparseGradient(float *gradients, int *indices, const size_t ind
   param.value_stride_ = outer_dim_size;
   mindspore::kernel::SparseOptimizerCPUKernel::BucketReduceSparseGradient(param);
-  delete[] workspace_grad;
-  delete[] workspace_indices;
 }
 } // namespace ps
 } // namespace parallel

mindspore/ccsrc/frontend/parallel/ps/worker.h

@@ -345,7 +345,6 @@ void Worker<T>::AddEmbeddingTable(const ::ps::Key &key, const size_t &row_count)
   }
   kv_worker_->AddEmbeddingTable(key, row_count);
 }
-
 } // namespace ps
 } // namespace parallel
 } // namespace mindspore

mindspore/ccsrc/frontend/parallel/ps/worker_proxy.h

@@ -473,17 +473,17 @@ void WorkerProxy<T>::SparseSlicer(int timestamp, const ::ps::KVPairs<T> &send, c
   size_t indices_size = indice_ids.size();
   if (indices_size > 0) {
     int slice_segment_size = indices_size * segment_size;
-    T *src_grad_data = new T[slice_segment_size];
-    int *src_indice_data = new int[indices_size];
-    PrepareSparseGradient(begin, end, distinct_ids, indice_to_grads, indice_data, segment_size, src_grad_data,
-                          src_indice_data);
+    std::vector<T> src_grad_data(slice_segment_size);
+    std::vector<int> src_indice_data(indices_size);
+    PrepareSparseGradient(begin, end, distinct_ids, indice_to_grads, indice_data, segment_size, src_grad_data.data(),
+                          src_indice_data.data());
     // Reduce the sparse gradient and indice
-    T *new_grad = new T[slice_segment_size];
-    int *new_indices = new int[indices_size];
-    mindspore::kernel::SparseGradient<int> unique_sparse_grad({new_grad, new_indices, indices_size});
-    Util::ReduceSparseGradient(src_grad_data, src_indice_data, indices_size, segment_size, first_dim_size,
-                               outer_dim_size, &unique_sparse_grad);
+    std::vector<T> new_grad(slice_segment_size);
+    std::vector<int> new_indices(indices_size);
+    mindspore::kernel::SparseGradient<int> unique_sparse_grad({new_grad.data(), new_indices.data(), indices_size});
+    Util::ReduceSparseGradient(src_grad_data.data(), src_indice_data.data(), indices_size, segment_size,
+                               first_dim_size, outer_dim_size, &unique_sparse_grad);
     // Update the length of reduce sparse gradient and indice
     ::ps::SArray<int> reduced_lens;
@@ -502,11 +502,6 @@ void WorkerProxy<T>::SparseSlicer(int timestamp, const ::ps::KVPairs<T> &send, c
     kvs.lens = reduced_lens;
     kvs.vals = reduced_data;
-    delete[] src_grad_data;
-    delete[] src_indice_data;
-    delete[] new_grad;
-    delete[] new_indices;
   }

   if (indices_size <= 0) {
@@ -576,15 +571,14 @@ void WorkerProxy<T>::BuildSparseValue(const ::ps::SArray<int> &lengths, const si
   int indice_offset = grad_offset + lengths[grad_index];
   data_size = lengths[indice_index] * sizeof(T);
   T *indice_data = reduced_data->data() + indice_offset;
-  T *convert = new T[lengths[indice_index]];
+  std::vector<T> convert(lengths[indice_index]);
   for (int i = 0; i < lengths[indice_index]; i++) {
     convert[i] = static_cast<T>(indices[i]);
   }
-  ret = memcpy_s(indice_data, data_size, convert, data_size);
+  ret = memcpy_s(indice_data, data_size, convert.data(), data_size);
   if (ret != 0) {
     MS_LOG(EXCEPTION) << "memcpy_s error, errorno(" << ret << ")";
   }
-  delete[] convert;
 }

 template <typename T>

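The recurring idiom in every file touched here: the std::vector owns the storage, and .data() lends a raw pointer to C-style consumers such as memcpy_s and the SparseGradient aggregate. That pointer stays valid only while the vector is alive and unresized. A minimal sketch under those assumptions (the view struct below is a hypothetical stand-in, not MindSpore's SparseGradient):

#include <cstddef>
#include <vector>

// Non-owning view over vector-backed workspaces, mirroring how the
// diff passes {grad.data(), indices.data(), size} into kernel calls.
struct SparseGradientView {
  float *value_;
  int *indices_;
  size_t indices_size_;
};

int main() {
  const size_t n = 8, segment = 4;
  std::vector<float> grad(n * segment);  // vectors own the buffers
  std::vector<int> indices(n);
  SparseGradientView view{grad.data(), indices.data(), n};
  // view must not outlive grad/indices, and neither may be resized
  // while view is in use.
  return static_cast<int>(view.indices_size_ - n);  // 0
}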