!12628 Add bond, angle and dihedral modules of Sponge

From: @zhangxinfeng3
Reviewed-by: 
Signed-off-by:
pull/12628/MERGE
Committed by mindspore-ci-bot via Gitee
commit be2019c5be

@@ -0,0 +1,65 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void AngleAtomEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
const float *angle_theta0, float *atom_energy) {
int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
if (angle_i < angle_numbers) {
int atom_i = atom_a[angle_i];
int atom_j = atom_b[angle_i];
int atom_k = atom_c[angle_i];
float theta0 = angle_theta0[angle_i];
float k = angle_k[angle_i];
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
float rij_2 = 1. / (drij * drij);
float rkj_2 = 1. / (drkj * drkj);
float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
float costheta = drij * drkj * rij_1_rkj_1;
costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
float theta = acosf(costheta);
float dtheta = theta - theta0;
atomicAdd(&atom_energy[atom_i], k * dtheta * dtheta);
}
}
void AngleAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene,
cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
AngleAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(angle_numbers, uint_crd, scaler, atom_a,
atom_b, atom_c, angle_k, angle_theta0, ene);
return;
}
void AngleAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene,
cudaStream_t stream);

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ATOM_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void AngleAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene,
cudaStream_t stream);
#endif

@@ -0,0 +1,63 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void AngleEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
const float *angle_theta0, float *angle_energy) {
int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
if (angle_i < angle_numbers) {
int atom_i = atom_a[angle_i];
int atom_j = atom_b[angle_i];
int atom_k = atom_c[angle_i];
float theta0 = angle_theta0[angle_i];
float k = angle_k[angle_i];
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
float rij_2 = 1. / (drij * drij);
float rkj_2 = 1. / (drkj * drkj);
float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
float costheta = drij * drkj * rij_1_rkj_1;
costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
float theta = acosf(costheta);
float dtheta = theta - theta0;
angle_energy[angle_i] = k * dtheta * dtheta;
}
}
void AngleEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene, cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
AngleEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(angle_numbers, uint_crd, scaler, atom_a, atom_b,
atom_c, angle_k, angle_theta0, ene);
return;
}
void AngleEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene, cudaStream_t stream);

@@ -0,0 +1,25 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void AngleEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const int *atom_c, const float *angle_k, const float *angle_theta0, float *ene, cudaStream_t stream);
#endif

@@ -0,0 +1,83 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void AngleForceKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
const int *atom_a, const int *atom_b, const int *atom_c, const float *angle_k,
const float *angle_theta0, VECTOR *frc) {
int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
if (angle_i < angle_numbers) {
int atom_i = atom_a[angle_i];
int atom_j = atom_b[angle_i];
int atom_k = atom_c[angle_i];
float theta0 = angle_theta0[angle_i];
float k = angle_k[angle_i];
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
float rij_2 = 1. / (drij * drij);
float rkj_2 = 1. / (drkj * drkj);
float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
float costheta = drij * drkj * rij_1_rkj_1;
costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
float theta = acosf(costheta);
float dtheta = theta - theta0;
k = -2 * k * dtheta / sinf(theta);
float common_factor_cross = k * rij_1_rkj_1;
float common_factor_self = k * costheta;
VECTOR fi = common_factor_self * rij_2 * drij - common_factor_cross * drkj;
VECTOR fk = common_factor_self * rkj_2 * drkj - common_factor_cross * drij;
atomicAdd(&frc[atom_i].x, fi.x);
atomicAdd(&frc[atom_i].y, fi.y);
atomicAdd(&frc[atom_i].z, fi.z);
atomicAdd(&frc[atom_k].x, fk.x);
atomicAdd(&frc[atom_k].y, fk.y);
atomicAdd(&frc[atom_k].z, fk.z);
fi = -fi - fk;
atomicAdd(&frc[atom_j].x, fi.x);
atomicAdd(&frc[atom_j].y, fi.y);
atomicAdd(&frc[atom_j].z, fi.z);
}
}
void AngleForce(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const int *atom_c, const float *angle_k, const float *angle_theta0, float *frc_f, cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
AngleForceKernel<<<block_per_grid, thread_per_block, 0, stream>>>(angle_numbers, uint_crd, scaler, atom_a, atom_b,
atom_c, angle_k, angle_theta0, frc);
return;
}
void AngleForce(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const int *atom_c, const float *angle_k, const float *angle_theta0, float *frc_f, cudaStream_t stream);

@@ -0,0 +1,25 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void AngleForce(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const int *atom_c, const float *angle_k, const float *angle_theta0, float *frc_f, cudaStream_t stream);
#endif

@@ -0,0 +1,89 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/angle/angle_force_with_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void AngleForceWithAtomEnergyKernel(int angle_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
const VECTOR *scaler, const int *atom_a, const int *atom_b,
const int *atom_c, const float *angle_k, const float *angle_theta0,
VECTOR *frc, float *atom_energy) {
int angle_i = blockDim.x * blockIdx.x + threadIdx.x;
if (angle_i < angle_numbers) {
int atom_i = atom_a[angle_i];
int atom_j = atom_b[angle_i];
int atom_k = atom_c[angle_i];
float theta0 = angle_theta0[angle_i];
float k = angle_k[angle_i];
float k2 = k;
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
float rij_2 = 1. / (drij * drij);
float rkj_2 = 1. / (drkj * drkj);
float rij_1_rkj_1 = sqrtf(rij_2 * rkj_2);
float costheta = drij * drkj * rij_1_rkj_1;
costheta = fmaxf(-0.999999, fminf(costheta, 0.999999));
float theta = acosf(costheta);
float dtheta = theta - theta0;
k = -2 * k * dtheta / sinf(theta);
float common_factor_cross = k * rij_1_rkj_1;
float common_factor_self = k * costheta;
VECTOR fi = common_factor_self * rij_2 * drij - common_factor_cross * drkj;
VECTOR fk = common_factor_self * rkj_2 * drkj - common_factor_cross * drij;
atomicAdd(&frc[atom_i].x, fi.x);
atomicAdd(&frc[atom_i].y, fi.y);
atomicAdd(&frc[atom_i].z, fi.z);
atomicAdd(&frc[atom_k].x, fk.x);
atomicAdd(&frc[atom_k].y, fk.y);
atomicAdd(&frc[atom_k].z, fk.z);
fi = -fi - fk;
atomicAdd(&frc[atom_j].x, fi.x);
atomicAdd(&frc[atom_j].y, fi.y);
atomicAdd(&frc[atom_j].z, fi.z);
atomicAdd(&atom_energy[atom_i], k2 * dtheta * dtheta);
}
}
void AngleForceWithAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0,
float *frc_f, float *ene, cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(angle_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
AngleForceWithAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
angle_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, angle_k, angle_theta0, frc, ene);
return;
}
void AngleForceWithAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0,
float *frc_f, float *ene, cudaStream_t stream);

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_WITH_ATOM_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_ANGLE_ANGLE_FORCE_WITH_ATOM_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void AngleForceWithAtomEnergy(int angle_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const float *angle_k, const float *angle_theta0,
float *frc_f, float *ene, cudaStream_t stream);
#endif

@@ -0,0 +1,56 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_atom_energy_cuda_gpu_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
__global__ void BondAtomEnergyCudaKernel(const int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
const VECTOR *scaler, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, float *atom_ene) {
int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
if (bond_i < bond_numbers) {
int atom_i = atom_a[bond_i];
int atom_j = atom_b[bond_i];
float k = bond_k[bond_i];
float r0 = bond_r0[bond_i];
VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
float r1 = norm3df(dr.x, dr.y, dr.z);
float tempf = r1 - r0;
atomicAdd(&atom_ene[atom_i], k * tempf * tempf);
}
}
void BondAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene,
cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
BondAtomEnergyCudaKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a,
atom_b, bond_k, bond_r0, atom_ene);
return;
}
void BondAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene, cudaStream_t stream);

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ATOM_ENERGY_CUDA_GPU_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ATOM_ENERGY_CUDA_GPU_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void BondAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *atom_ene, cudaStream_t stream);
#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ATOM_ENERGY_CUDA_GPU_IMPL_H_

@@ -0,0 +1,55 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_energy_cuda_gpu_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
__global__ void BondEnergyCudaKernel(const int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
float *bond_ene) {
int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
if (bond_i < bond_numbers) {
int atom_i = atom_a[bond_i];
int atom_j = atom_b[bond_i];
float k = bond_k[bond_i];
float r0 = bond_r0[bond_i];
VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
float r1 = norm3df(dr.x, dr.y, dr.z);
float tempf = r1 - r0;
bond_ene[bond_i] = k * tempf * tempf;
}
}
void BondEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, float *bond_ene, cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
BondEnergyCudaKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a, atom_b,
bond_k, bond_r0, bond_ene);
return;
}
void BondEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, float *bond_ene, cudaStream_t stream);

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ENERGY_CUDA_GPU_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ENERGY_CUDA_GPU_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void BondEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, float *bond_ene, cudaStream_t stream);
#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_ENERGY_CUDA_GPU_IMPL_H_

@@ -0,0 +1,60 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_cuda_gpu_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
__global__ void BondForceCudaKernel(int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
const int *atom_a, const int *atom_b, const float *bond_k, const float *bond_r0,
VECTOR *frc) {
int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
if (bond_i < bond_numbers) {
int atom_i = atom_a[bond_i];
int atom_j = atom_b[bond_i];
float k = bond_k[bond_i];
float r0 = bond_r0[bond_i];
VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
float r_1 = rnorm3df(dr.x, dr.y, dr.z);
float tempf = 1.0 - r0 * r_1;
VECTOR f = 2 * tempf * k * dr;
atomicAdd(&frc[atom_i].x, -f.x);
atomicAdd(&frc[atom_i].y, -f.y);
atomicAdd(&frc[atom_i].z, -f.z);
atomicAdd(&frc[atom_j].x, f.x);
atomicAdd(&frc[atom_j].y, f.y);
atomicAdd(&frc[atom_j].z, f.z);
}
}
void BondForce(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, float *frc_f, cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
BondForceCudaKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a, atom_b,
bond_k, bond_r0, frc);
return;
}
void BondForce(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, float *frc_f, cudaStream_t stream);

@@ -0,0 +1,25 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_CUDA_GPU_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_CUDA_GPU_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void BondForce(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, float *frc_f, cudaStream_t stream);
#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_CUDA_GPU_IMPL_H_

@@ -0,0 +1,67 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
__global__ void BondForceWithAtomEnergyKernel(int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
const VECTOR *scaler, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, VECTOR *frc,
float *atom_energy) {
int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
if (bond_i < bond_numbers) {
int atom_i = atom_a[bond_i];
int atom_j = atom_b[bond_i];
float k = bond_k[bond_i];
float r0 = bond_r0[bond_i];
VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
float abs_r = norm3df(dr.x, dr.y, dr.z);
float r_1 = 1. / abs_r;
float tempf = abs_r - r0;
VECTOR f = 2 * tempf * r_1 * k * dr;
atomicAdd(&frc[atom_i].x, -f.x);
atomicAdd(&frc[atom_i].y, -f.y);
atomicAdd(&frc[atom_i].z, -f.z);
atomicAdd(&frc[atom_j].x, f.x);
atomicAdd(&frc[atom_j].y, f.y);
atomicAdd(&frc[atom_j].z, f.z);
atomicAdd(&atom_energy[atom_i], k * tempf * tempf);
}
}
void BondForceWithAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_e,
cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
BondForceWithAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a,
atom_b, bond_k, bond_r0, frc, atom_e);
return;
}
void BondForceWithAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_e,
cudaStream_t stream);

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void BondForceWithAtomEnergy(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_e,
cudaStream_t stream);
#endif

@@ -0,0 +1,67 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/bond/bond_force_with_atom_virial_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
__global__ void BondForceWithAtomVirialKernel(int bond_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
const VECTOR *scaler, const int *atom_a, const int *atom_b,
const float *bond_k, const float *bond_r0, VECTOR *frc,
float *atom_virial) {
int bond_i = blockDim.x * blockIdx.x + threadIdx.x;
if (bond_i < bond_numbers) {
int atom_i = atom_a[bond_i];
int atom_j = atom_b[bond_i];
float k = bond_k[bond_i];
float r0 = bond_r0[bond_i];
VECTOR dr = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
float abs_r = norm3df(dr.x, dr.y, dr.z);
float r_1 = 1. / abs_r;
float tempf = (abs_r - r0) * k;
VECTOR f = 2 * tempf * r_1 * dr;
atomicAdd(&frc[atom_i].x, -f.x);
atomicAdd(&frc[atom_i].y, -f.y);
atomicAdd(&frc[atom_i].z, -f.z);
atomicAdd(&frc[atom_j].x, f.x);
atomicAdd(&frc[atom_j].y, f.y);
atomicAdd(&frc[atom_j].z, f.z);
atomicAdd(&atom_virial[atom_i], abs_r * tempf);
}
}
void BondForceWithAtomVirial(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_v,
cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(bond_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
BondForceWithAtomVirialKernel<<<block_per_grid, thread_per_block, 0, stream>>>(bond_numbers, uint_crd, scaler, atom_a,
atom_b, bond_k, bond_r0, frc, atom_v);
return;
}
void BondForceWithAtomVirial(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_v,
cudaStream_t stream);

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_VIRIAL_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_BOND_BOND_FORCE_WITH_ATOM_VIRIAL_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void BondForceWithAtomVirial(int bond_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const float *bond_k, const float *bond_r0, float *frc_f, float *atom_v,
cudaStream_t stream);
#endif

@@ -0,0 +1,94 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_SPONGE_COMMONHW_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_SPONGE_COMMONHW_H_
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "runtime/device/gpu/cuda_common.h"
#define CONSTANT_Pi 3.1415926535897932
struct VECTOR {
float x;
float y;
float z;
};
struct UNSIGNED_INT_VECTOR {
unsigned int uint_x;
unsigned int uint_y;
unsigned int uint_z;
};
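// Coordinates are stored as unsigned ints spanning the periodic box. Unsigned
// subtraction wraps modulo 2^32, so casting the difference back to int and
// multiplying by the box scaling factor yields the minimum-image displacement.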
__device__ __host__ static inline VECTOR Get_Periodic_Displacement(const UNSIGNED_INT_VECTOR uvec_a,
const UNSIGNED_INT_VECTOR uvec_b,
const VECTOR scaler) {
VECTOR dr;
dr.x = (static_cast<int>(uvec_a.uint_x - uvec_b.uint_x)) * scaler.x;
dr.y = (static_cast<int>(uvec_a.uint_y - uvec_b.uint_y)) * scaler.y;
dr.z = (static_cast<int>(uvec_a.uint_z - uvec_b.uint_z)) * scaler.z;
return dr;
}
__device__ __host__ static inline VECTOR operator+(const VECTOR &veca, const VECTOR &vecb) {
VECTOR vec;
vec.x = veca.x + vecb.x;
vec.y = veca.y + vecb.y;
vec.z = veca.z + vecb.z;
return vec;
}
__device__ __host__ static inline float operator*(const VECTOR &veca, const VECTOR &vecb) {
return veca.x * vecb.x + veca.y * vecb.y + veca.z * vecb.z;
}
__device__ __host__ static inline VECTOR operator*(const float &a, const VECTOR &vecb) {
VECTOR vec;
vec.x = a * vecb.x;
vec.y = a * vecb.y;
vec.z = a * vecb.z;
return vec;
}
__device__ __host__ static inline VECTOR operator-(const VECTOR &veca, const VECTOR &vecb) {
VECTOR vec;
vec.x = veca.x - vecb.x;
vec.y = veca.y - vecb.y;
vec.z = veca.z - vecb.z;
return vec;
}
__device__ __host__ static inline VECTOR operator-(const VECTOR &vecb) {
VECTOR vec;
vec.x = -vecb.x;
vec.y = -vecb.y;
vec.z = -vecb.z;
return vec;
}
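// operator^ is the 3D cross product. Note that it binds looser than operator*,
// so an expression such as a * b ^ c parses as (a * b) ^ c.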
__device__ __host__ static inline VECTOR operator^(const VECTOR &veca, const VECTOR &vecb) {
VECTOR vec;
vec.x = veca.y * vecb.z - veca.z * vecb.y;
vec.y = veca.z * vecb.x - veca.x * vecb.z;
vec.z = veca.x * vecb.y - veca.y * vecb.x;
return vec;
}
#endif  // MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMP_SPONGE_COMMONHW_H_

@@ -0,0 +1,83 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void DihedralAtomEnergyKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd,
const VECTOR *scaler, const int *atom_a, const int *atom_b, const int *atom_c,
const int *atom_d, const int *ipn, const float *pk, const float *gamc,
const float *gams, const float *pn, float *ene) {
int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
if (dihedral_i < dihedral_numbers) {
int atom_i = atom_a[dihedral_i];
int atom_j = atom_b[dihedral_i];
int atom_k = atom_c[dihedral_i];
int atom_l = atom_d[dihedral_i];
int temp_ipn = ipn[dihedral_i];
float temp_pk = pk[dihedral_i];
float temp_pn = pn[dihedral_i];
float temp_gamc = gamc[dihedral_i];
float temp_gams = gams[dihedral_i];
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
VECTOR r1 = drij ^ drkj;
VECTOR r2 = drkl ^ drkj;
float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
float r1_1_r2_1 = r1_1 * r2_1;
float phi = r1 * r2 * r1_1_r2_1;
phi = fmaxf(-0.999999, fminf(phi, 0.999999));
phi = acosf(phi);
float sign = (r2 ^ r1) * drkj;
phi = copysignf(phi, sign);
phi = CONSTANT_Pi - phi;
float nphi = temp_pn * phi;
float cos_nphi = cosf(nphi);
float sin_nphi = sinf(nphi);
atomicAdd(&ene[atom_i], (temp_pk + cos_nphi * temp_gamc + sin_nphi * temp_gams));
}
}
void DihedralAtomEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(dihedral_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
DihedralAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, ene);
return;
}
void DihedralAtomEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream);

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ATOM_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ATOM_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void DihedralAtomEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream);
#endif

@@ -0,0 +1,83 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void DihedralEnergyKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d,
const int *ipn, const float *pk, const float *gamc, const float *gams,
const float *pn, float *ene) {
int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
if (dihedral_i < dihedral_numbers) {
int atom_i = atom_a[dihedral_i];
int atom_j = atom_b[dihedral_i];
int atom_k = atom_c[dihedral_i];
int atom_l = atom_d[dihedral_i];
int temp_ipn = ipn[dihedral_i];
float temp_pk = pk[dihedral_i];
float temp_pn = pn[dihedral_i];
float temp_gamc = gamc[dihedral_i];
float temp_gams = gams[dihedral_i];
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
VECTOR r1 = drij ^ drkj;
VECTOR r2 = drkl ^ drkj;
float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
float r1_1_r2_1 = r1_1 * r2_1;
float phi = r1 * r2 * r1_1_r2_1;
phi = fmaxf(-0.999999, fminf(phi, 0.999999));
phi = acosf(phi);
float sign = (r2 ^ r1) * drkj;
phi = copysignf(phi, sign);
phi = CONSTANT_Pi - phi;
float nphi = temp_pn * phi;
float cos_nphi = cosf(nphi);
float sin_nphi = sinf(nphi);
ene[dihedral_i] = (temp_pk + cos_nphi * temp_gamc + sin_nphi * temp_gams);
}
}
void DihedralEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(dihedral_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
DihedralEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, ene);
return;
}
void DihedralEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream);

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ENERGY_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_ENERGY_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void DihedralEnergy(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *ene, cudaStream_t stream);
#endif

@@ -0,0 +1,119 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/dihedral/dihedral_force_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/util.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void DihedralForceKernel(int dihedral_numbers, const UNSIGNED_INT_VECTOR *uint_crd, const VECTOR *scaler,
const int *atom_a, const int *atom_b, const int *atom_c, const int *atom_d,
const int *ipn, const float *pk, const float *gamc, const float *gams,
const float *pn, VECTOR *frc) {
int dihedral_i = blockDim.x * blockIdx.x + threadIdx.x;
if (dihedral_i < dihedral_numbers) {
int atom_i = atom_a[dihedral_i];
int atom_j = atom_b[dihedral_i];
int atom_k = atom_c[dihedral_i];
int atom_l = atom_d[dihedral_i];
int temp_ipn = ipn[dihedral_i];
float temp_pk = pk[dihedral_i];
float temp_pn = pn[dihedral_i];
float temp_gamc = gamc[dihedral_i];
float temp_gams = gams[dihedral_i];
VECTOR drij = Get_Periodic_Displacement(uint_crd[atom_i], uint_crd[atom_j], scaler[0]);
VECTOR drkj = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_j], scaler[0]);
VECTOR drkl = Get_Periodic_Displacement(uint_crd[atom_k], uint_crd[atom_l], scaler[0]);
VECTOR r1 = drij ^ drkj;
VECTOR r2 = drkl ^ drkj;
float r1_1 = rnorm3df(r1.x, r1.y, r1.z);
float r2_1 = rnorm3df(r2.x, r2.y, r2.z);
float r1_2 = r1_1 * r1_1;
float r2_2 = r2_1 * r2_1;
float r1_1_r2_1 = r1_1 * r2_1;
float phi = r1 * r2 * r1_1_r2_1;
phi = fmaxf(-0.999999, fminf(phi, 0.999999));
phi = acosf(phi);
float sign = (r2 ^ r1) * drkj;
phi = copysignf(phi, sign);
phi = CONSTANT_Pi - phi;
float nphi = temp_pn * phi;
float cos_phi = cosf(phi);
float sin_phi = sinf(phi);
float cos_nphi = cosf(nphi);
float sin_nphi = sinf(nphi);
float dE_dphi;
if (fabsf(sin_phi) < 1e-6) {
temp_ipn *= temp_ipn % 2; // (((temp_ipn - 1) & 1) ^ 1)
dE_dphi = temp_gamc * (temp_pn - temp_ipn + temp_ipn * cos_phi);
} else {
dE_dphi = temp_pn * (temp_gamc * sin_nphi - temp_gams * cos_nphi) / sin_phi;
}
VECTOR dphi_dr1 = r1_1_r2_1 * r2 + cos_phi * r1_2 * r1;
VECTOR dphi_dr2 = r1_1_r2_1 * r1 + cos_phi * r2_2 * r2;
VECTOR dE_dri = dE_dphi * drkj ^ dphi_dr1;
VECTOR dE_drl = dE_dphi * dphi_dr2 ^ drkj;
VECTOR dE_drj_part = dE_dphi * ((drij ^ dphi_dr1) + (drkl ^ dphi_dr2));
VECTOR fi = dE_dri;
VECTOR fj = dE_drj_part - dE_dri;
VECTOR fk = -dE_drl - dE_drj_part;
VECTOR fl = dE_drl;
atomicAdd(&frc[atom_i].x, fi.x);
atomicAdd(&frc[atom_i].y, fi.y);
atomicAdd(&frc[atom_i].z, fi.z);
atomicAdd(&frc[atom_j].x, fj.x);
atomicAdd(&frc[atom_j].y, fj.y);
atomicAdd(&frc[atom_j].z, fj.z);
atomicAdd(&frc[atom_k].x, fk.x);
atomicAdd(&frc[atom_k].y, fk.y);
atomicAdd(&frc[atom_k].z, fk.z);
atomicAdd(&frc[atom_l].x, fl.x);
atomicAdd(&frc[atom_l].y, fl.y);
atomicAdd(&frc[atom_l].z, fl.z);
}
}
void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *frc_f, cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(dihedral_numbers) / 128);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
VECTOR *frc = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(frc_f));
VECTOR *scaler = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(scaler_f));
DihedralForceKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
dihedral_numbers, uint_crd, scaler, atom_a, atom_b, atom_c, atom_d, ipn, pk, gamc, gams, pn, frc);
return;
}
void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *frc_f, cudaStream_t stream);

@@ -0,0 +1,26 @@
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_IMPL_H_
#define MINDSPORE_CCSRC_KERNEL_GPU_CUDA_IMPL_SPONGE_DIHEDRAL_DIHEDRAL_FORCE_IMPL_H_
#include <curand_kernel.h>
#include "runtime/device/gpu/cuda_common.h"
void DihedralForce(int dihedral_numbers, const int *uint_crd_f, const float *scaler_f, const int *atom_a,
const int *atom_b, const int *atom_c, const int *atom_d, const int *ipn, const float *pk,
const float *gamc, const float *gams, const float *pn, float *frc_f, cudaStream_t stream);
#endif

Some files were not shown because too many files have changed in this diff.