-
-
Notifications
You must be signed in to change notification settings - Fork 35
Open
Description
Hello, I was testing the new updates to terminate!
with EnsembleGPUKernel. It works fine with DiscreteCallback; however, when using ContinuousCallback I still get the "Out of dynamic GPU memory" error in EnsembleGPUKernel for a higher number of threads. I attach the code used:
using StaticArrays
using CUDA
using DiffEqGPU
using NPZ
using OrdinaryDiffEq
using Plots
"""
pot_central(u,p,t)
u=[x,dx,y,dy]
p=[k,m]
"""
function pot_central(u,p,t)
r3 = ( u[1]^2 + u[3]^2 )^(3/2)
du1 = u[2] # u[2]= dx
du2 = -( p[1]*u[1] ) / ( p[2]*r3 )
du3 = u[4] # u[4]= dy
du4 = -( p[1]*u[3] ) / ( p[2]*r3 )
return SVector{4}(du1,du2,du3,du4)
end
# --- Problem setup -----------------------------------------------------------
T = 100.0            # final integration time
k = 1.0              # force constant (see pot_central: p = [k, m])
m = 1.0              # particle mass
trajectories = 5_000 # number of ensemble members to launch on the GPU
# Per-trajectory perturbation factors loaded from disk.
# NOTE(review): assumes the .npy array has at least `trajectories` rows of
# width 4 — confirm against the generating script.
u_rand = convert(Array{Float64},npzread("IO_GPU/IO_u0.npy"))
u0 = @SVector [2.0; 2.0; 1.0; 1.5]  # base initial state [x, dx, y, dy]
p = @SVector [k,m]
tspan = (0.0,T)
# Out-of-place (immutable-state) problem, as required by EnsembleGPUKernel.
prob = ODEProblem{false}(pot_central,u0,tspan,p)
# Each ensemble member scales u0 row-wise by u_rand[i, :] and shifts by 1.
prob_func = (prob,i,repeat) -> remake(prob, u0 = SVector{4}(u_rand[i,:]).*u0 + @SVector [1.0;1.0;1.0;1.0] )
# safetycopy=false: prob_func builds a fresh u0 each call, so no copy needed.
Ensemble_Problem = EnsembleProblem(prob,prob_func=prob_func,safetycopy=false)
# ContinuousCallback event function: continuous in r², positive while the
# orbit stays inside the annulus Rmin² < r² < Rmax², and crossing zero at
# either boundary so the root-finder can locate the exit event.
# (The trailing `< 0.0` form would be the DiscreteCallback variant.)
function condition(u, t, integrator)
    rmin2 = 4.5      # Rmin²
    rmax2 = 5_000.0  # Rmax²
    r2 = u[1] * u[1] + u[3] * u[3]
    return (rmax2 - r2) * (r2 - rmin2)
end
# Stop the trajectory as soon as the event condition has a zero crossing.
function affect!(integrator)
    return terminate!(integrator)
end

# Root-find the crossing of `condition`; save nothing at the event itself.
gpu_cb = ContinuousCallback(condition, affect!;
    save_positions = (false, false),
    rootfind = true,
    interp_points = 0,
    abstol = 1e-7,
    reltol = 0)
#gpu_cb = DiscreteCallback(condition, affect!;save_positions=(false,false))
# Solve the ensemble on the GPU with a fixed step and the termination callback.
# NOTE(review): batch_size (10_000) exceeds trajectories (5_000), so batching
# is effectively a single batch — confirm this is intentional.
# NOTE(review): adaptive = false with a fixed dt; the ContinuousCallback still
# root-finds within steps (interp_points = 0, abstol = 1e-7 set above).
CUDA.@time sol= solve(Ensemble_Problem,
GPUTsit5(),
#GPUVern7(),
#GPUVern9(),
EnsembleGPUKernel(),
trajectories = trajectories,
batch_size = 10_000,
adaptive = false,
dt = 0.01,
save_everystep = false,
callback = gpu_cb,
merge_callbacks = true
)
Metadata
Metadata
Assignees
Labels
No labels