Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion include/iblgf/domain/mpi/task.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ class Task : public Task_base<TaskBuffer<Tag, T, ID>, BufferPolicy, ID>
{
public:
using super_type = Task_base<TaskBuffer<Tag, T, ID>, BufferPolicy, ID>;
using typename super_type::Task_base;
using super_type::super_type;
using id_type = ID;
using buffer_type = TaskBuffer<Tag, T, ID>;
using buffer_container_type = typename buffer_type::container_t;
Expand Down
46 changes: 45 additions & 1 deletion tests/ns_amr_lgf/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,15 @@ add_custom_target(copy_ifherk_files ALL
)

# Test executables consumed by the foreach loops further down.
set(Ptest_list ns_amr_lgf_Test)
set(Ptest_list_gpu ns_amr_lgf_gpu_Test)

# GPU test runs use their own CTest working directory (${NAME}_gpu);
# stage the config files there whenever copy_ifherk_files rebuilds.
if(USE_GPU)
    add_custom_command(TARGET copy_ifherk_files POST_BUILD
        COMMAND ${CMAKE_COMMAND} -E make_directory ${CTEST_WORKING_DIRECTORY}/${NAME}_gpu
        COMMAND ${CMAKE_COMMAND} -E copy_directory
                ${CMAKE_CURRENT_SOURCE_DIR}/configs
                ${CTEST_WORKING_DIRECTORY}/${NAME}_gpu
        VERBATIM  # escape arguments portably across generators/platforms
    )
endif()
# add the executable
add_executable(ns_amr_lgf.x ns_amr_lgf.cpp ${SOURCES})
target_link_libraries(ns_amr_lgf.x
Expand All @@ -20,7 +28,26 @@ target_link_libraries(ns_amr_lgf.x
${BLAS_LIBRARIES}
xtensor
xsimd)
if(USE_GPU)
    # nvcc delegates host-side compilation to the MPI compiler wrapper so
    # that mpi.h and the MPI link line resolve consistently with the rest
    # of the build.
    set(CMAKE_CUDA_HOST_COMPILER ${MPI_CXX_COMPILER})
    # Directory-scoped on purpose: the GPU test targets defined later in
    # this file inherit the flag as well.
    set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --max-errors=1")

    # GPU driver executable: shares the CPU sources plus the CUDA kernels.
    add_executable(ns_amr_lgf_gpu.x ns_amr_lgf_gpu.cu ${SOURCES} ${GPU_SOURCES})
    # PRIVATE: an executable has no consumers, nothing needs to propagate.
    target_link_libraries(ns_amr_lgf_gpu.x PRIVATE
        ${MPI_LIBRARIES}
        ${Boost_LIBRARIES}
        ${FFTW_LIBRARIES}
        ${HDF5_LIBRARIES} hdf5
        ${BLAS_LIBRARIES}
        xtensor
        xsimd
        ${CUDA_CUFFT_LIBRARIES} cufft)
    set_target_properties(ns_amr_lgf_gpu.x PROPERTIES
        CUDA_SEPARABLE_COMPILATION ON  # device code split across ${GPU_SOURCES}
        CUDA_STANDARD 17
    )
endif()
foreach (item ${Ptest_list})
add_executable(${item} ${item}.cpp ${SOURCES})
add_dependencies(${item} copy_ifherk_files)
Expand All @@ -31,3 +58,20 @@ foreach (item ${Ptest_list})
#COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} 10 $<TARGET_FILE:${item}> WORKING_DIRECTORY ${CTEST_WORKING_DIRECTORY}/${NAME}
)
endforeach()
if(USE_GPU)
    foreach(item ${Ptest_list_gpu})
        # Each GPU test builds from its own .cu entry point plus the shared
        # CPU and CUDA sources.
        add_executable(${item} ${item}.cu ${SOURCES} ${GPU_SOURCES})
        # Ordering only: config files must be staged before the test target
        # finishes building (add_dependencies does not link anything).
        add_dependencies(${item} copy_ifherk_files)
        # PRIVATE: test executables have no downstream consumers.
        target_link_libraries(${item} PRIVATE iblgf_test_main
            ${CUDA_CUFFT_LIBRARIES} cufft)
        set_target_properties(${item} PROPERTIES
            CUDA_SEPARABLE_COMPILATION ON
            CUDA_STANDARD 17
        )
        # NOTE(review): the test name hardcodes "np13" while the rank count
        # comes from IBLGF_MPI_NP — confirm the two agree, or derive the
        # name from ${IBLGF_MPI_NP}.
        add_test(
            NAME ${item}_parallel_np13_gpu
            COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${IBLGF_MPI_NP} $<TARGET_FILE:${item}>
            WORKING_DIRECTORY ${CTEST_WORKING_DIRECTORY}/${NAME}_gpu
        )
    endforeach()
endif()
61 changes: 61 additions & 0 deletions tests/ns_amr_lgf/configFile_30
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
simulation_parameters
{
nLevels=0;

// Time Marching
source_max=13.0;
nBaseLevelTimeSteps=4;
cfl = 0.35;
cfl_max = 1000;
Re = 1000.0;
refinement_factor=0.125;
R=0.5;
DistanceOfVortexRings=0.25;
adapt_frequency=10;
output_frequency=4;
base_level_threshold=1e-4;
hard_max_refinement=true;

single_ring=true;
perturbation=false;
vDelta=0.2;

fat_ring=true;

//hdf5_ic_name=ic.hdf5;
// hdf5_ref_name=ref.hdf5;

output
{
directory=output;
}

restart_write_frequency=20;
write_restart=false;
use_restart=false;

restart
{
load_directory=restart;
save_directory=restart;
}

domain
{

bd_base = (-60,-60,-60);
bd_extent = (120,120,120);


dx_base=0.25;

block_extent=30;

block
{
base=(-30,-30,-30);
extent=(60,60,60);
}
}
EXP_LInf=1e-2;
}
4 changes: 2 additions & 2 deletions tests/ns_amr_lgf/ns_amr_lgf.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ struct NS_AMR_LGF : public SetupBase<NS_AMR_LGF, parameters>
linear_start_ = simulation_.dictionary()->template get_or<bool>("linear_start", false);

simulation_.frame_vel() =
[this](std::size_t idx, float_type t, auto coord = {0, 0, 0})
[this](std::size_t idx, float_type t, auto coord)
{
float_type T0 = 0.5;
if (t<=0.0 && (smooth_start_ || linear_start_))
Expand Down Expand Up @@ -153,7 +153,7 @@ struct NS_AMR_LGF : public SetupBase<NS_AMR_LGF, parameters>
}
};
simulation_.bc_vel() =
[this](std::size_t idx, float_type t, auto coord = {0, 0, 0})
[this](std::size_t idx, float_type t, auto coord)
{
float_type T0 = 0.5;
if (t<=0.0 && (smooth_start_|| linear_start_))
Expand Down
54 changes: 54 additions & 0 deletions tests/ns_amr_lgf/ns_amr_lgf_gpu.cu
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
// ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄ ▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄
// ▐░░░░░░░░░░░▌▐░░░░░░░░░░▌ ▐░▌ ▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌
// ▀▀▀▀█░█▀▀▀▀ ▐░█▀▀▀▀▀▀▀█░▌▐░▌ ▐░█▀▀▀▀▀▀▀▀▀ ▐░█▀▀▀▀▀▀▀▀▀
// ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌
// ▐░▌ ▐░█▄▄▄▄▄▄▄█░▌▐░▌ ▐░▌ ▄▄▄▄▄▄▄▄ ▐░█▄▄▄▄▄▄▄▄▄
// ▐░▌ ▐░░░░░░░░░░▌ ▐░▌ ▐░▌▐░░░░░░░░▌▐░░░░░░░░░░░▌
// ▐░▌ ▐░█▀▀▀▀▀▀▀█░▌▐░▌ ▐░▌ ▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀▀▀
// ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌▐░▌
// ▄▄▄▄█░█▄▄▄▄ ▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄▄▄ ▐░█▄▄▄▄▄▄▄█░▌▐░▌
// ▐░░░░░░░░░░░▌▐░░░░░░░░░░▌ ▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░▌
// ▀▀▀▀▀▀▀▀▀▀▀ ▀▀▀▀▀▀▀▀▀▀ ▀▀▀▀▀▀▀▀▀▀▀ ▀▀▀▀▀▀▀▀▀▀▀ ▀
#ifndef IBLGF_COMPILE_CUDA
#define IBLGF_COMPILE_CUDA
#endif
#include <boost/mpi.hpp>
#include <boost/mpi/environment.hpp>
#include <boost/mpi/communicator.hpp>

#include "ns_amr_lgf.hpp"
#include <iblgf/dictionary/dictionary.hpp>


using namespace iblgf;

int main(int argc, char *argv[])
{

boost::mpi::environment env(argc, argv);
boost::mpi::communicator world;

std::string input="./";
input += std::string("configFile");

if (argc>1 && argv[1][0] != '-')
{
input = argv[1];
}
int rank = world.rank();
// get number of GPUs and set device
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
std::cout<<"Rank "<<rank<<" found "<<deviceCount<<" GPUs"<<std::endl;
cudaSetDevice(rank % deviceCount);
// Read in dictionary
Dictionary dictionary(input, argc, argv);

//Instantiate setup
NS_AMR_LGF setup(&dictionary);

// run setup
setup.run();

return 0;
}
81 changes: 81 additions & 0 deletions tests/ns_amr_lgf/ns_amr_lgf_gpu_Test.cu
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
// ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄ ▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄
// ▐░░░░░░░░░░░▌▐░░░░░░░░░░▌ ▐░▌ ▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌
// ▀▀▀▀█░█▀▀▀▀ ▐░█▀▀▀▀▀▀▀█░▌▐░▌ ▐░█▀▀▀▀▀▀▀▀▀ ▐░█▀▀▀▀▀▀▀▀▀
// ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌
// ▐░▌ ▐░█▄▄▄▄▄▄▄█░▌▐░▌ ▐░▌ ▄▄▄▄▄▄▄▄ ▐░█▄▄▄▄▄▄▄▄▄
// ▐░▌ ▐░░░░░░░░░░▌ ▐░▌ ▐░▌▐░░░░░░░░▌▐░░░░░░░░░░░▌
// ▐░▌ ▐░█▀▀▀▀▀▀▀█░▌▐░▌ ▐░▌ ▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀▀▀
// ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌▐░▌
// ▄▄▄▄█░█▄▄▄▄ ▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄▄▄ ▐░█▄▄▄▄▄▄▄█░▌▐░▌
// ▐░░░░░░░░░░░▌▐░░░░░░░░░░▌ ▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░▌
// ▀▀▀▀▀▀▀▀▀▀▀ ▀▀▀▀▀▀▀▀▀▀ ▀▀▀▀▀▀▀▀▀▀▀ ▀▀▀▀▀▀▀▀▀▀▀ ▀
#ifndef IBLGF_COMPILE_CUDA
#define IBLGF_COMPILE_CUDA
#endif
#include <gtest/gtest.h>
#ifndef __CUDACC__
#include <boost/filesystem.hpp>
#endif
#include <boost/mpi/communicator.hpp>
#include <boost/mpi/environment.hpp>

#include "ns_amr_lgf.hpp"
#include <iblgf/dictionary/dictionary.hpp>


namespace iblgf{
// Runs one vortex-ring simulation from the given config file and returns
// the signed error margin: (measured u1 L-inf error) - (EXP_LInf threshold
// from the config). A negative return value means the run passed.
//
// argc/argv default to an empty command line so the test harness can call
// vortex_run(path) with just a config path (the TEST below does exactly
// that) — presumably Dictionary tolerates argc == 0; confirm against its
// constructor.
double vortex_run(const std::string input, int argc = 0, char** argv = nullptr)
{
    // Read in dictionary
    dictionary::Dictionary dictionary(input, argc, argv);

    // Instantiate setup
    NS_AMR_LGF setup(&dictionary);

    // Advance the simulation; the error of interest is queried separately
    // afterwards (the return value of run() is intentionally discarded).
    setup.run();
    const double L_inf_error = setup.u1_Linf_fine();

    const double EXP_LInf = dictionary.get_dictionary("simulation_parameters")
                                ->template get_or<double>("EXP_LInf", 0);

    return L_inf_error - EXP_LInf;
}

// Sweeps the current working directory for files whose names start with
// "config" and runs the vortex-ring case on each, asserting that the
// measured L-inf error stays below the per-config EXP_LInf threshold
// (vortex_run returns error - threshold, so < 0.0 means pass).
// NOTE(review): every rank iterates the directory independently —
// presumably the filesystem view is identical across ranks; confirm.
// NOTE(review): vortex_run is declared above with (input, argc, argv) and
// no default arguments, yet it is called here with a single argument —
// this will not compile unless defaults are added; confirm the fix.
TEST(PoissonSolverTest_GPU, VortexRing_1)
{
boost::mpi::communicator world;

for (auto& entry : boost::filesystem::directory_iterator( "./"))
{
auto s = entry.path();

// Only files named config* are treated as test cases.
if (s.filename().string().rfind("config", 0) == 0)
{
if (world.rank() == 0)
std::cout << "------------- Testing on config file "
<< s.filename() << " -------------" << std::endl;

double L_inf_error = vortex_run(s.string());
// Keep ranks in lockstep between configs before asserting.
world.barrier();

// Negative margin == error below the configured threshold.
EXPECT_LT(L_inf_error, 0.0);
}
}
}
} //namespace iblgf
int main(int argc, char** argv)
{
::testing::InitGoogleTest(&argc, argv);

// Initialize MPI before any tests run
boost::mpi::environment env(argc, argv);
boost::mpi::communicator world; // optional, can use in main if needed
int rank = world.rank();
// get number of GPUs and set device
int deviceCount = 0;
cudaError_t err = cudaGetDeviceCount(&deviceCount);
std::cout<<"Rank "<<rank<<" found "<<deviceCount<<" GPUs"<<std::endl;
cudaSetDevice(rank % deviceCount);
return RUN_ALL_TESTS(); // now MPI is already initialized
}
29 changes: 28 additions & 1 deletion tests/poisson/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,17 @@ add_custom_target(copy_files ALL
${CTEST_WORKING_DIRECTORY}/${NAME}
)

set(Ptest_list poisson_Test)
# GPU test runs use their own CTest working directory (${NAME}_gpu);
# stage the config files there whenever copy_files rebuilds.
if(USE_GPU)
    add_custom_command(TARGET copy_files POST_BUILD
        COMMAND ${CMAKE_COMMAND} -E make_directory ${CTEST_WORKING_DIRECTORY}/${NAME}_gpu
        COMMAND ${CMAKE_COMMAND} -E copy_directory
                ${CMAKE_CURRENT_SOURCE_DIR}/configs
                ${CTEST_WORKING_DIRECTORY}/${NAME}_gpu
        VERBATIM  # escape arguments portably across generators/platforms
    )
endif()

# Test executables consumed by the foreach loops below.
set(Ptest_list poisson_Test)
set(Ptest_list_gpu poisson_gpu_Test)
# add the executable
add_executable(poisson.x poisson.cpp ${SOURCES})
target_link_libraries(poisson.x
Expand Down Expand Up @@ -66,3 +75,21 @@ foreach (item ${Ptest_list})
#COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} 8 $<TARGET_FILE:${item}> WORKING_DIRECTORY ${CTEST_WORKING_DIRECTORY}/${NAME}
)
endforeach()

if(USE_GPU)
    foreach(item ${Ptest_list_gpu})
        # Each GPU test builds from its own .cu entry point plus the shared
        # CPU and CUDA sources.
        add_executable(${item} ${item}.cu ${SOURCES} ${GPU_SOURCES})
        # Ordering only: config files must be staged before the test target
        # finishes building (add_dependencies does not link anything).
        add_dependencies(${item} copy_files)
        # PRIVATE: test executables have no downstream consumers.
        target_link_libraries(${item} PRIVATE iblgf_test_main
            ${CUDA_CUFFT_LIBRARIES} cufft)
        set_target_properties(${item} PROPERTIES
            CUDA_SEPARABLE_COMPILATION ON
            CUDA_STANDARD 17
        )
        # NOTE(review): the test name hardcodes "np13" while the rank count
        # comes from IBLGF_MPI_NP — confirm the two agree, or derive the
        # name from ${IBLGF_MPI_NP}.
        add_test(
            NAME ${item}_parallel_np13
            COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${IBLGF_MPI_NP} $<TARGET_FILE:${item}>
            WORKING_DIRECTORY ${CTEST_WORKING_DIRECTORY}/${NAME}_gpu
        )
    endforeach()
endif()
Loading
Loading