//                       MFEM Example 1 - Parallel Version
//                              AmgX Modification
//
// Compile with: make ex1p
//
// AmgX sample runs:
//               mpirun -np 4 ex1p
//               mpirun -np 4 ex1p -d cuda
//               mpirun -np 10 ex1p --amgx-file amg_pcg.json --amgx-mpi-teams
//               mpirun -np 4 ex1p --amgx-file amg_pcg.json
//
// Description:  This example code demonstrates the use of MFEM to define a
//               simple finite element discretization of the Laplace problem
//               -Delta u = 1 with homogeneous Dirichlet boundary conditions.
//               Specifically, we discretize using a FE space of the specified
//               order, or if order < 1 using an isoparametric/isogeometric
//               space (i.e. quadratic for quadratic curvilinear mesh, NURBS
//               for NURBS mesh, etc.)
//
//               The example highlights the use of mesh refinement, finite
//               element grid functions, as well as linear and bilinear forms
//               corresponding to the left-hand side and right-hand side of the
//               discrete linear system. We also cover the explicit elimination
//               of essential boundary conditions, static condensation, and the
//               optional connection to the GLVis tool for visualization.

#include "mfem.hpp"
#include <fstream>
#include <iostream>

using namespace std;
using namespace mfem;

#ifndef MFEM_USE_AMGX
#error This example requires that MFEM is built with MFEM_USE_AMGX=YES
#endif

int main(int argc, char *argv[])
{
   // 1. Initialize MPI and HYPRE.
   Mpi::Init(argc, argv);
   int num_procs = Mpi::WorldSize();
   int myid = Mpi::WorldRank();
   Hypre::Init();

   // 2. Parse command-line options.
   const char *mesh_file = "../../data/star.mesh";
   int order = 1;
   bool static_cond = false;
   bool pa = false;
   const char *device_config = "cpu";
   bool visualization = true;
   bool amgx_lib = true;
   bool amgx_mpi_teams = false;
   const char* amgx_json_file = ""; // JSON file for AmgX
   int ndevices = 1;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree) or -1 for"
                  " isoparametric space.");
   args.AddOption(&static_cond, "-sc", "--static-condensation", "-no-sc",
                  "--no-static-condensation", "Enable static condensation.");
   args.AddOption(&pa, "-pa", "--partial-assembly", "-no-pa",
                  "--no-partial-assembly", "Enable Partial Assembly.");
   args.AddOption(&amgx_lib, "-amgx", "--amgx-lib", "-no-amgx",
                  "--no-amgx-lib", "Use AmgX in example.");
   args.AddOption(&amgx_json_file, "--amgx-file", "--amgx-file",
                  "AMGX solver config file (overrides --amgx-solver, "
                  "--amgx-verbose).");
   args.AddOption(&amgx_mpi_teams, "--amgx-mpi-teams", "--amgx-mpi-teams",
                  "--amgx-mpi-gpu-exclusive", "--amgx-mpi-gpu-exclusive",
                  "Create MPI teams when using AmgX to load balance between "
                  "ranks and GPUs.");
   args.AddOption(&device_config, "-d", "--device",
                  "Device configuration string, see Device::Configure().");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.AddOption(&ndevices, "-nd", "--gpus-per-node-in-teams-mode",
                  "Number of GPU devices per node (only used if "
                  "amgx_mpi_teams is true).");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Enable hardware devices such as GPUs, and programming models such as
   //    CUDA, OCCA, RAJA and OpenMP based on command line options.
   Device device(device_config);
   if (myid == 0) { device.Print(); }
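
   // The --amgx-file option expects an AmgX JSON configuration. For reference,
   // a minimal AMG-preconditioned PCG configuration has roughly the following
   // shape (a sketch only; the amg_pcg.json used in the sample runs above may
   // differ in its exact settings):
   //
   //    {
   //       "config_version": 2,
   //       "solver": {
   //          "preconditioner": {
   //             "solver": "AMG",
   //             "algorithm": "CLASSICAL",
   //             "max_iters": 1,
   //             "cycle": "V"
   //          },
   //          "solver": "PCG",
   //          "max_iters": 100,
   //          "tolerance": 1e-12,
   //          "monitor_residual": 1,
   //          "print_solve_stats": 1
   //       }
   //    }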

   // 4. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh mesh(mesh_file, 1, 1);
   int dim = mesh.Dimension();

   // 5. Refine the serial mesh on all processors to increase the resolution.
   //    In this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 10,000 elements.
   {
      int ref_levels =
         (int)floor(log(10000./mesh.GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
      {
         mesh.UniformRefinement();
      }
   }

   // 6. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh pmesh(MPI_COMM_WORLD, mesh);
   mesh.Clear();
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh.UniformRefinement();
      }
   }

   // 7. Define a parallel finite element space on the parallel mesh. Here we
   //    use continuous Lagrange finite elements of the specified order. If
   //    order < 1, we instead use an isoparametric/isogeometric space.
   FiniteElementCollection *fec;
   bool delete_fec;
   if (order > 0)
   {
      fec = new H1_FECollection(order, dim);
      delete_fec = true;
   }
   else if (pmesh.GetNodes())
   {
      fec = pmesh.GetNodes()->OwnFEC();
      delete_fec = false;
      if (myid == 0)
      {
         cout << "Using isoparametric FEs: " << fec->Name() << endl;
      }
   }
   else
   {
      fec = new H1_FECollection(order = 1, dim);
      delete_fec = true;
   }
   ParFiniteElementSpace fespace(&pmesh, fec);
   HYPRE_BigInt size = fespace.GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of finite element unknowns: " << size << endl;
   }

   // 8. Determine the list of true (i.e. parallel conforming) essential
   //    boundary dofs. In this example, the boundary conditions are defined
   //    by marking all the boundary attributes from the mesh as essential
   //    (Dirichlet) and converting them to a list of true dofs.
   Array<int> ess_tdof_list;
   if (pmesh.bdr_attributes.Size())
   {
      Array<int> ess_bdr(pmesh.bdr_attributes.Max());
      ess_bdr = 1;
      fespace.GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
   }

   // 9. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (1,phi_i) where phi_i are the basis functions in fespace.
   ParLinearForm b(&fespace);
   ConstantCoefficient one(1.0);
   b.AddDomainIntegrator(new DomainLFIntegrator(one));
   b.Assemble();

   // 10. Define the solution vector x as a parallel finite element grid
   //     function corresponding to fespace. Initialize x with initial guess of
   //     zero, which satisfies the boundary conditions.
   ParGridFunction x(&fespace);
   x = 0.0;

   // 11. Set up the parallel bilinear form a(.,.) on the finite element space
   //     corresponding to the Laplacian operator -Delta, by adding the
   //     Diffusion domain integrator.
   ParBilinearForm a(&fespace);
   if (pa) { a.SetAssemblyLevel(AssemblyLevel::PARTIAL); }
   a.AddDomainIntegrator(new DiffusionIntegrator(one));

   // 12. Assemble the parallel bilinear form and the corresponding linear
   //     system, applying any necessary transformations such as: parallel
   //     assembly, eliminating boundary conditions, applying conforming
   //     constraints for non-conforming AMR, static condensation, etc.
   if (static_cond) { a.EnableStaticCondensation(); }
   a.Assemble();

   OperatorPtr A;
   Vector B, X;
   a.FormLinearSystem(ess_tdof_list, x, b, A, X, B);
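
   // The solve phase below dispatches on the command-line options:
   //    * partial assembly (-pa):    CG, preconditioned by Jacobi smoothing
   //                                 when a tensor-product basis is used;
   //    * AmgX without a JSON file:  CG preconditioned by AmgX AMG;
   //    * AmgX with a JSON file:     AmgX as the complete solver, configured
   //                                 entirely by the file;
   //    * otherwise:                 CG preconditioned by hypre's BoomerAMG.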

   // 13. Solve the linear system A X = B.
   //     * With full assembly, use the BoomerAMG preconditioner from hypre.
   //     * If AmgX is available, use it as an AMG preconditioner or as a
   //       complete solver.
   //     * With partial assembly, use Jacobi smoothing, for now.
   Solver *prec = NULL;
   if (pa)
   {
      if (UsesTensorBasis(fespace))
      {
         prec = new OperatorJacobiSmoother(a, ess_tdof_list);
      }
      CGSolver cg(MPI_COMM_WORLD);
      cg.SetRelTol(1e-12);
      cg.SetMaxIter(2000);
      cg.SetPrintLevel(1);
      if (prec) { cg.SetPreconditioner(*prec); }
      cg.SetOperator(*A);
      cg.Mult(B, X);
      delete prec;
   }
   else if (amgx_lib && strcmp(amgx_json_file,"") == 0)
   {
      MFEM_VERIFY(!amgx_mpi_teams,
                  "Please add a JSON file to try AmgX with MPI teams mode.");

      bool amgx_verbose = false;
      prec = new AmgXSolver(MPI_COMM_WORLD, AmgXSolver::PRECONDITIONER,
                            amgx_verbose);

      CGSolver cg(MPI_COMM_WORLD);
      cg.SetRelTol(1e-12);
      cg.SetMaxIter(2000);
      cg.SetPrintLevel(1);
      if (prec) { cg.SetPreconditioner(*prec); }
      cg.SetOperator(*A);
      cg.Mult(B, X);
      delete prec;
   }
   else if (amgx_lib && strcmp(amgx_json_file,"") != 0)
   {
      AmgXSolver amgx;
      amgx.ReadParameters(amgx_json_file, AmgXSolver::EXTERNAL);

      if (amgx_mpi_teams)
      {
         // Forms MPI teams to load balance between MPI ranks and GPUs,
         // e.g. when there are more MPI ranks than GPUs on a node;
         // 'ndevices' is the number of GPUs available per node.
         amgx.InitMPITeams(MPI_COMM_WORLD, ndevices);
      }
      else
      {
         // Assumes each MPI rank is paired with a GPU
         amgx.InitExclusiveGPU(MPI_COMM_WORLD);
      }

      amgx.SetOperator(*A.As<HypreParMatrix>());
      amgx.SetConvergenceCheck(true);
      amgx.Mult(B, X);

      // Release MPI communicators and resources created by AmgX
      amgx.Finalize();
   }
   else
   {
      prec = new HypreBoomerAMG;
      CGSolver cg(MPI_COMM_WORLD);
      cg.SetRelTol(1e-12);
      cg.SetMaxIter(2000);
      cg.SetPrintLevel(1);
      if (prec) { cg.SetPreconditioner(*prec); }
      cg.SetOperator(*A);
      cg.Mult(B, X);
      delete prec;
   }

   // 14. Recover the parallel grid function corresponding to X. This is the
   //     local finite element solution on each processor.
   a.RecoverFEMSolution(X, b, x);

   // 15. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh.Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 16. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << pmesh << x << flush;
   }

   // 17. Free the used memory.
   if (delete_fec)
   {
      delete fec;
   }

   return 0;
}