#ifndef DUNE_ISTL_FASTAMG_HH
#define DUNE_ISTL_FASTAMG_HH

#include <dune/common/exceptions.hh>
#include <dune/common/typetraits.hh>
// ... further includes elided in this excerpt ...
  /** \brief A fast (sequential) algebraic multigrid based on agglomeration that saves memory bandwidth. */
  template<class M, class X, class PI=SequentialInformation,
           class A=std::allocator<X> >
  class FastAMG : public Preconditioner<X,X>
  {
  public:
    // ... public typedefs (Operator, Domain, Range, OperatorHierarchy, CoarseSolver,
    //     Smoother, ...) and further member functions (pre, apply, post, category, ...)
    //     are elided in this excerpt ...

    /** \brief Construct an AMG preconditioner from the fine-level operator
     *         (remaining parameters elided here). */
    template<class C>
    FastAMG(std::shared_ptr<const Operator> fineOperator, const C& criterion, /* ... */);

    /** \brief Construct an AMG with an inexact coarse solver based on the smoother. */
    template<class C>
    FastAMG(const Operator& fineOperator, const C& criterion,
            const Parameters& parms=Parameters(), bool symmetric=true,
            const ParallelInformation& pinfo=ParallelInformation())
      : FastAMG(stackobject_to_shared_ptr(fineOperator),
                criterion, parms, symmetric, pinfo)
    {}

    /** \brief Recalculate the matrix hierarchy. */
    void recalculateHierarchy()
    {
      matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
    }

  private:
    /** \brief Build the matrix and parallel-information hierarchies. */
    template<class C>
    void createHierarchies(C& criterion,
                           std::shared_ptr<const Operator> fineOperator,
                           const PI& pinfo);

    /** \brief Bundles the iterators of all hierarchies for the current level. */
    struct LevelContext
    {
      /** \brief The iterator over the matrices. */
      typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
      /** \brief The iterator over the redistribution information. */
      typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
      /** \brief The iterator over the aggregates maps. */
      typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
      // ... the iterators pinfo, lhs, rhs, residual and the level index are elided ...
    };

    /** \brief Apply one multigrid cycle on the current level. */
    void mgc(LevelContext& levelContext, Domain& x, const Range& b);

    /** \brief Apply pre-smoothing on the current level. */
    void presmooth(LevelContext& levelContext, Domain& x, const Range& b);

    /** \brief Apply post-smoothing on the current level. */
    void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);

    /** \brief Move the iterators to the finer level and prolongate the correction. */
    void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
                         Domain& x);

    /** \brief Restrict the defect and move the iterators to the coarser level. */
    bool moveToCoarseLevel(LevelContext& levelContext);

    /** \brief Initialize the level-context iterators with the finest level. */
    void initIteratorsWithFineLevel(LevelContext& levelContext);

    std::shared_ptr<OperatorHierarchy> matrices_;   //!< The hierarchy of matrices and parallel information.
    std::shared_ptr<CoarseSolver> solver_;          //!< The solver used on the coarsest level.
    std::shared_ptr<Hierarchy<Range,A>> rhs_;       //!< The hierarchy of right-hand sides.
    std::shared_ptr<Hierarchy<Domain,A>> lhs_;      //!< The hierarchy of solutions.
    std::shared_ptr<Hierarchy<Domain,A>> residual_; //!< The hierarchy of residuals.
    std::shared_ptr<ScalarProduct> scalarProduct_;  //!< The scalar product used on the coarsest level.
    std::size_t preSteps_;                          //!< The number of pre-smoothing steps.
    std::size_t postSteps_;                         //!< The number of post-smoothing steps.
    bool buildHierarchy_;                           //!< Whether this instance built (and owns) the hierarchy.
    bool coarsesolverconverged;                     //!< Whether the coarse solver converged.
    typedef std::shared_ptr<Smoother> SmootherPointer;
    SmootherPointer coarseSmoother_;                //!< The smoother used on the coarsest level.
    std::size_t verbosity_;                         //!< The verbosity level.
    // ... further members (e.g. gamma_, symmetric) elided in this excerpt ...
  };
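  // --------------------------------------------------------------------------
  // Illustrative usage sketch (not part of the original header): how a FastAMG
  // object is typically constructed and plugged into a Krylov solver as a
  // preconditioner.  The concrete matrix/vector types, the coarsening criterion
  // and all numeric parameters below are assumptions chosen for illustration.
  //
  //   using Matrix    = Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1>>;
  //   using Vector    = Dune::BlockVector<Dune::FieldVector<double,1>>;
  //   using Op        = Dune::MatrixAdapter<Matrix,Vector,Vector>;
  //   using Criterion = Dune::Amg::CoarsenCriterion<
  //       Dune::Amg::SymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal>>;
  //
  //   Op op(A);                             // A: an assembled BCRSMatrix
  //   Criterion criterion(15, 2000);        // illustrative coarsening limits
  //   Dune::Amg::Parameters parms;
  //   Dune::Amg::FastAMG<Op,Vector> amg(op, criterion, parms);
  //
  //   Dune::CGSolver<Vector> cg(op, amg, 1e-8, 100, 2);
  //   Dune::InverseOperatorResult res;
  //   cg.apply(x, b, res);                  // x, b: solution and right-hand side
  // --------------------------------------------------------------------------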
  template<class M, class X, class PI, class A>
  FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
    : matrices_(amg.matrices_), solver_(amg.solver_),
      rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
      gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
      symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
      coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
  {}

  /** \brief Construct a new AMG with a specific coarse solver. */
  template<class M, class X, class PI, class A>
  FastAMG<M,X,PI,A>::FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
                             const Parameters& parms, bool symmetric_)
    : matrices_(stackobject_to_shared_ptr(matrices)), solver_(&coarseSolver),
      rhs_(), lhs_(), residual_(), scalarProduct_(),
      gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
      postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
      symmetric(symmetric_), coarsesolverconverged(true),
      coarseSmoother_(), verbosity_(parms.debugLevel())
  {
    if(preSteps_>1||postSteps_>1)
    {
      std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
      preSteps_=postSteps_=0;
    }
    assert(matrices_->isBuilt());
    static_assert(std::is_same<PI,SequentialInformation>::value,
                  "Currently only sequential runs are supported");
  }
  template<class M, class X, class PI, class A>
  template<class C>
  FastAMG<M,X,PI,A>::FastAMG(std::shared_ptr<const Operator> fineOperator,
                             const C& criterion,
                             const Parameters& parms, bool symmetric_,
                             const PI& pinfo)
    : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
      preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
      buildHierarchy_(true),
      symmetric(symmetric_), coarsesolverconverged(true),
      coarseSmoother_(), verbosity_(criterion.debugLevel())
  {
    if(preSteps_>1||postSteps_>1)
    {
      std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
      preSteps_=postSteps_=1;
    }
    static_assert(std::is_same<PI,SequentialInformation>::value,
                  "Currently only sequential runs are supported");

    createHierarchies(criterion, std::move(fineOperator), pinfo);
  }
  template<class M, class X, class PI, class A>
  template<class C>
  void FastAMG<M,X,PI,A>::createHierarchies(C& criterion,
                                            std::shared_ptr<const Operator> fineOperator,
                                            const PI& pinfo)
  {
    Timer watch;
    matrices_ = std::make_shared<OperatorHierarchy>(
      std::const_pointer_cast<Operator>(std::move(fineOperator)),
      stackobject_to_shared_ptr(const_cast<PI&>(pinfo)));

    matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);

    if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
      std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()
               <<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;

    if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
      // We have reached the coarsest level: set up the coarse smoother and solver.
      // (Declaration and initialization of the smoother arguments `sargs` and the
      //  construction arguments `cargs` are elided in this excerpt.)
      cargs.setArgs(sargs);
      if(matrices_->redistributeInformation().back().isSetup()) {
        // use the matrix and communication of the redistributed coarsest level
        cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
        cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
      }else{
        cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
        cargs.setComm(*matrices_->parallelInformation().coarsest());
      }
      // (Construction of coarseSmoother_ from cargs is elided.)

      scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());

#if HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
#if HAVE_SUITESPARSE_UMFPACK
#define DIRECTSOLVER UMFPack
#else
#define DIRECTSOLVER SuperLU
#endif
      // Use a direct solver if we are purely sequential or if only one process
      // is left on the coarsest level.
      if(std::is_same<ParallelInformation,SequentialInformation>::value
         || matrices_->parallelInformation().coarsest()->communicator().size()==1
         || (matrices_->parallelInformation().coarsest().isRedistributed()
             && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
             && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) {
        if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
          std::cout<<"Using superlu"<<std::endl;
        if(matrices_->parallelInformation().coarsest().isRedistributed())
        {
          if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
            // we are still participating on this level
            solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(
                            matrices_->matrices().coarsest().getRedistributed().getmat(),
                            false, false));
        }else
          solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(
                          matrices_->matrices().coarsest()->getmat(),
                          false, false));
      }else
#endif // HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
      {
        // fall back to BiCGSTAB preconditioned by the coarse smoother
        if(matrices_->parallelInformation().coarsest().isRedistributed())
        {
          if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
            // we are still participating on this level
            solver_.reset(new BiCGSTABSolver<X>(
                            const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
                            *coarseSmoother_, 1E-2, 1000, 0));
        }else
          solver_.reset(new BiCGSTABSolver<X>(
                          const_cast<M&>(*matrices_->matrices().coarsest()),
                          *coarseSmoother_, 1E-2, 1000, 0));
      }
    }

    if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
      std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()
               <<" levels took "<<watch.elapsed()<<" seconds."<<std::endl;
  }
  /** \brief Prepare the preconditioner. */
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
  {
    // (Timer objects and the typedefs RowIter and ColIter as well as the helpers
    //  `zero` and `diag` are declared in elided lines.)
    typedef typename M::matrix_type Matrix;

    // Detect Dirichlet-like rows, i.e. rows whose off-diagonal entries are all zero,
    // and solve them directly: x_i = A_ii^{-1} b_i.
    const Matrix& mat=matrices_->matrices().finest()->getmat();
    for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
      bool isDirichlet = true;
      bool hasDiagonal = false;
      for(ColIter col=row->begin(); col!=row->end(); ++col) {
        if(row.index()==col.index()) {
          hasDiagonal = (*col != zero);
          // (the diagonal position is remembered in `diag`; the off-diagonal test
          //  that clears isDirichlet is elided)
        }
      }
      if(isDirichlet && hasDiagonal)
        diag->solve(x[row.index()], b[row.index()]);
    }
    // ...
    std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;

    // Make the solution consistent and set up the vector hierarchies.
    matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
    rhs_ = std::make_shared<Hierarchy<Range,A>>(std::make_shared<Range>(b));
    lhs_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
    residual_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
    matrices_->coarsenVector(*rhs_);
    matrices_->coarsenVector(*lhs_);
    matrices_->coarsenVector(*residual_);
  }
  template<class M, class X, class PI, class A>
  std::size_t FastAMG<M,X,PI,A>::levels()
  {
    return matrices_->levels();
  }

  template<class M, class X, class PI, class A>
  std::size_t FastAMG<M,X,PI,A>::maxlevels()
  {
    return matrices_->maxlevels();
  }
  /** \brief Apply one step of the preconditioner to the system A(v)=d. */
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
  {
    LevelContext levelContext;

    initIteratorsWithFineLevel(levelContext);

    assert(v.two_norm()==0);

    if(matrices_->maxlevels()==1){
      // the coarse solver may modify the right-hand side, so work on a copy
      Range b(d);
      mgc(levelContext, v, b);
    }else
      mgc(levelContext, v, d);
    if(postSteps_==0||matrices_->maxlevels()==1)
      levelContext.pinfo->copyOwnerToAll(v, v);
  }
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
  {
    levelContext.matrix = matrices_->matrices().finest();
    levelContext.pinfo = matrices_->parallelInformation().finest();
    levelContext.redist =
      matrices_->redistributeInformation().begin();
    levelContext.aggregates = matrices_->aggregatesMaps().begin();
    levelContext.lhs = lhs_->finest();
    levelContext.residual = residual_->finest();
    levelContext.rhs = rhs_->finest();
    levelContext.level=0;
  }
  template<class M, class X, class PI, class A>
  bool FastAMG<M,X,PI,A>
  ::moveToCoarseLevel(LevelContext& levelContext)
  {
    bool processNextLevel=true;

    if(levelContext.redist->isSetup()) {
      // restrict the defect to the right-hand side of the redistributed coarse level
      levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
                                        levelContext.residual.getRedistributed());
      processNextLevel = levelContext.residual.getRedistributed().size()>0;
      if(processNextLevel) {
        ++levelContext.pinfo;
        Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
          ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
                           static_cast<const Range&>(levelContext.residual.getRedistributed()),
                           *levelContext.pinfo);
      }
    }else{
      // ...
      ++levelContext.pinfo;
      Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
                         static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
    }

    if(processNextLevel) {
      // move the iterators to the next level
      ++levelContext.residual;
      // ...
      ++levelContext.matrix;
      ++levelContext.level;
      ++levelContext.redist;

      if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
        // the next level is not the globally coarsest one
        ++levelContext.aggregates;
      }
      // ...
      *levelContext.residual=0;
    }
    return processNextLevel;
  }
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>
  ::moveToFineLevel(LevelContext& levelContext, bool processNextLevel, Domain& x)
  {
    if(processNextLevel) {
      if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
        // the coarse level was not the globally coarsest one
        --levelContext.aggregates;
      }
      --levelContext.redist;
      --levelContext.level;
      // prolongate the correction to the current (finer) level
      --levelContext.matrix;
      --levelContext.residual;
    }

    typename Hierarchy<Domain,A>::Iterator coarseLhs = levelContext.lhs--;
    if(levelContext.redist->isSetup()) {
      Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
                           levelContext.lhs.getRedistributed(),
                           matrices_->getProlongationDampingFactor(),
                           *levelContext.pinfo, *levelContext.redist);
    }else{
      Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
        ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
                           matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
    }

    if(processNextLevel) {
      // ...
    }
  }
  /** \brief Apply pre-smoothing on the current level. */
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>
  ::presmooth(LevelContext& levelContext, Domain& x, const Range& b)
  {
    constexpr auto bl = blockLevel<typename M::matrix_type>();
    GaussSeidelPresmoothDefect<bl>
      ::apply(levelContext.matrix->getmat(), x,
              *levelContext.residual,
              b);
  }

  /** \brief Apply post-smoothing on the current level. */
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>
  ::postsmooth(LevelContext& levelContext, Domain& x, const Range& b)
  {
    constexpr auto bl = blockLevel<typename M::matrix_type>();
    GaussSeidelPostsmoothDefect<bl>
      ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
  }
  /** \brief Check whether the coarse solver used is a direct solver. */
  template<class M, class X, class PI, class A>
  bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
  {
    return IsDirectSolver<CoarseSolver>::value;
  }

  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b)
  {
    if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
      // solve directly on the coarsest level
      InverseOperatorResult res;
      res.converged=true; // may not be set if no local rows are left after redistribution
      if(levelContext.redist->isSetup()) {
        levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
        if(levelContext.rhs.getRedistributed().size()>0) {
          // we are still participating on this level
          levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
                                                               levelContext.rhs.getRedistributed());
          solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
        }
        levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
        levelContext.pinfo->copyOwnerToAll(v, v);
      }else{
        levelContext.pinfo->copyOwnerToAll(b, b);
        solver_->apply(v, const_cast<Range&>(b), res);
      }

      if(!res.converged)
        coarsesolverconverged = false;
    }else{
      // presmoothing
      presmooth(levelContext, v, b);

#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
      bool processNextLevel = moveToCoarseLevel(levelContext);

      if(processNextLevel) {
        // next level: perform gamma_ cycles
        for(std::size_t i=0; i<gamma_; i++)
          mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
      }

      moveToFineLevel(levelContext, processNextLevel, v);
      // ...
#endif

      if(levelContext.matrix == matrices_->matrices().finest()) {
        coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
        if(!coarsesolverconverged)
          DUNE_THROW(MathError, "Coarse solver did not converge");
      }
      // postsmoothing
      postsmooth(levelContext, v, b);
    }
  }
  /** \brief Clean up. */
  template<class M, class X, class PI, class A>
  void FastAMG<M,X,PI,A>::post(Domain& x)
  {
    // ... (body elided in this excerpt) ...
  }

  /** \brief Get the aggregate number of each unknown on the coarsest level. */
  template<class M, class X, class PI, class A>
  template<class A1>
  void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
  {
    matrices_->getCoarsestAggregatesOnFinest(cont);
  }
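The following sketch (not part of the header) illustrates how the aggregate numbers exposed by getCoarsestAggregateNumbers() might be inspected after the preconditioner has been set up; the helper name aggregateSizes and the per-unknown interpretation of the returned indices are assumptions made for illustration.

  #include <map>
  #include <vector>

  // Illustrative helper: count how many unknowns belong to each coarsest-level
  // aggregate of an already-constructed FastAMG instance `amg` (hypothetical).
  template<class AMG>
  std::map<std::size_t, std::size_t> aggregateSizes(AMG& amg)
  {
    std::vector<std::size_t> aggregates;
    amg.getCoarsestAggregateNumbers(aggregates);  // one aggregate index per unknown
    std::map<std::size_t, std::size_t> sizes;
    for(std::size_t a : aggregates)
      ++sizes[a];
    return sizes;
  }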