dune-pdelab  2.4.1-rc2
parallelhelper.hh
Go to the documentation of this file.
1 // -*- tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
2 // vi: set et ts=8 sw=2 sts=2:
3 #ifndef DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
4 #define DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
5 
6 #include <limits>
7 
8 #include <dune/common/deprecated.hh>
9 #include <dune/common/parallel/mpihelper.hh>
10 #include <dune/common/stdstreams.hh>
11 
12 #include <dune/istl/owneroverlapcopy.hh>
13 #include <dune/istl/solvercategory.hh>
14 #include <dune/istl/operators.hh>
15 #include <dune/istl/solvers.hh>
16 #include <dune/istl/preconditioners.hh>
17 #include <dune/istl/scalarproducts.hh>
18 #include <dune/istl/paamg/amg.hh>
19 #include <dune/istl/paamg/pinfo.hh>
20 #include <dune/istl/io.hh>
21 #include <dune/istl/superlu.hh>
22 
29 
30 namespace Dune {
31  namespace PDELab {
32  namespace istl {
33 
37 
38 
39  //========================================================
40  // A parallel helper class providing a nonoverlapping
41  // decomposition of all degrees of freedom
42  //========================================================
43 
44  template<typename GFS>
46  {
47 
49  typedef int RankIndex;
50 
54  using GhostVector = Dune::PDELab::Backend::Vector<GFS,bool>;
55 
57  typedef typename GFS::Ordering::Traits::ContainerIndex ContainerIndex;
58 
59  public:
60 
// Constructor: builds the per-DOF data this helper serves queries from.
// Fills _ranks (the owner rank of each DOF, a disjoint partitioning) and
// _ghosts (a ghost flag per DOF) via grid-view communication, after picking
// the cheapest communication interfaces that the GFS's partition set allows.
// NOTE(review): this listing was extracted from generated documentation;
// original lines 91 and 98 (the declarations of the data handles `gdh` and
// `pdh`) are missing here — the commented-out lines above each communicate()
// call hint at their content. Confirm against the real source.
61  ParallelHelper (const GFS& gfs, int verbose = 1)
62  : _gfs(gfs)
63  , _rank(gfs.gridView().comm().rank())
64  , _ranks(gfs,_rank)
65  , _ghosts(gfs,false)
66  , _verbose(verbose)
67  {
68 
69  // Let's try to be clever and reduce the communication overhead by picking the smallest
70  // possible communication interface depending on the overlap structure of the GFS.
71  // FIXME: Switch to simple comparison as soon as dune-grid:1b3e83ec0 is reliably available!
72  if (gfs.entitySet().partitions().value == Partitions::interiorBorder.value)
73  {
74  // The GFS only spans the interior and border partitions, so we can skip sending or
75  // receiving anything else.
76  _interiorBorder_all_interface = InteriorBorder_InteriorBorder_Interface;
77  _all_all_interface = InteriorBorder_InteriorBorder_Interface;
78  }
79  else
80  {
81  // In general, we have to transmit more.
82  _interiorBorder_all_interface = InteriorBorder_All_Interface;
83  _all_all_interface = All_All_Interface;
84  }
85 
// Communication is only required (and only possible) in a true parallel run.
86  if (_gfs.gridView().comm().size()>1)
87  {
88 
89  // find out about ghosts
90  //GFSDataHandle<GFS,GhostVector,GhostGatherScatter>
// NOTE(review): missing original line 91 — declaration of `gdh` (presumably a
// GhostDataHandle over _ghosts, cf. the commented-out line above).
92  gdh(_gfs,_ghosts,false);
93  _gfs.gridView().communicate(gdh,_interiorBorder_all_interface,Dune::ForwardCommunication);
94 
95  // create disjoint DOF partitioning
96  // GFSDataHandle<GFS,RankVector,DisjointPartitioningGatherScatter<RankIndex> >
97  // ibdh(_gfs,_ranks,DisjointPartitioningGatherScatter<RankIndex>(_rank));
// NOTE(review): missing original line 98 — declaration of `pdh` (presumably a
// DisjointPartitioningDataHandle over _ranks).
99  _gfs.gridView().communicate(pdh,_interiorBorder_all_interface,Dune::ForwardCommunication);
100 
101  }
102 
103  }
104 
// Mask out all DOFs not owned by the current process with 0 (per the doxygen
// brief for this member). Entry point; the real work happens in the private
// tag-dispatched overloads below.
106  template<typename X>
107  void maskForeignDOFs(X& x) const
108  {
109  using Backend::native;
110  // dispatch to implementation.
// NOTE(review): missing original line 111 — the dispatch call itself,
// presumably maskForeignDOFs(istl::container_tag(native(x)),native(x),native(_ranks)).
// Confirm against the real source.
112  }
113 
114  private:
115 
116  // Implementation for block vector; recursively masks blocks.
117  template<typename X, typename Mask>
118  void maskForeignDOFs(istl::tags::block_vector, X& x, const Mask& mask) const
119  {
120  typename Mask::const_iterator mask_it = mask.begin();
121  for (typename X::iterator it = x.begin(),
122  end_it = x.end();
123  it != end_it;
124  ++it, ++mask_it)
125  maskForeignDOFs(istl::container_tag(*it),*it,*mask_it);
126  }
127 
128  // Implementation for field vector, iterates over entries and masks them individually.
129  template<typename X, typename Mask>
130  void maskForeignDOFs(istl::tags::field_vector, X& x, const Mask& mask) const
131  {
132  typename Mask::const_iterator mask_it = mask.begin();
133  for (typename X::iterator it = x.begin(),
134  end_it = x.end();
135  it != end_it;
136  ++it, ++mask_it)
137  *it = (*mask_it == _rank ? *it : typename X::field_type(0));
138  }
139 
140  public:
141 
143  bool owned(const ContainerIndex& i) const
144  {
145  return _ranks[i] == _rank;
146  }
147 
149  bool isGhost(const ContainerIndex& i) const
150  {
151  return _ghosts[i];
152  }
153 
// Calculates the rank-local dot product of x and y on the disjoint partition
// defined by this helper (per the doxygen brief): each DOF is counted only on
// its owner rank, so summing the results over all ranks yields the global dot
// product without double-counting shared DOFs.
155  template<typename X, typename Y>
156  typename PromotionTraits<
157  typename X::field_type,
158  typename Y::field_type
159  >::PromotedType
160  disjointDot(const X& x, const Y& y) const
161  {
162  using Backend::native;
// NOTE(review): missing original line 163 — the start of the dispatch call,
// presumably `return disjointDot(istl::container_tag(native(x)),`; the
// remaining arguments follow below. Confirm against the real source.
164  native(x),
165  native(y),
166  native(_ranks)
167  );
168  }
169 
170  private:
171 
172  // Implementation for BlockVector, collects the result of recursively
173  // invoking the algorithm on the vector blocks.
174  template<typename X, typename Y, typename Mask>
175  typename PromotionTraits<
176  typename X::field_type,
177  typename Y::field_type
178  >::PromotedType
179  disjointDot(istl::tags::block_vector, const X& x, const Y& y, const Mask& mask) const
180  {
181  typedef typename PromotionTraits<
182  typename X::field_type,
183  typename Y::field_type
184  >::PromotedType result_type;
185 
186  result_type r(0);
187 
188  typename Y::const_iterator y_it = y.begin();
189  typename Mask::const_iterator mask_it = mask.begin();
190  for (typename X::const_iterator x_it = x.begin(),
191  end_it = x.end();
192  x_it != end_it;
193  ++x_it, ++y_it, ++mask_it)
194  r += disjointDot(istl::container_tag(*x_it),*x_it,*y_it,*mask_it);
195 
196  return r;
197  }
198 
199  // Implementation for FieldVector, iterates over the entries and calls Dune::dot() for DOFs
200  // associated with the current rank.
201  template<typename X, typename Y, typename Mask>
202  typename PromotionTraits<
203  typename X::field_type,
204  typename Y::field_type
205  >::PromotedType
206  disjointDot(istl::tags::field_vector, const X& x, const Y& y, const Mask& mask) const
207  {
208  typedef typename PromotionTraits<
209  typename X::field_type,
210  typename Y::field_type
211  >::PromotedType result_type;
212 
213  result_type r(0);
214 
215  typename Y::const_iterator y_it = y.begin();
216  typename Mask::const_iterator mask_it = mask.begin();
217  for (typename X::const_iterator x_it = x.begin(),
218  end_it = x.end();
219  x_it != end_it;
220  ++x_it, ++y_it, ++mask_it)
221  r += (*mask_it == _rank ? Dune::dot(*x_it,*y_it) : result_type(0));
222 
223  return r;
224  }
225 
226  public:
227 
// Returns the MPI rank of this process, as cached from the grid view's
// communicator in the constructor.
229  RankIndex rank() const
230  {
231  return _rank;
232  }
233 
234 #if HAVE_MPI
235 
237 
// Makes the matrix consistent and creates the parallel information for AMG
// (per the doxygen brief). Defined out of class below, guarded by HAVE_MPI.
253  template<typename MatrixType, typename Comm>
254  void createIndexSetAndProjectForAMG(MatrixType& m, Comm& c);
255 
256  private:
257 
258  // Checks whether a matrix block is owned by the current process. Used for the AMG
259  // construction and thus assumes a single level of blocking and blocks with ownership
260  // restricted to a single DOF.
// Only the first entry of the rank block is inspected ([i][0]), consistent
// with the single-DOF-per-block assumption stated above.
261  bool owned_for_amg(std::size_t i) const
262  {
263  return Backend::native(_ranks)[i][0] == _rank;
264  }
265 
266 #endif // HAVE_MPI
267 
268  private:
269 
270  const GFS& _gfs; // grid function space whose DOFs this helper partitions
271  const RankIndex _rank; // MPI rank of this process (cached in the constructor)
272  RankVector _ranks; // per-DOF owner rank, defining a disjoint (unique) decomposition
273  GhostVector _ghosts; // per-DOF flag marking ghost DOFs
274  int _verbose; // verbosity level
275 
// Interface for interiorBorder -> all traffic; shrunk to
// interiorBorder -> interiorBorder when the GFS spans only those partitions
// (see the constructor).
277  InterfaceType _interiorBorder_all_interface;
278 
// Interface for all -> all traffic; likewise possibly shrunk.
280  InterfaceType _all_all_interface;
281  };
282 
283 #if HAVE_MPI
284 
// Out-of-class definition of ParallelHelper::createIndexSetAndProjectForAMG:
// builds the parallel index set and remote-indices information in c that AMG
// needs. Steps: (1) mark DOFs shared with other ranks, (2) assign consecutive
// global indices to owned shared DOFs (offsets via allgather of per-rank
// counts), (3) publish those indices, (4) fill c's index set with
// owner/copy attributes, (5) determine neighbor ranks and rebuild c's
// remote indices.
// NOTE(review): this listing was extracted from generated documentation;
// original lines 287 (the function signature), 295/297 and 303/305 (template
// arguments of the two tag checks), and 352 (the GIVector alias) are missing.
// Confirm against the real source.
285  template<typename GFS>
286  template<typename M, typename C>
288  {
289 
290  using Backend::native;
291 
// Compile-time check that M is a BCRSMatrix ...
292  const bool is_bcrs_matrix =
293  is_same<
294  typename istl::tags::container<
296  >::type::base_tag,
298  >::value;
299 
// ... whose blocks are FieldMatrices.
300  const bool block_type_is_field_matrix =
301  is_same<
302  typename istl::tags::container<
304  >::type::base_tag,
306  >::value;
307 
308  // We assume M to be a BCRSMatrix in the following, so better check for that
309  static_assert(is_bcrs_matrix && block_type_is_field_matrix, "matrix structure not compatible with AMG");
310 
311  // ********************************************************************************
312  // In the following, the code will always assume that all DOFs stored in a single
313  // block of the BCRSMatrix are attached to the same entity and can be handled
314  // identically. For that reason, the code often restricts itself to inspecting the
315  // first entry of the blocks in the diverse BlockVectors.
316  // ********************************************************************************
317 
318  typedef typename GFS::Traits::GridViewType GV;
319  typedef typename RankVector::size_type size_type;
320  const GV& gv = _gfs.gridView();
321 
322  // Do we need to communicate at all?
323  const bool need_communication = _gfs.gridView().comm().size() > 1;
324 
325  // First find out which dofs we share with other processors
326  using BoolVector = Backend::Vector<GFS,bool>;
327  BoolVector sharedDOF(_gfs, false);
328 
329  if (need_communication)
330  {
331  SharedDOFDataHandle<GFS,BoolVector> data_handle(_gfs,sharedDOF,false);
332  _gfs.gridView().communicate(data_handle,_all_all_interface,Dune::ForwardCommunication);
333  }
334 
335  // Count shared dofs that we own
336  typedef typename C::ParallelIndexSet::GlobalIndex GlobalIndex;
337  GlobalIndex count = 0;
338 
339  for (size_type i = 0; i < sharedDOF.N(); ++i)
340  if (owned_for_amg(i) && native(sharedDOF)[i][0])
341  ++count;
342 
343  dverb << gv.comm().rank() << ": shared block count is " << count.touint() << std::endl;
344 
345  // Communicate per-rank count of owned and shared DOFs to all processes.
346  std::vector<GlobalIndex> counts(_gfs.gridView().comm().size());
347  _gfs.gridView().comm().allgather(&count, 1, &(counts[0]));
348 
349  // Compute start index start_p = \sum_{i=0}^{i<p} counts_i
350  GlobalIndex start = std::accumulate(counts.begin(),counts.begin() + _rank,GlobalIndex(0));
351 
// NOTE(review): missing original line 352 — presumably the GIVector alias
// (a Backend::Vector of GlobalIndex); max() serves as the "no global index"
// sentinel below.
353  GIVector scalarIndices(_gfs, std::numeric_limits<GlobalIndex>::max());
354 
// Hand out consecutive global indices, starting at this rank's offset, to
// all owned shared DOFs.
355  for (size_type i = 0; i < sharedDOF.N(); ++i)
356  if (owned_for_amg(i) && native(sharedDOF)[i][0])
357  {
358  native(scalarIndices)[i][0] = start;
359  ++start;
360  }
361 
362  // Publish global indices for the shared DOFS to other processors.
// MinDataHandle works here because unowned DOFs still hold the max() sentinel,
// so the owner's (smaller) index always wins.
363  if (need_communication)
364  {
365  MinDataHandle<GFS,GIVector> data_handle(_gfs,scalarIndices);
366  _gfs.gridView().communicate(data_handle,_interiorBorder_all_interface,Dune::ForwardCommunication);
367  }
368 
369  // Setup the index set
370  c.indexSet().beginResize();
371  for (size_type i=0; i<scalarIndices.N(); ++i)
372  {
373  Dune::OwnerOverlapCopyAttributeSet::AttributeSet attr;
374  if(native(scalarIndices)[i][0] != std::numeric_limits<GlobalIndex>::max())
375  {
376  // global index exist in index set
377  if (owned_for_amg(i))
378  {
379  // This dof is managed by us.
380  attr = Dune::OwnerOverlapCopyAttributeSet::owner;
381  }
382  else
383  {
384  attr = Dune::OwnerOverlapCopyAttributeSet::copy;
385  }
386  c.indexSet().add(native(scalarIndices)[i][0], typename C::ParallelIndexSet::LocalIndex(i,attr));
387  }
388  }
389  c.indexSet().endResize();
390 
391  // Compute neighbors using communication
392  std::set<int> neighbors;
393 
394  if (need_communication)
395  {
396  GFSNeighborDataHandle<GFS,int> data_handle(_gfs,_rank,neighbors);
397  _gfs.gridView().communicate(data_handle,_all_all_interface,Dune::ForwardCommunication);
398  }
399 
400  c.remoteIndices().setNeighbours(neighbors);
401  c.remoteIndices().template rebuild<false>();
402  }
403 
404 #endif // HAVE_MPI
405 
// Selects the parallel information type to hand to AMG. This primary template
// is the sequential fallback (used when the MPI helper is the fake one) and
// picks Dune::Amg::SequentialInformation; the HAVE_MPI specialization below
// picks OwnerOverlapCopyCommunication.
// NOTE(review): missing original line 407 — the `struct CommSelector`
// declaration line itself. Confirm against the real source.
406  template<int s, bool isFakeMPIHelper>
408  {
409  typedef Dune::Amg::SequentialInformation type;
410  };
411 
412 
413 #if HAVE_MPI
414 
415  // Need MPI for OwnerOverlapCopyCommunication
416  template<int s>
417  struct CommSelector<s,false>
418  {
419  typedef OwnerOverlapCopyCommunication<bigunsignedint<s>,int> type;
420  };
421 
422 #endif // HAVE_MPI
423 
425 
426  } // namespace istl
427  } // namespace PDELab
428 } // namespace Dune
429 
430 #endif // DUNE_PDELAB_BACKEND_ISTL_PARALLELHELPER_HH
std::enable_if< std::is_base_of< impl::WrapperBase, T >::value, Native< T > & >::type native(T &t)
Definition: backend/interface.hh:199
GatherScatter data handle for creating a disjoint DOF partitioning.
Definition: genericdatahandle.hh:930
Definition: genericdatahandle.hh:716
Tag describing a BCRSMatrix.
Definition: backend/istl/tags.hh:60
Data handle for marking ghost DOFs.
Definition: genericdatahandle.hh:812
Data handle for marking shared DOFs.
Definition: genericdatahandle.hh:1014
Definition: parallelhelper.hh:45
static const unsigned int value
Definition: gridfunctionspace/tags.hh:177
void createIndexSetAndProjectForAMG(MatrixType &m, Comm &c)
Makes the matrix consistent and creates the parallel information for AMG.
Definition: parallelhelper.hh:407
typename native_type< T >::type Native
Alias of the native container type associated with T or T itself if it is not a backend wrapper...
Definition: backend/interface.hh:183
Tag describing an arbitrary FieldVector.
Definition: backend/istl/tags.hh:43
tags::container< T >::type container_tag(const T &)
Gets instance of container tag associated with T.
Definition: backend/istl/tags.hh:246
bool owned(const ContainerIndex &i) const
Tests whether the given index is owned by this process.
Definition: parallelhelper.hh:143
OwnerOverlapCopyCommunication< bigunsignedint< s >, int > type
Definition: parallelhelper.hh:419
Definition: adaptivity.hh:27
Tag describing an arbitrary FieldMatrix.
Definition: backend/istl/tags.hh:80
Dune::Amg::SequentialInformation type
Definition: parallelhelper.hh:409
Extracts the container tag from T.
Definition: backend/istl/tags.hh:142
RankIndex rank() const
Returns the MPI rank of this process.
Definition: parallelhelper.hh:229
bool isGhost(const ContainerIndex &i) const
Tests whether the given index belongs to a ghost DOF.
Definition: parallelhelper.hh:149
ParallelHelper(const GFS &gfs, int verbose=1)
Definition: parallelhelper.hh:61
typename impl::BackendVectorSelector< GridFunctionSpace, FieldType >::Type Vector
alias of the return type of BackendVectorSelector
Definition: backend/interface.hh:113
const std::string s
Definition: function.hh:1102
PromotionTraits< typename X::field_type, typename Y::field_type >::PromotedType disjointDot(const X &x, const Y &y) const
Calculates the (rank-local) dot product of x and y on the disjoint partition defined by the helper...
Definition: parallelhelper.hh:160
Data handle for collecting set of neighboring MPI ranks.
Definition: genericdatahandle.hh:1060
void maskForeignDOFs(X &x) const
Mask out all DOFs not owned by the current process with 0.
Definition: parallelhelper.hh:107
Tag describing a BlockVector.
Definition: backend/istl/tags.hh:23