My Project
WellHelpers.hpp
1 /*
2  Copyright 2016 SINTEF ICT, Applied Mathematics.
3  Copyright 2016 Statoil ASA.
4  Copyright 2020 OPM-OP AS.
5 
6  This file is part of the Open Porous Media project (OPM).
7 
8  OPM is free software: you can redistribute it and/or modify
9  it under the terms of the GNU General Public License as published by
10  the Free Software Foundation, either version 3 of the License, or
11  (at your option) any later version.
12 
13  OPM is distributed in the hope that it will be useful,
14  but WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  GNU General Public License for more details.
17 
18  You should have received a copy of the GNU General Public License
19  along with OPM. If not, see <http://www.gnu.org/licenses/>.
20 */
21 
22 
23 #ifndef OPM_WELLHELPERS_HEADER_INCLUDED
24 #define OPM_WELLHELPERS_HEADER_INCLUDED
25 
#include <opm/common/OpmLog/OpmLog.hpp>

#include <opm/grid/cpgrid/GridHelpers.hpp>

#include <opm/simulators/wells/ParallelWellInfo.hpp>

#include <dune/istl/bcrsmatrix.hh>
#include <dune/common/dynmatrix.hh>
#include <dune/common/parallel/mpihelper.hh>

#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>
37 
38 namespace Opm {
39 
40 
41 
42 
43  namespace wellhelpers
44  {
45 
56  template<typename Scalar>
58  {
59  public:
60  using Block = Dune::DynamicMatrix<Scalar>;
61  using Matrix = Dune::BCRSMatrix<Block>;
62 
        /// Constructor.
        ///
        /// Stores non-owning pointers to the well's B matrix and its
        /// parallel distribution info; both referenced objects must
        /// outlive this wrapper.
        ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& parallel_well_info)
        : B_(&B), parallel_well_info_(&parallel_well_info)
        {}
66 
        /// y = B x, reduced over all ranks computing for this well.
        ///
        /// \param x domain vector (per-cell contributions of the well).
        /// \param y range vector; on return holds the product, identical
        ///          on every rank that participates in this well.
        ///
        /// In debug parallel builds this first verifies that all ranks of
        /// the well's communicator really work on the same well and calls
        /// MPI_Abort if they do not.
        template<class X, class Y>
        void mv (const X& x, Y& y) const
        {
#if !defined(NDEBUG) && HAVE_MPI
            // We need to make sure that all ranks are actually computing
            // for the same well. Doing this by checking the name of the well.
            // Gather each rank's name length (including the terminating '\0') ...
            int cstring_size = parallel_well_info_->name().size()+1;
            std::vector<int> sizes(parallel_well_info_->communication().size());
            parallel_well_info_->communication().allgather(&cstring_size, 1, sizes.data());
            // ... then compute per-rank displacements into one receive buffer.
            std::vector<int> offsets(sizes.size()+1, 0); //last entry will be accumulated size
            std::partial_sum(sizes.begin(), sizes.end(), offsets.begin() + 1);
            std::vector<char> cstrings(offsets[sizes.size()]);
            bool consistentWells = true;
            // MPI_Allgatherv takes a non-const send buffer; the string itself
            // is never modified, so the const_cast is safe here.
            char* send = const_cast<char*>(parallel_well_info_->name().c_str());
            parallel_well_info_->communication().allgatherv(send, cstring_size,
                                                            cstrings.data(), sizes.data(),
                                                            offsets.data());
            for(std::size_t i = 0; i < sizes.size(); ++i)
            {
                std::string name(cstrings.data()+offsets[i]);
                if (name != parallel_well_info_->name())
                {
                    if (parallel_well_info_->communication().rank() == 0)
                    {
                        //only one process per well logs, might not be 0 of MPI_COMM_WORLD, though
                        std::string msg = std::string("Fatal Error: Not all ranks are computing for the same well")
                            + " well should be " + parallel_well_info_->name() + " but is "
                            + name;
                        OpmLog::debug(msg);
                    }
                    consistentWells = false;
                    break;
                }
            }
            parallel_well_info_->communication().barrier();
            // As not all processes are involved here we need to use MPI_Abort and hope MPI kills them all
            if (!consistentWells)
            {
                MPI_Abort(MPI_COMM_WORLD, 1);
            }
#endif
            // Local part of the product.
            B_->mv(x, y);

            if (this->parallel_well_info_->communication().size() > 1)
            {
                // Only do communication if we must.
                // The B matrix is basically a component-wise multiplication
                // with a vector followed by a parallel reduction. We do that
                // reduction to all ranks computing for the well to save the
                // broadcast when applying C^T.
                using YField = typename Y::block_type::value_type;
                assert(y.size() == 1);
                this->parallel_well_info_->communication().template allreduce<std::plus<YField>>(y[0].container().data(),
                                                                                                 y[0].container().size());
            }
        }
124 
126  template<class X, class Y>
127  void mmv (const X& x, Y& y) const
128  {
129  if (this->parallel_well_info_->communication().size() == 1)
130  {
131  // Do the same thing as before. The else branch
132  // produces different rounding errors and results
133  // slightly different iteration counts / well curves
134  B_->mmv(x, y);
135  }
136  else
137  {
138  Y temp(y);
139  mv(x, temp); // includes parallel reduction
140  y -= temp;
141  }
142  }
143  private:
144  const Matrix* B_;
145  const ParallelWellInfo* parallel_well_info_;
146  };
147 
148  inline
149  double computeHydrostaticCorrection(const double well_ref_depth, const double vfp_ref_depth,
150  const double rho, const double gravity) {
151  const double dh = vfp_ref_depth - well_ref_depth;
152  const double dp = rho * gravity * dh;
153 
154  return dp;
155  }
156 
157 
158 
160  template<typename Scalar, typename Comm>
161  void sumDistributedWellEntries(Dune::DynamicMatrix<Scalar>& mat, Dune::DynamicVector<Scalar>& vec,
162  const Comm& comm)
163  {
164  // DynamicMatrix does not use one contiguous array for storing the data
165  // but a DynamicVector of DynamicVectors. Hence we need to copy the data
166  // to contiguous memory for MPI.
167  if (comm.size() == 1)
168  {
169  return;
170  }
171  std::vector<Scalar> allEntries;
172  allEntries.reserve(mat.N()*mat.M()+vec.size());
173  for(const auto& row: mat)
174  {
175  allEntries.insert(allEntries.end(), row.begin(), row.end());
176  }
177  allEntries.insert(allEntries.end(), vec.begin(), vec.end());
178  comm.sum(allEntries.data(), allEntries.size());
179  auto pos = allEntries.begin();
180  auto cols = mat.cols();
181  for(auto&& row: mat)
182  {
183  std::copy(pos, pos + cols, &(row[0]));
184  pos += cols;
185  }
186  assert(std::size_t(allEntries.end() - pos) == vec.size());
187  std::copy(pos, allEntries.end(), &(vec[0]));
188  }
189 
190 
191 
192  template <int dim, class C2F, class FC>
193  std::array<double, dim>
194  getCubeDim(const C2F& c2f,
195  FC begin_face_centroids,
196  int cell)
197  {
198  std::array< std::vector<double>, dim > X;
199  {
200  const std::vector<double>::size_type
201  nf = std::distance(c2f[cell].begin(),
202  c2f[cell].end ());
203 
204  for (int d = 0; d < dim; ++d) {
205  X[d].reserve(nf);
206  }
207  }
208 
209  typedef typename C2F::row_type::const_iterator FI;
210 
211  for (FI f = c2f[cell].begin(), e = c2f[cell].end(); f != e; ++f) {
212  using Opm::UgGridHelpers::increment;
213  using Opm::UgGridHelpers::getCoordinate;
214 
215  const FC& fc = increment(begin_face_centroids, *f, dim);
216 
217  for (int d = 0; d < dim; ++d) {
218  X[d].push_back(getCoordinate(fc, d));
219  }
220  }
221 
222  std::array<double, dim> cube;
223  for (int d = 0; d < dim; ++d) {
224  typedef std::vector<double>::iterator VI;
225  typedef std::pair<VI,VI> PVI;
226 
227  const PVI m = std::minmax_element(X[d].begin(), X[d].end());
228 
229  cube[d] = *m.second - *m.first;
230  }
231 
232  return cube;
233  }
234 
235 
236  } // namespace wellhelpers
237 
238 }
239 
240 #endif
Class encapsulating some information about parallel wells.
Definition: ParallelWellInfo.hpp:252
const std::string & name() const
Name of the well.
Definition: ParallelWellInfo.hpp:347
A wrapper around the B matrix for distributed wells.
Definition: WellHelpers.hpp:58
void mv(const X &x, Y &y) const
y = A x
Definition: WellHelpers.hpp:69
void mmv(const X &x, Y &y) const
y = A x
Definition: WellHelpers.hpp:127
This file contains a set of helper functions used by VFPProd / VFPInj.
Definition: BlackoilPhases.hpp:27