LORENE
connection_fspher.C
/*
 *  Methods of class Connection_fspher.
 *
 *   (see file connection.h for documentation)
 *
 */

/*
 *   Copyright (c) 2003-2004 Eric Gourgoulhon & Jerome Novak
 *
 *   This file is part of LORENE.
 *
 *   LORENE is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License version 2
 *   as published by the Free Software Foundation.
 *
 *   LORENE is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with LORENE; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */



/*
 * $Id: connection_fspher.C,v 1.25 2016/12/05 16:17:50 j_novak Exp $
 * $Log: connection_fspher.C,v $
 * Revision 1.25  2016/12/05 16:17:50  j_novak
 * Suppression of some global variables (file names, loch, ...) to prevent redefinitions
 *
 * Revision 1.24  2014/10/13 08:52:50  j_novak
 * Lorene classes and functions now belong to the namespace Lorene.
 *
 * Revision 1.23  2014/10/06 15:13:04  j_novak
 * Modified #include directives to use c++ syntax.
 *
 * Revision 1.22  2005/05/25 16:11:03  j_novak
 * Better handling of the case with no compactified domain.
 *
 * Revision 1.21  2004/01/29 15:21:21  e_gourgoulhon
 * Method p_divergence: changed treatment of dzpuis.
 * Methods p_derive_cov and p_divergence: add warning if all the input components
 * do not have the same dzpuis.
 *
 * Revision 1.20  2004/01/28 13:25:40  j_novak
 * The ced_mult_r arguments have been suppressed from the Scalar::*dsd* methods.
 * In the div/mult _r_dzpuis, there is no more default value.
 *
 * Revision 1.19  2004/01/27 15:10:02  j_novak
 * New methods Scalar::div_r_dzpuis(int) and Scalar_mult_r_dzpuis(int)
 * which replace div_r_inc*. Tried to clean the dzpuis handling.
 * WARNING: no testing at this point!!
 *
 * Revision 1.18  2004/01/23 07:57:06  e_gourgoulhon
 * Slight change in some comment.
 *
 * Revision 1.17  2004/01/22 16:14:22  e_gourgoulhon
 * Method p_derive_cov: reorganization of the dzpuis treatment.
 * Added the case of input dzpuis = 2.
 *
 * Revision 1.16  2004/01/04 21:00:50  e_gourgoulhon
 * Better handling of tensor symmetries in methods p_derive_cov() and
 * p_divergence() (thanks to the new class Tensor_sym).
 *
 * Revision 1.15  2004/01/01 11:24:04  e_gourgoulhon
 * Full reorganization of method p_derive_cov: the main loop is now
 * on the indices of the *output* tensor (to take into account
 * symmetries in the input and output tensors).
 *
 * Revision 1.14  2003/12/27 14:59:52  e_gourgoulhon
 * -- Method derive_cov() suppressed.
 * -- Change of the position of the derivation index from the first one
 *    to the last one in methods p_derive_cov() and p_divergence().
 *
 * Revision 1.13  2003/11/03 13:37:58  j_novak
 * Still dzpuis...
 *
 * Revision 1.12  2003/11/03 11:14:18  j_novak
 * Treatment of the case dzpuis = 4.
 *
 * Revision 1.11  2003/11/03 10:58:30  j_novak
 * Treatment of the general case for divergence.
 *
 * Revision 1.10  2003/10/22 13:08:03  j_novak
 * Better handling of dzpuis flags
 *
 * Revision 1.9  2003/10/16 15:26:48  e_gourgoulhon
 * Name of method Scalar::div_r_ced() changed to Scalar::div_r_inc2().
 *
 * Revision 1.8  2003/10/16 14:21:36  j_novak
 * The calculation of the divergence of a Tensor is now possible.
 *
 * Revision 1.7  2003/10/15 10:46:18  e_gourgoulhon
 * Introduced call to the new method Scalar::div_tant to perform
 * division by tan(theta) in derive_cov.
 *
 * Revision 1.6  2003/10/11 16:45:43  e_gourgoulhon
 * Suppressed the call to Itbl::set_etat_qcq() after
 * the construction of the Itbl's.
 *
 * Revision 1.5  2003/10/11 14:39:50  e_gourgoulhon
 * Suppressed declaration of unused arguments in some methods.
 *
 * Revision 1.4  2003/10/06 13:58:47  j_novak
 * The memory management has been improved.
 * Implementation of the covariant derivative with respect to the exact Tensor
 * type.
 *
 * Revision 1.3  2003/10/05 21:09:23  e_gourgoulhon
 * Method derive_cov: multiplication by r^2 in the CED.
 *
 * Revision 1.2  2003/10/01 21:49:45  e_gourgoulhon
 * First version of derive_cov --- not tested yet.
 *
 * Revision 1.1  2003/10/01 15:42:49  e_gourgoulhon
 * still ongoing...
 *
 *
 *
 * $Header: /cvsroot/Lorene/C++/Source/Connection/connection_fspher.C,v 1.25 2016/12/05 16:17:50 j_novak Exp $
 *
 */

// C++ headers
#include "headcpp.h"

// C headers
#include <cstdlib>

// Lorene headers
#include "connection.h"

            //------------------------------//
            //        Constructors          //
            //------------------------------//

// Constructor from a spherical flat-metric-orthonormal basis

namespace Lorene {
Connection_fspher::Connection_fspher(const Map& mpi, const Base_vect_spher& bi)
    : Connection_flat(mpi, bi) {

}

// Copy constructor
Connection_fspher::Connection_fspher(const Connection_fspher& ci)
    : Connection_flat(ci) {

}


            //----------------------------//
            //         Destructor         //
            //----------------------------//

Connection_fspher::~Connection_fspher(){

}


            //--------------------------------//
            //     Mutators / assignment      //
            //--------------------------------//


void Connection_fspher::operator=(const Connection_fspher& ) {

    cout << "Connection_fspher::operator= : not implemented yet !" << endl ;
    abort() ;

}


            //-----------------------------//
            //    Computational methods    //
            //-----------------------------//

// Covariant derivative, returning a pointer.
//-------------------------------------------
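// Reminder (standard flat-space formulas; they match the connection terms
// assembled below): with respect to the orthonormal spherical basis
// (e_r, e_theta, e_phi), the only nonzero connection coefficients
// Gamma^i_{jk} of the flat metric (derivation index k last) are
//
//    Gamma^theta_{r theta} = Gamma^phi_{r phi}  =  1/r
//    Gamma^r_{theta theta} = Gamma^r_{phi phi}  = -1/r
//    Gamma^phi_{theta phi}   =  1/( r tan(theta) )
//    Gamma^theta_{phi phi}   = -1/( r tan(theta) )
//
// For a vector V this gives, e.g., for the theta derivation index:
//    (nabla V)^r_theta     = 1/r ( dV^r/dtheta - V^theta )
//    (nabla V)^theta_theta = 1/r ( dV^theta/dtheta + V^r )
//    (nabla V)^phi_theta   = 1/r   dV^phi/dtheta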

Tensor* Connection_fspher::p_derive_cov(const Tensor& uu) const {

    // Notations: suffix 0 in name <=> input tensor
    //            suffix 1 in name <=> output tensor

    int valence0 = uu.get_valence() ;
    int valence1 = valence0 + 1 ;
    int valence1m1 = valence1 - 1 ;   // same as valence0, but introduced for
                                      //  the sake of clarity
    int ncomp0 = uu.get_n_comp() ;

    // Protections
    // -----------
    if (valence0 >= 1) {
        assert(uu.get_triad() == triad) ;
    }

    // Creation of the result (pointer)
    // --------------------------------
    Tensor* resu ;

    // If uu is a Scalar, the result is a vector
    if (valence0 == 0)
        resu = new Vector(*mp, COV, triad) ;
    else {

        // Type of indices of the result :
        Itbl tipe(valence1) ;
        const Itbl& tipeuu = uu.get_index_type() ;
        for (int id = 0; id<valence0; id++) {
            tipe.set(id) = tipeuu(id) ;     // First indices = same as uu
        }
        tipe.set(valence1m1) = COV ;        // last index is the derivation index

        // if uu is a Tensor_sym, the result is also a Tensor_sym:
        const Tensor* puu = &uu ;
        const Tensor_sym* puus = dynamic_cast<const Tensor_sym*>(puu) ;
        if ( puus != 0x0 ) {    // the input tensor is symmetric
            resu = new Tensor_sym(*mp, valence1, tipe, *triad,
                                  puus->sym_index1(), puus->sym_index2()) ;
        }
        else {
            resu = new Tensor(*mp, valence1, tipe, *triad) ;    // no symmetry
        }

    }

    int ncomp1 = resu->get_n_comp() ;

    Itbl ind1(valence1) ;   // working Itbl to store the indices of resu
    Itbl ind0(valence0) ;   // working Itbl to store the indices of uu
    Itbl ind(valence0) ;    // working Itbl to store the indices of uu

    Scalar tmp(*mp) ;       // working scalar

    // Determination of the dzpuis parameter of the result  --> dz_resu
    // -----------------------------------------------------------------
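    // (dzpuis is the power of r by which the stored field is multiplied in the
    //  compactified external domain (CED). As implied by the rule coded below,
    //  d/dr of a field with dzpuis = 0 comes out with dzpuis = 2, and with
    //  dzpuis = dz_in + 1 otherwise; the connection terms are then brought to
    //  the same value via div_r_dzpuis(dz_resu). dz_resu is set to 0 when the
    //  outermost domain is not compactified.)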
    int dz_in = 0 ;
    for (int ic=0; ic<ncomp0; ic++) {
        int dzp = uu(uu.indices(ic)).get_dzpuis() ;
        assert(dzp >= 0) ;
        if (dzp > dz_in) dz_in = dzp ;
    }

#ifndef NDEBUG
    // Check : do all components have the same dzpuis ?
    for (int ic=0; ic<ncomp0; ic++) {
        if ( !(uu(uu.indices(ic)).check_dzpuis(dz_in)) ) {
            cout << "######## WARNING #######\n" ;
            cout << "  Connection_fspher::p_derive_cov : the tensor components \n"
                 << "  do not all have the same dzpuis ! : \n"
                 << "  ic, dzpuis(ic), dz_in : " << ic << "  "
                 << uu(uu.indices(ic)).get_dzpuis() << "  " << dz_in << endl ;
        }
    }
#endif

    int dz_resu = (dz_in == 0) ? 2 : dz_in + 1 ;
    int nzm1 = mp->get_mg()->get_nzone() - 1 ;
    if (mp->get_mg()->get_type_r(nzm1) != UNSURR) dz_resu = 0 ;

    // Loop on all the components of the output tensor
    // -----------------------------------------------
    for (int ic=0; ic<ncomp1; ic++) {

        // indices corresponding to the component no. ic in the output tensor
        ind1 = resu->indices(ic) ;

        // Component no. ic:
        Scalar& cresu = resu->set(ind1) ;

        // Indices of the input tensor
        for (int id = 0; id < valence0; id++) {
            ind0.set(id) = ind1(id) ;
        }

        // Value of last index (derivation index)
        int k = ind1(valence1m1) ;

        switch (k) {

            case 1 : {  // Derivation index = r
                        //---------------------

                cresu = (uu(ind0)).dsdr() ;     // d/dr

                // all the connection coefficients Gamma^i_{jk} are zero for k=1
                break ;
            }

            case 2 : {  // Derivation index = theta
                        //-------------------------

                cresu = (uu(ind0)).srdsdt() ;   // 1/r d/dtheta

                // Loop on all the indices of uu
                for (int id=0; id<valence0; id++) {

                    switch ( ind0(id) ) {

                        case 1 : {  // Gamma^r_{l theta} V^l
                                    // or -Gamma^l_{r theta} V_l
                            ind = ind0 ;
                            ind.set(id) = 2 ;   // l = theta

                            // Division by r :
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            cresu -= tmp ;
                            break ;
                        }

                        case 2 : {  // Gamma^theta_{l theta} V^l
                                    // or -Gamma^l_{theta theta} V_l
                            ind = ind0 ;
                            ind.set(id) = 1 ;   // l = r
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            cresu += tmp ;
                            break ;
                        }

                        case 3 : {  // Gamma^phi_{l theta} V^l
                                    // or -Gamma^l_{phi theta} V_l
                            break ;
                        }

                        default : {
                            cerr << "Connection_fspher::p_derive_cov : index problem ! "
                                 << endl ;
                            abort() ;
                        }
                    }

                }
                break ;
            }


            case 3 : {  // Derivation index = phi
                        //-----------------------

                cresu = (uu(ind0)).srstdsdp() ;     // 1/(r sin(theta)) d/dphi

                // Loop on all the indices of uu
                for (int id=0; id<valence0; id++) {

                    switch ( ind0(id) ) {

                        case 1 : {  // Gamma^r_{l phi} V^l
                                    // or -Gamma^l_{r phi} V_l
                            ind = ind0 ;
                            ind.set(id) = 3 ;   // l = phi
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            cresu -= tmp ;
                            break ;
                        }

                        case 2 : {  // Gamma^theta_{l phi} V^l
                                    // or -Gamma^l_{theta phi} V_l
                            ind = ind0 ;
                            ind.set(id) = 3 ;   // l = phi
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            tmp.div_tant() ;    // division by tan(theta)

                            cresu -= tmp ;
                            break ;
                        }

                        case 3 : {  // Gamma^phi_{l phi} V^l
                                    // or -Gamma^l_{phi phi} V_l

                            ind = ind0 ;
                            ind.set(id) = 1 ;   // l = r
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            cresu += tmp ;

                            ind.set(id) = 2 ;   // l = theta
                            tmp = uu(ind) ;
                            tmp.div_r_dzpuis(dz_resu) ;

                            tmp.div_tant() ;    // division by tan(theta)

                            cresu += tmp ;
                            break ;
                        }

                        default : {
                            cerr << "Connection_fspher::p_derive_cov : index problem ! "
                                 << endl ;
                            abort() ;
                        }
                    }

                }

                break ;
            }

            default : {
                cerr << "Connection_fspher::p_derive_cov : index problem ! \n" ;
                abort() ;
            }

        }   // End of switch on the derivation index


    }   // End of loop on all the components of the output tensor

    // That's it!
    // ----------
    return resu ;

}



// Divergence, returning a pointer.
//---------------------------------
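// As a check on the terms assembled below: for a vector V the result reduces
// to the standard expression of the divergence in orthonormal spherical
// components,
//
//    div V = dV^r/dr + 2 V^r/r + 1/r dV^theta/dtheta
//            + V^theta/( r tan(theta) ) + 1/( r sin(theta) ) dV^phi/dphi
//
// For tensors of higher valence, the same derivative and connection terms act
// on the last (contracted) index, while the loops below add the connection
// terms associated with the remaining free indices.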

Tensor* Connection_fspher::p_divergence(const Tensor& uu) const {

    // Notations: suffix 0 in name <=> input tensor
    //            suffix 1 in name <=> output tensor

    int valence0 = uu.get_valence() ;
    int valence1 = valence0 - 1 ;
    int valence0m1 = valence0 - 1 ;   // same as valence1 but introduced for
                                      //  the sake of clarity
    int ncomp0 = uu.get_n_comp() ;

    // Protections
    // -----------
    assert (valence0 >= 1) ;
    assert (uu.get_triad() == triad) ;

    // Last index must be contravariant:
    assert (uu.get_index_type(valence0-1) == CON) ;


    // Creation of the pointer on the result tensor
    // --------------------------------------------
    Tensor* resu ;

    if (valence0 == 1)      // if uu is a Vector, the result is a Scalar
        resu = new Scalar(*mp) ;
    else {

        // Type of indices of the result :
        Itbl tipe(valence1) ;
        const Itbl& tipeuu = uu.get_index_type() ;
        for (int id = 0; id<valence1; id++) {
            tipe.set(id) = tipeuu(id) ;     // type of remaining indices =
        }                                   //  same as uu indices

        if (valence0 == 2) {    // if uu is a rank 2 tensor, the result is a Vector
            resu = new Vector(*mp, tipe(0), *triad) ;
        }
        else {
            // if uu is a Tensor_sym, the result might also be a Tensor_sym:
            const Tensor* puu = &uu ;
            const Tensor_sym* puus = dynamic_cast<const Tensor_sym*>(puu) ;
            if ( puus != 0x0 ) {    // the input tensor is symmetric

                if (puus->sym_index2() != valence0 - 1) {

                    // the symmetry is preserved:
                    if (valence1 == 2) {
                        resu = new Sym_tensor(*mp, tipe, *triad) ;
                    }
                    else {
                        resu = new Tensor_sym(*mp, valence1, tipe, *triad,
                                              puus->sym_index1(), puus->sym_index2()) ;
                    }
                }
                else {  // the symmetry is lost:
                    resu = new Tensor(*mp, valence1, tipe, *triad) ;
                }
            }
            else {  // no symmetry in the input tensor:
                resu = new Tensor(*mp, valence1, tipe, *triad) ;
            }
        }

    }
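    // (Examples: the divergence of a rank 2 tensor T^{ij} is a Vector; for a
    //  valence-3 tensor symmetric in its first two indices, the symmetry does
    //  not involve the contracted (last) index and is preserved, giving a
    //  Sym_tensor; if instead the symmetry involves the last index, the
    //  result is a generic Tensor.)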

    int ncomp1 = resu->get_n_comp() ;

    Itbl ind0(valence0) ;   // working Itbl to store the indices of uu
    Itbl ind1(valence1) ;   // working Itbl to store the indices of resu
    Itbl ind(valence0) ;    // working Itbl to store the indices of uu

    Scalar tmp1(*mp) ;      // working scalar
    Scalar tmp2(*mp) ;      // working scalar

    // Determination of the dzpuis parameter of the result  --> dz_resu
    // -----------------------------------------------------------------
    int dz_in = 0 ;
    for (int ic=0; ic<ncomp0; ic++) {
        int dzp = uu(uu.indices(ic)).get_dzpuis() ;
        assert(dzp >= 0) ;
        if (dzp > dz_in) dz_in = dzp ;
    }

#ifndef NDEBUG
    // Check : do all components have the same dzpuis ?
    for (int ic=0; ic<ncomp0; ic++) {
        if ( !(uu(uu.indices(ic)).check_dzpuis(dz_in)) ) {
            cout << "######## WARNING #######\n" ;
            cout << "  Connection_fspher::p_divergence : the tensor components \n"
                 << "  do not all have the same dzpuis ! : \n"
                 << "  ic, dzpuis(ic), dz_in : " << ic << "  "
                 << uu(uu.indices(ic)).get_dzpuis() << "  " << dz_in << endl ;
        }
    }
#endif

    int dz_resu = (dz_in == 0) ? 2 : dz_in + 1 ;

    // Loop on all the components of the output tensor
    for (int ic=0; ic<ncomp1; ic++) {

        ind1 = resu->indices(ic) ;
        Scalar& cresu = resu->set(ind1) ;

        // Derivation index = r
        // --------------------
        int k = 1 ;

        // indices (ind1,k) in the input tensor
        for (int id = 0; id < valence1; id++) {
            ind0.set(id) = ind1(id) ;
        }
        ind0.set(valence0m1) = k ;

        cresu = uu(ind0).dsdr() ;       // dT^{l r}/dr

        // Derivation index = theta
        // ------------------------
        k = 2 ;

        // indices (ind1,k) in the input tensor
        for (int id = 0; id < valence1; id++) {
            ind0.set(id) = ind1(id) ;
        }
        ind0.set(valence0m1) = k ;

        tmp1 = uu(ind0).dsdt() ;        // dT^{l theta}/dtheta

        ind = ind0 ;
        ind.set(valence0m1) = 1 ;
        tmp1 += uu(ind) ;   // Gamma^theta_{r theta} T^{l r} (div_r is done later)


        // Loop on all the (valence0-1) first indices of uu
        for (int id=0; id<valence0m1; id++) {

            switch ( ind0(id) ) {
                case 1 : {  // Gamma^r_{l theta} V^l
                            // or -Gamma^l_{r theta} V_l
                    ind = ind0 ;
                    ind.set(id) = 2 ;   // l = theta
                    tmp1 -= uu(ind) ;
                    break ;
                }

                case 2 : {  // Gamma^theta_{l theta} V^l
                            // or -Gamma^l_{theta theta} V_l
                    ind = ind0 ;
                    ind.set(id) = 1 ;   // l = r
                    tmp1 += uu(ind) ;
                    break ;
                }

                case 3 : {  // Gamma^phi_{l theta} V^l
                            // or -Gamma^l_{phi theta} V_l
                    break ;
                }

                default : {
                    cerr << "Connection_fspher::p_divergence : index problem ! "
                         << endl ;
                    abort() ;
                }
            }

        }

        // Derivation index = phi
        // ----------------------
        k = 3 ;

        // indices (ind1,k) in the input tensor
        for (int id = 0; id < valence1; id++) {
            ind0.set(id) = ind1(id) ;
        }
        ind0.set(valence0m1) = k ;

        tmp1 += uu(ind0).stdsdp() ;     // 1/sin(theta) dT^{l phi}/dphi

        ind = ind0 ;
        ind.set(valence0m1) = 1 ;
        tmp1 += uu(ind) ;   // Gamma^phi_{r phi} T^{l r} (div_r is done later)
        ind.set(valence0m1) = 2 ;
        tmp2 = uu(ind) ;    // Gamma^phi_{theta phi} T^{l theta} (div_r is done later)

        // Loop on all the (valence0-1) first indices of uu
        for (int id=0; id<valence0-1; id++) {

            switch ( ind0(id) ) {
                case 1 : {  // Gamma^r_{l phi} V^l
                            // or -Gamma^l_{r phi} V_l
                    ind = ind0 ;
                    ind.set(id) = 3 ;   // l = phi
                    tmp1 -= uu(ind) ;
                    break ;
                }

                case 2 : {  // Gamma^theta_{l phi} V^l
                            // or -Gamma^l_{theta phi} V_l
                    ind = ind0 ;
                    ind.set(id) = 3 ;   // l = phi
                    tmp2 -= uu(ind) ;
                    break ;
                }

                case 3 : {  // Gamma^phi_{l phi} V^l
                            // or -Gamma^l_{phi phi} V_l
                    ind = ind0 ;

                    ind.set(id) = 1 ;   // l = r
                    tmp1 += uu(ind) ;

                    ind.set(id) = 2 ;   // l = theta
                    tmp2 += uu(ind) ;
                    break ;
                }

                default : {
                    cerr << "Connection_fspher::p_divergence : index problem ! "
                         << endl ;
                    abort() ;
                }
            }
        }

        // There remains a division by tan(theta) and r:
        //----------------------------------------------
        tmp2.div_tant() ;
        tmp1 += tmp2 ;
        tmp1.div_r_dzpuis(dz_resu) ;

        cresu += tmp1 ;     // added to the d/dr term already stored in cresu

    }

    // That's it!
    // ----------
    return resu ;

}



}