LORENE
tensor_calculus_ext.C
/*
 *  Functions external to class Tensor for tensor calculus
 *
 *
 */

/*
 *   Copyright (c) 2003-2004 Eric Gourgoulhon & Jerome Novak
 *
 *   Copyright (c) 1999-2001 Philippe Grandclement (for preceding class Tenseur)
 *
 *   This file is part of LORENE.
 *
 *   LORENE is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   LORENE is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with LORENE; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */



/*
 * $Id: tensor_calculus_ext.C,v 1.16 2024/01/26 17:44:25 g_servignat Exp $
 * $Log: tensor_calculus_ext.C,v $
 * Revision 1.16  2024/01/26 17:44:25  g_servignat
 * Updated the Pseudopolytrope_1D class to be consistent with the paper (i.e. with a GPP in the middle)
 *
 * Revision 1.15  2016/12/05 16:18:17  j_novak
 * Suppression of some global variables (file names, loch, ...) to prevent redefinitions
 *
 * Revision 1.14  2014/10/13 08:53:44  j_novak
 * Lorene classes and functions now belong to the namespace Lorene.
 *
 * Revision 1.13  2014/10/06 15:13:20  j_novak
 * Modified #include directives to use c++ syntax.
 *
 * Revision 1.12  2008/12/05 08:44:02  j_novak
 * New flag to control the "verbosity" of maxabs.
 *
 * Revision 1.11  2004/05/13 21:32:29  e_gourgoulhon
 * Added functions central_value, max_all_domains,
 * min_all_domains and maxabs_all_domains.
 *
 * Revision 1.10  2004/02/27 21:15:34  e_gourgoulhon
 * Suppressed function contract_desal (since function contract has now
 * the boolean argument "desaliasing").
 *
 * Revision 1.9  2004/02/19 22:11:00  e_gourgoulhon
 * Added argument "comment" in functions max, min, etc...
 *
 * Revision 1.8  2004/02/18 18:51:04  e_gourgoulhon
 * Tensorial product moved from file tensor_calculus.C, since
 * it is not a method of class Tensor.
 *
 * Revision 1.7  2004/02/18 15:56:23  e_gourgoulhon
 * -- Added function contract for double contraction.
 * -- Efficiency improved in functions contract: better handling of variable
 *    "work" (work is now a reference on the relevant component of the result).
 *
 * Revision 1.6  2004/01/23 08:00:16  e_gourgoulhon
 * Minor modifs. in output of methods min, max, maxabs, diffrel to
 * better handle the display in the scalar case.
 *
 * Revision 1.5  2004/01/15 10:59:53  f_limousin
 * Added method contract_desal for the contraction of two tensors with desaliasing
 *
 * Revision 1.4  2004/01/14 11:38:32  f_limousin
 * Added method contract for one tensor
 *
 * Revision 1.3  2003/11/05 15:29:36  e_gourgoulhon
 * Added declaration of external functions max, min, maxabs,
 * diffrel and diffrelmax.
 *
 * Revision 1.2  2003/10/11 16:47:10  e_gourgoulhon
 * Suppressed the call to Itbl::set_etat_qcq() after the construction
 * of the Itbl's, thanks to the new property of the Itbl class.
 *
 * Revision 1.1  2003/10/06 15:13:38  e_gourgoulhon
 * Tensor contraction.
 *
 *
 * $Header: /cvsroot/Lorene/C++/Source/Tensor/tensor_calculus_ext.C,v 1.16 2024/01/26 17:44:25 g_servignat Exp $
 *
 */

// C headers
#include <cstdlib>
#include <cassert>
#include <cmath>

// Lorene headers
#include "tensor.h"



            //-----------------------//
            //   Tensorial product   //
            //-----------------------//


namespace Lorene {
Tensor operator*(const Tensor& t1, const Tensor& t2) {

    assert (t1.mp == t2.mp) ;

    int val_res = t1.valence + t2.valence ;

    // Index types of the result: those of t1 followed by those of t2
    Itbl tipe (val_res) ;

    for (int i=0 ; i<t1.valence ; i++)
        tipe.set(i) = t1.type_indice(i) ;
    for (int i=0 ; i<t2.valence ; i++)
        tipe.set(i+t1.valence) = t2.type_indice(i) ;

    if ( (t1.valence != 0) && (t2.valence != 0) ) {
        assert ( *(t1.get_triad()) == *(t2.get_triad()) ) ;
    }

    const Base_vect* triad_res ;
    if (t1.valence != 0) {
        triad_res = t1.get_triad() ;
    }
    else {
        triad_res = t2.get_triad() ;
    }

    Tensor res(*t1.mp, val_res, tipe, triad_res) ;

    Itbl jeux_indice_t1 (t1.valence) ;
    Itbl jeux_indice_t2 (t2.valence) ;

    // Loop on the components of the result
    for (int i=0 ; i<res.n_comp ; i++) {
        Itbl jeux_indice_res(res.indices(i)) ;
        for (int j=0 ; j<t1.valence ; j++)
            jeux_indice_t1.set(j) = jeux_indice_res(j) ;
        for (int j=0 ; j<t2.valence ; j++)
            jeux_indice_t2.set(j) = jeux_indice_res(j+t1.valence) ;

        res.set(jeux_indice_res) = t1(jeux_indice_t1) * t2(jeux_indice_t2) ;
    }

    return res ;
}
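
// Usage sketch (illustrative only; assumes a mapping `map`, e.g. a Map_af,
// has already been constructed). The tensorial product of a contravariant
// vector u^i and a covariant vector w_j yields a valence-2 tensor t^i_j :
//
//     Vector u(map, CON, map.get_bvect_spher()) ;
//     Vector w(map, COV, map.get_bvect_spher()) ;
//     // ... fill u and w ...
//     Tensor t = u * w ;   // t(i,j) = u(i) * w(j), index types (CON, COV)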


            //------------------//
            //   Contraction    //
            //------------------//

// Contraction on one index
// ------------------------

Tensor contract(const Tensor& t1, int ind1, const Tensor& t2, int ind2,
                bool desaliasing) {

    int val1 = t1.get_valence() ;
    int val2 = t2.get_valence() ;

    // Checks:
    assert((ind1>=0) && (ind1<val1)) ;
    assert((ind2>=0) && (ind2<val2)) ;
    assert(t1.get_mp() == t2.get_mp()) ;

    // Is the contraction possible?
    if ( (val1 != 0) && (val2 != 0) ) {
        assert ( *(t1.get_triad()) == *(t2.get_triad()) ) ;
    }
    assert (t1.get_index_type(ind1) != t2.get_index_type(ind2)) ;

    int val_res = val1 + val2 - 2 ;

    // Index types of the result: those of t1 (except ind1), then those of t2 (except ind2)
    Itbl tipe(val_res) ;

    for (int i=0 ; i<ind1 ; i++)
        tipe.set(i) = t1.get_index_type(i) ;
    for (int i=ind1 ; i<val1-1 ; i++)
        tipe.set(i) = t1.get_index_type(i+1) ;
    for (int i=val1-1 ; i<val1+ind2-1 ; i++)
        tipe.set(i) = t2.get_index_type(i-val1+1) ;
    for (int i=val1+ind2-1 ; i<val_res ; i++)
        tipe.set(i) = t2.get_index_type(i-val1+2) ;

    const Base_vect* triad_res = (val_res == 0) ? 0x0 : t1.get_triad() ;

    Tensor res(t1.get_mp(), val_res, tipe, triad_res) ;

    // Loop on the components of the result:

    Itbl jeux_indice_t1(val1) ;
    Itbl jeux_indice_t2(val2) ;

    for (int i=0 ; i<res.get_n_comp() ; i++) {

        Itbl jeux_indice_res(res.indices(i)) ;

        for (int j=0 ; j<ind1 ; j++)
            jeux_indice_t1.set(j) = jeux_indice_res(j) ;

        for (int j=ind1+1 ; j<val1 ; j++)
            jeux_indice_t1.set(j) = jeux_indice_res(j-1) ;

        for (int j=0 ; j<ind2 ; j++)
            jeux_indice_t2.set(j) = jeux_indice_res(val1+j-1) ;

        for (int j=ind2+1 ; j<val2 ; j++)
            jeux_indice_t2.set(j) = jeux_indice_res(val1+j-2) ;

        Scalar& work = res.set(jeux_indice_res) ;
        work.set_etat_zero() ;

        // Sum on the contracted index (spatial indices run from 1 to 3)
        for (int j=1 ; j<=3 ; j++) {
            jeux_indice_t1.set(ind1) = j ;
            jeux_indice_t2.set(ind2) = j ;
            if (desaliasing) {
                work += t1(jeux_indice_t1) | t2(jeux_indice_t2) ;
            }
            else {
                work += t1(jeux_indice_t1) * t2(jeux_indice_t2) ;
            }
        }

    }

    return res ;
}
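
// Usage sketch (illustrative only; assumes both fields are defined on the same
// mapping `map` and triad): contraction of the first (contravariant) index of
// a valence-2 tensor t^{ij} with the covariant index of a 1-form w_i, with
// spectral dealiasing enabled :
//
//     Tensor t(map, 2, CON, map.get_bvect_spher()) ;
//     Vector w(map, COV, map.get_bvect_spher()) ;
//     // ... fill t and w ...
//     Tensor v = contract(t, 0, w, 0, true) ;   // v^j = t^{ij} w_i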


// Contraction on two indices
// --------------------------

Tensor contract(const Tensor& t1, int i1, int j1,
                const Tensor& t2, int i2, int j2,
                bool desaliasing) {

    int val1 = t1.get_valence() ;
    int val2 = t2.get_valence() ;

    // Checks:
    assert( val1 >= 2 ) ;
    assert( val2 >= 2 ) ;
    assert( (0<=i1) && (i1<j1) && (j1<val1) ) ;
    assert( (0<=i2) && (i2<j2) && (j2<val2) ) ;
    assert( t1.get_mp() == t2.get_mp() ) ;

    // Is the contraction possible?
    assert( *(t1.get_triad()) == *(t2.get_triad()) ) ;
    assert( t1.get_index_type(i1) != t2.get_index_type(i2) ) ;
    assert( t1.get_index_type(j1) != t2.get_index_type(j2) ) ;

    int val_res = val1 + val2 - 4 ;

    // Index types of the result: those of t1 (except i1, j1), then those of t2 (except i2, j2)
    Itbl tipe(val_res) ;

    for (int i=0 ; i<i1 ; i++)
        tipe.set(i) = t1.get_index_type(i) ;

    for (int i=i1 ; i<j1-1 ; i++)
        tipe.set(i) = t1.get_index_type(i+1) ;

    for (int i=j1-1 ; i<val1-2 ; i++)
        tipe.set(i) = t1.get_index_type(i+2) ;

    for (int i=val1-2 ; i<val1-2+i2 ; i++)
        tipe.set(i) = t2.get_index_type(i-val1+2) ;

    for (int i=val1-2+i2 ; i<val1+j2-3 ; i++)
        tipe.set(i) = t2.get_index_type(i-val1+3) ;

    for (int i=val1+j2-3 ; i<val_res ; i++)
        tipe.set(i) = t2.get_index_type(i-val1+4) ;

    const Base_vect* triad_res = (val_res == 0) ? 0x0 : t1.get_triad() ;

    Tensor res(t1.get_mp(), val_res, tipe, triad_res) ;

    // Loop on the components of the result:

    Itbl jeux_indice_t1(val1) ;
    Itbl jeux_indice_t2(val2) ;

    for (int ic=0 ; ic<res.get_n_comp() ; ic++) {

        Itbl jeux_indice_res(res.indices(ic)) ;

        for (int k=0 ; k<i1 ; k++)
            jeux_indice_t1.set(k) = jeux_indice_res(k) ;

        for (int k=i1+1 ; k<j1 ; k++)
            jeux_indice_t1.set(k) = jeux_indice_res(k-1) ;

        for (int k=j1+1 ; k<val1 ; k++)
            jeux_indice_t1.set(k) = jeux_indice_res(k-2) ;

        for (int k=0 ; k<i2 ; k++)
            jeux_indice_t2.set(k) = jeux_indice_res(val1+k-2) ;

        for (int k=i2+1 ; k<j2 ; k++)
            jeux_indice_t2.set(k) = jeux_indice_res(val1+k-3) ;

        for (int k=j2+1 ; k<val2 ; k++)
            jeux_indice_t2.set(k) = jeux_indice_res(val1+k-4) ;

        Scalar& work = res.set(jeux_indice_res) ;
        work.set_etat_zero() ;

        // Double sum on the two contracted indices
        for (int i=1 ; i<=3 ; i++) {

            jeux_indice_t1.set(i1) = i ;
            jeux_indice_t2.set(i2) = i ;

            for (int j=1 ; j<=3 ; j++) {

                jeux_indice_t1.set(j1) = j ;
                jeux_indice_t2.set(j2) = j ;

                if (desaliasing) {
                    work += t1(jeux_indice_t1) | t2(jeux_indice_t2) ;
                }
                else {
                    work += t1(jeux_indice_t1) * t2(jeux_indice_t2) ;
                }
            }
        }

    }

    return res ;
}
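
// Usage sketch (illustrative only; assumes a mapping `map` already built):
// full double contraction of a twice covariant tensor k_{ij} with a twice
// contravariant tensor h^{ij}; the result has valence 0 :
//
//     Tensor kk(map, 2, COV, map.get_bvect_spher()) ;
//     Tensor hh(map, 2, CON, map.get_bvect_spher()) ;
//     // ... fill kk and hh ...
//     Tensor s = contract(kk, 0, 1, hh, 0, 1, true) ;   // s = k_{ij} h^{ij}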



// Contraction on two indices of a single tensor
// ---------------------------------------------

Tensor contract(const Tensor& source, int ind_1, int ind_2) {

    int val = source.get_valence() ;

    // Checks:
    assert ((ind_1 >= 0) && (ind_1 < val)) ;
    assert ((ind_2 >= 0) && (ind_2 < val)) ;
    assert (ind_1 != ind_2) ;
    assert (source.get_index_type(ind_1) != source.get_index_type(ind_2)) ;

    // We want ind_1 < ind_2 :
    if (ind_1 > ind_2) {
        int auxi = ind_2 ;
        ind_2 = ind_1 ;
        ind_1 = auxi ;
    }

    // Construction of the result:
    int val_res = val - 2 ;

    Itbl tipe(val_res) ;

    for (int i=0 ; i<ind_1 ; i++)
        tipe.set(i) = source.get_index_type(i) ;
    for (int i=ind_1 ; i<ind_2-1 ; i++)
        tipe.set(i) = source.get_index_type(i+1) ;
    for (int i=ind_2-1 ; i<val_res ; i++)
        tipe.set(i) = source.get_index_type(i+2) ;

    Tensor res(source.get_mp(), val_res, tipe, source.get_triad()) ;

    // Loop on the components of the result:

    Itbl jeux_indice_source(val) ;

    for (int i=0 ; i<res.get_n_comp() ; i++) {

        Itbl jeux_indice_res(res.indices(i)) ;

        for (int j=0 ; j<ind_1 ; j++)
            jeux_indice_source.set(j) = jeux_indice_res(j) ;
        for (int j=ind_1+1 ; j<ind_2 ; j++)
            jeux_indice_source.set(j) = jeux_indice_res(j-1) ;
        for (int j=ind_2+1 ; j<val ; j++)
            jeux_indice_source.set(j) = jeux_indice_res(j-2) ;

        Scalar& work = res.set(jeux_indice_res) ;
        work.set_etat_zero() ;

        // Sum on the contracted index
        for (int j=1 ; j<=3 ; j++) {
            jeux_indice_source.set(ind_1) = j ;
            jeux_indice_source.set(ind_2) = j ;
            work += source(jeux_indice_source) ;
        }

    }

    return res ;
}
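
// Usage sketch (illustrative only; assumes a mapping `map` already built):
// trace of a mixed tensor t^i_j (index 0 contravariant, index 1 covariant) :
//
//     Itbl types(2) ;
//     types.set(0) = CON ;
//     types.set(1) = COV ;
//     Tensor t(map, 2, types, map.get_bvect_spher()) ;
//     // ... fill t ...
//     Tensor tr = contract(t, 0, 1) ;   // valence 0 : tr = t^i_i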



            //------------------//
            //     diffrel      //
            //------------------//


Tbl diffrel(const Tensor& aa, const Tensor& bb, const char* comment,
            ostream& ost) {

    if (comment != 0x0) ost << comment << " : " << endl ;

    int val = aa.get_valence() ;

    assert(bb.get_valence() == val) ;

    int n_comp_a = aa.get_n_comp() ;
    int n_comp_b = bb.get_n_comp() ;

    // The tensor with the largest number of stored components drives the loop
    const Tensor* tmax ;
    int n_comp_max ;
    if (n_comp_a >= n_comp_b) {
        n_comp_max = n_comp_a ;
        tmax = &aa ;
    }
    else {
        n_comp_max = n_comp_b ;
        tmax = &bb ;
    }

    int nz = aa.get_mp().get_mg()->get_nzone() ;
    Tbl resu(n_comp_max, nz) ;
    resu.set_etat_qcq() ;

    Itbl idx(val) ;

    for (int ic=0; ic<n_comp_max; ic++) {
        idx = tmax->indices(ic) ;
        Tbl diff = diffrel( aa(idx), bb(idx) ) ;

        if (n_comp_max > 1) ost << "  Comp." ;
        for (int j=0 ; j<val ; j++) {
            ost << " " << idx(j) ;
        }
        if (n_comp_max > 1) ost << " : " ;
        else ost << " " ;
        for (int l=0; l<nz; l++) {
            ost << "  " << diff(l) ;
            resu.set(ic, l) = diff(l) ;
        }
        ost << "\n" ;

    }

    return resu ;
}
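
// Usage sketch (illustrative only): relative difference between two tensors of
// the same valence, with a label written to cout; the returned Tbl is indexed
// by (component, domain) :
//
//     Tbl err = diffrel(t1, t2, "Relative difference t1 <-> t2") ;
//     double err_comp0_dom0 = err(0, 0) ;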


            //--------------------//
            //     diffrelmax     //
            //--------------------//


Tbl diffrelmax(const Tensor& aa, const Tensor& bb, const char* comment,
               ostream& ost) {

    if (comment != 0x0) ost << comment << " : " << endl ;

    int val = aa.get_valence() ;

    assert(bb.get_valence() == val) ;

    int n_comp_a = aa.get_n_comp() ;
    int n_comp_b = bb.get_n_comp() ;

    const Tensor* tmax ;
    int n_comp_max ;
    if (n_comp_a >= n_comp_b) {
        n_comp_max = n_comp_a ;
        tmax = &aa ;
    }
    else {
        n_comp_max = n_comp_b ;
        tmax = &bb ;
    }

    int nz = aa.get_mp().get_mg()->get_nzone() ;
    Tbl resu(n_comp_max, nz) ;
    resu.set_etat_qcq() ;

    Itbl idx(val) ;

    for (int ic=0; ic<n_comp_max; ic++) {
        idx = tmax->indices(ic) ;
        Tbl diff = diffrelmax( aa(idx), bb(idx) ) ;

        if (n_comp_max > 1) ost << "  Comp." ;
        for (int j=0 ; j<val ; j++) {
            ost << " " << idx(j) ;
        }
        if (n_comp_max > 1) ost << " : " ;
        else ost << " " ;
        for (int l=0; l<nz; l++) {
            ost << "  " << diff(l) ;
            resu.set(ic, l) = diff(l) ;
        }
        ost << "\n" ;

    }

    return resu ;
}



            //----------------//
            //      max       //
            //----------------//


Tbl max(const Tensor& aa, const char* comment, ostream& ost) {

    if (comment != 0x0) ost << comment << " : " << endl ;

    int val = aa.get_valence() ;

    int n_comp = aa.get_n_comp() ;

    int nz = aa.get_mp().get_mg()->get_nzone() ;
    Tbl resu(n_comp, nz) ;
    resu.set_etat_qcq() ;

    Itbl idx(val) ;

    for (int ic=0; ic<n_comp; ic++) {

        idx = aa.indices(ic) ;
        Tbl diff = max( aa(idx) ) ;

        if (val > 0) ost << "  Comp." ;
        for (int j=0 ; j<val ; j++) {
            ost << " " << idx(j) ;
        }
        if (val > 0) ost << " : " ;
        else ost << " " ;
        for (int l=0; l<nz; l++) {
            ost << "  " << diff(l) ;
            resu.set(ic, l) = diff(l) ;
        }
        ost << "\n" ;

    }

    return resu ;
}



            //----------------//
            //      min       //
            //----------------//


Tbl min(const Tensor& aa, const char* comment, ostream& ost) {

    if (comment != 0x0) ost << comment << " : " << endl ;

    int val = aa.get_valence() ;

    int n_comp = aa.get_n_comp() ;

    int nz = aa.get_mp().get_mg()->get_nzone() ;
    Tbl resu(n_comp, nz) ;
    resu.set_etat_qcq() ;

    Itbl idx(val) ;

    for (int ic=0; ic<n_comp; ic++) {

        idx = aa.indices(ic) ;
        Tbl diff = min( aa(idx) ) ;

        if (val > 0) ost << "  Comp." ;
        for (int j=0 ; j<val ; j++) {
            ost << " " << idx(j) ;
        }
        if (val > 0) ost << " : " ;
        else ost << " " ;
        for (int l=0; l<nz; l++) {
            ost << "  " << diff(l) ;
            resu.set(ic, l) = diff(l) ;
        }
        ost << "\n" ;

    }

    return resu ;
}


            //--------------------//
            //       maxabs       //
            //--------------------//


Tbl maxabs(const Tensor& aa, const char* comment, ostream& ost, bool verb) {

    if (comment != 0x0) ost << comment << " : " << endl ;

    int val = aa.get_valence() ;

    int n_comp = aa.get_n_comp() ;

    int nz = aa.get_mp().get_mg()->get_nzone() ;
    Tbl resu(n_comp, nz) ;
    resu.set_etat_qcq() ;

    Itbl idx(val) ;

    for (int ic=0; ic<n_comp; ic++) {

        idx = aa.indices(ic) ;
        Tbl diff = max( abs( aa(idx) ) ) ;

        if (verb) {
            if (val > 0) ost << "  Comp." ;
            for (int j=0 ; j<val ; j++) {
                ost << " " << idx(j) ;
            }
            if (val > 0) ost << " : " ;
            else ost << " " ;
        }
        for (int l=0; l<nz; l++) {
            if (verb) ost << "  " << diff(l) ;
            resu.set(ic, l) = diff(l) ;
        }
        if (verb) ost << "\n" ;

    }

    return resu ;
}
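
// Usage sketch (illustrative only): maximum of the absolute value of each
// component of a tensor, domain by domain, with all printing disabled via
// the verbosity flag :
//
//     Tbl ma = maxabs(t, 0x0, cout, false) ;
//     double ma_comp0_dom1 = ma(0, 1) ;   // component no. 0, domain no. 1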


            //-------------------//
            //   Central value   //
            //-------------------//

Tbl central_value(const Tensor& aa, const char* comment, ostream& ost) {

    if (comment != 0x0) ost << comment << " : " << endl ;

    int val = aa.get_valence() ;
    int n_comp = aa.get_n_comp() ;

    Tbl resu(n_comp) ;
    resu.set_etat_qcq() ;

    Itbl idx(val) ;

    for (int ic=0; ic<n_comp; ic++) {

        idx = aa.indices(ic) ;
        double aa_c = aa(idx).val_grid_point(0,0,0,0) ;
        resu.set(ic) = aa_c ;

        if ( comment != 0x0 ) {
            if ( val > 0 ) ost << "  Comp." ;
            for (int j=0 ; j<val ; j++) {
                ost << " " << idx(j) ;
            }
            if (val > 0) ost << " : " ;
            else ost << " " ;
            ost << aa_c << endl ;
        }

    }

    return resu ;
}
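
// Usage sketch (illustrative only): central value (value at the grid point of
// indices (0,0,0,0), i.e. the centre for standard mappings) of each component
// of a tensor, one entry per stored component :
//
//     Tbl cv = central_value(t, "Central value of t") ;
//     double cv_comp0 = cv(0) ;   // first stored component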


            //--------------------//
            //   max_all_domains  //
            //--------------------//

Tbl max_all_domains(const Tensor& aa, int l_excluded, const char* comment,
                    ostream& ost) {

    if (comment != 0x0) ost << comment << " : " << endl ;

    Tbl max_dom = max(aa) ;

    int val = aa.get_valence() ;
    int n_comp = aa.get_n_comp() ;
    int nz = aa.get_mp().get_mg()->get_nzone() ;

    Tbl resu(n_comp) ;
    resu.set_etat_qcq() ;

    Itbl idx(val) ;

    for (int ic=0; ic<n_comp; ic++) {

        // Initialization with a domain that is not the excluded one
        double x0 ;
        if (l_excluded != 0) x0 = max_dom(ic, 0) ;
        else x0 = max_dom(ic, 1) ;

        // Maximum over all domains, skipping domain no. l_excluded
        for (int l=0; l<nz; l++) {
            if (l == l_excluded) continue ;
            double x = max_dom(ic,l) ;
            if (x > x0) x0 = x ;
        }

        resu.set(ic) = x0 ;

        if ( comment != 0x0 ) {
            if ( val > 0 ) ost << "  Comp." ;
            idx = aa.indices(ic) ;
            for (int j=0 ; j<val ; j++) {
                ost << " " << idx(j) ;
            }
            if (val > 0) ost << " : " ;
            else ost << " " ;
            ost << x0 << endl ;
        }

    }

    return resu ;

}
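
// Usage sketch (illustrative only): maximum of each component over all domains,
// skipping the last domain (e.g. the compactified external one) :
//
//     int nz = map.get_mg()->get_nzone() ;
//     Tbl mx = max_all_domains(t, nz-1, "Max of t (last domain excluded)") ;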

            //--------------------//
            //   min_all_domains  //
            //--------------------//

Tbl min_all_domains(const Tensor& aa, int l_excluded, const char* comment,
                    ostream& ost) {

    if (comment != 0x0) ost << comment << " : " << endl ;

    Tbl min_dom = min(aa) ;

    int val = aa.get_valence() ;
    int n_comp = aa.get_n_comp() ;
    int nz = aa.get_mp().get_mg()->get_nzone() ;

    Tbl resu(n_comp) ;
    resu.set_etat_qcq() ;

    Itbl idx(val) ;

    for (int ic=0; ic<n_comp; ic++) {

        // Initialization with a domain that is not the excluded one
        double x0 ;
        if (l_excluded != 0) x0 = min_dom(ic, 0) ;
        else x0 = min_dom(ic, 1) ;

        // Minimum over all domains, skipping domain no. l_excluded
        for (int l=0; l<nz; l++) {
            if (l == l_excluded) continue ;
            double x = min_dom(ic,l) ;
            if (x < x0) x0 = x ;
        }

        resu.set(ic) = x0 ;

        if ( comment != 0x0 ) {
            if ( val > 0 ) ost << "  Comp." ;
            idx = aa.indices(ic) ;
            for (int j=0 ; j<val ; j++) {
                ost << " " << idx(j) ;
            }
            if (val > 0) ost << " : " ;
            else ost << " " ;
            ost << x0 << endl ;
        }

    }

    return resu ;

}


            //-----------------------//
            //   maxabs_all_domains  //
            //-----------------------//

Tbl maxabs_all_domains(const Tensor& aa, int l_excluded, const char* comment,
                       ostream& ost, bool verb) {

    if (comment != 0x0) ost << comment << " : " << endl ;

    Tbl maxabs_dom = maxabs(aa, 0x0, ost, verb) ;

    int val = aa.get_valence() ;
    int n_comp = aa.get_n_comp() ;
    int nz = aa.get_mp().get_mg()->get_nzone() ;

    Tbl resu(n_comp) ;
    resu.set_etat_qcq() ;

    Itbl idx(val) ;

    for (int ic=0; ic<n_comp; ic++) {

        // Initialization with a domain that is not the excluded one
        double x0 ;
        if (l_excluded != 0) x0 = maxabs_dom(ic, 0) ;
        else x0 = maxabs_dom(ic, 1) ;

        // Maximum over all domains, skipping domain no. l_excluded
        for (int l=0; l<nz; l++) {
            if (l == l_excluded) continue ;
            double x = maxabs_dom(ic,l) ;
            if (x > x0) x0 = x ;
        }

        resu.set(ic) = x0 ;

        if ( comment != 0x0 ) {
            if ( val > 0 ) ost << "  Comp." ;
            idx = aa.indices(ic) ;
            for (int j=0 ; j<val ; j++) {
                ost << " " << idx(j) ;
            }
            if (val > 0) ost << " : " ;
            else ost << " " ;
            ost << x0 << endl ;
        }

    }

    return resu ;

}


} // namespace Lorene