problem.hpp
1 /* -*- mode: c++; coding: utf-8; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; show-trailing-whitespace: t -*- vim:fenc=utf-8:ft=cpp:et:sw=4:ts=4:sts=4
2 
3  This file is part of the Feel library
4 
5  Author(s): Christophe Prud'homme <christophe.prudhomme@feelpp.org>
6  Date: 2008-02-14
7 
8  Copyright (C) 2008 Université Joseph Fourier (Grenoble I)
9 
10  This library is free software; you can redistribute it and/or
11  modify it under the terms of the GNU Lesser General Public
12  License as published by the Free Software Foundation; either
13  version 3.0 of the License, or (at your option) any later version.
14 
15  This library is distributed in the hope that it will be useful,
16  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  Lesser General Public License for more details.
19 
20  You should have received a copy of the GNU Lesser General Public
21  License along with this library; if not, write to the Free Software
22  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 */
29 #ifndef PROBLEM_HPP
30 #define PROBLEM_HPP
31 
32 namespace Feel
33 {
34 namespace ublas = boost::numeric::ublas;
35 
36 template<typename T, int>
37 struct noop
38 {
39  typedef T type;
40 };
41 
42 enum
43 {
44  FUNCTIONAL = 0,
45  EQUALITIES,
46  INEQUALITIES
47 };
48 
49 template<int O>
50 struct diff_order
51 {
52  enum { _E_order = O };
53 };
54 
60 template<typename Data,
61  int Id = FUNCTIONAL >
62 class core_data
63 {
64 public:
65  enum
66  {
67  _E_n = Data::_E_n,
68  _E_f = 1,
69  _E_g = Data::_E_g,
70  _E_ng = Data::_E_g + 2*_E_n,
71  _E_h = Data::_E_h,
72  _E_nA = Data::_E_n + ( _E_n )*( _E_n-1 )/2
73  };
74  core_data()
75  :
76  M_val (),
77  M_grad(),
78  M_hess()
79  {
80  for ( uint __i = 0; __i < M_grad.size(); ++__i )
81  {
82  M_grad[ __i ].resize( ( size_t )_E_n );
83  M_hess[ __i ].resize( ( size_t )_E_n, ( size_t )_E_n );
84  }
85  }
86  typedef typename Data::value_type numerical_type;
87  typedef ublas::vector<double> numerical_gradient_type;
89  typedef ublas::symmetric_matrix<double, ublas::upper> numerical_hessian_type;
90 
91 
93  typedef typename IF<Id == FUNCTIONAL,
94  typename array_fixed<numerical_type,_E_f>::type,
95  typename IF<Id == EQUALITIES,
96  typename array_fixed<numerical_type,_E_h>::type,
97  typename array_fixed<numerical_type,_E_ng>::type
98  >::Result
99  >::Result value_type;
100 
102  typedef typename IF<Id == FUNCTIONAL,
103  typename array_fixed<numerical_gradient_type,_E_f>::type,
104  typename IF<Id == EQUALITIES,
105  typename array_fixed<numerical_gradient_type,_E_h>::type,
106  typename array_fixed<numerical_gradient_type,_E_g>::type
107  >::Result
108  >::Result gradient_type;
109 
111  typedef typename IF<Id == FUNCTIONAL,
112  typename array_fixed<numerical_hessian_type,_E_f>::type,
113  typename IF<Id == EQUALITIES,
114  typename array_fixed<numerical_hessian_type,_E_h>::type,
115  typename array_fixed<numerical_hessian_type,_E_g>::type
116  >::Result
117  >::Result hessian_type;
118 
119 
120  value_type value() const
121  {
122  return M_val;
123  }
124  value_type & value()
125  {
126  return M_val;
127  }
128 
129  numerical_type value( int __i ) const
130  {
131  return M_val[ __i ];
132  }
133  numerical_type & value( int __i )
134  {
135  return M_val[ __i ];
136  }
137 
138  gradient_type const& gradient() const
139  {
140  return M_grad;
141  }
142  gradient_type & gradient()
143  {
144  return M_grad;
145  }
146 
147  numerical_gradient_type & gradient( int __i )
148  {
149  return M_grad[ __i ];
150  }
151  numerical_gradient_type gradient( int __i ) const
152  {
153  return M_grad[ __i ];
154  }
155 
156  numerical_type & gradient( int __i, int __j )
157  {
158  return M_grad[ __i]( __j );
159  }
160  numerical_type gradient( int __i, int __j ) const
161  {
162  return M_grad[ __i]( __j );
163  }
164 
165  hessian_type const& hessian() const
166  {
167  return M_hess;
168  }
169  hessian_type & hessian()
170  {
171  return M_hess;
172  }
173 
174  numerical_hessian_type const& hessian( int __i ) const
175  {
176  return M_hess[ __i ];
177  }
178  numerical_hessian_type & hessian( int __i )
179  {
180  return M_hess[ __i ];
181  }
182 
183  numerical_type & hessian( int __i, int __j, int __k )
184  {
185  return M_hess[__i]( __j, __k );
186  }
187  numerical_type hessian( int __i, int __j, int __k ) const
188  {
189  return M_hess[__i]( __j, __k );
190  }
191 
192 protected:
193  value_type M_val;
194  gradient_type M_grad;
195  hessian_type M_hess;
196 };
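// Editorial usage sketch (not part of the original header): once a core_data
// derivative such as functional<MyData> (defined below) has been filled by
// problem<MyData>::evaluate(), its entries are read through the indexed
// accessors. "MyData" is a hypothetical Data policy whose value_type is
// assumed to be double.
// \code
// functional<MyData> fx;
// double f   = fx.value( 0 );          // value of the (single) objective
// double df1 = fx.gradient( 0, 1 );    // d f / d x_1
// double d2f = fx.hessian( 0, 0, 1 );  // d^2 f / ( d x_0 d x_1 )
// \endcode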
203 template<typename Data>
204 class functional
205  :
206 public core_data<Data, FUNCTIONAL>
207 {
208 public:
209 
211 
212  template<int Diff = 0>
213  struct diff
214  {
215  enum { diff_order = Diff };
216  typedef diff<diff_order> type;
217  };
218 };
219 
226 template<typename Data>
227 class inequalities
228  :
229 public core_data<Data, INEQUALITIES>
230 {
231 public:
232 
233  template<int Diff = 0>
234  struct diff
235  {
236  enum { diff_order = Diff };
237 
238  typedef diff<diff_order> type;
239  };
240 };
241 
242 
243 
250 template<typename Data>
251 class equalities
252  :
253 public core_data<Data, EQUALITIES>
254 {
255  template<int Diff = 0>
256  struct diff
257  {
258  enum { diff_order = Diff };
259  typedef diff<diff_order> type;
260  };
261 };
262 
270 template<int N>
271 struct dummy_data
272 {
273  enum { _E_dummy = N };
274 };
275 
282 template<typename Data>
283 class problem
284  :
285 public Data
286 {
287 public:
288 
289 
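 // Editorial note: _E_nA = n + n(n-1)/2 is the number of entries of an n x n
 // symmetric matrix in packed (upper) storage, _E_nL = 2n counts one lower-
 // and one upper-bound multiplier per variable, and _E_nAL is the packed size
 // of the corresponding 2n x 2n multiplier matrix; e.g. _E_n = 3 gives
 // _E_nA = 6.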
290  enum
291  {
292  _E_n = Data::_E_n,
293  _E_f = 1,
294  _E_g = Data::_E_g,
295  _E_h = Data::_E_h,
296  _E_nA = _E_n + ( _E_n )*( _E_n-1 )/2,
297  _E_nL = 2*_E_n,
298  _E_nAL = _E_nL + ( _E_nL )*( _E_nL-1 )/2
299  };
300 
308  typedef Data super;
309  typedef Data data_type;
310  typedef problem<Data> problem_type;
311 
313  typedef typename super::ad_0_type ad_0_type;
314 
316  typedef typename super::ad_1_type ad_1_type;
317 
319  typedef typename super::ad_2_type ad_2_type;
320 
322  typedef typename super::ad_0_type::value_type value_type;
323 
325  typedef ublas::vector<double> vector_type;
326 
328  typedef ublas::symmetric_matrix<double, ublas::upper> symmetric_matrix_type;
329  typedef ublas::matrix<double> matrix_type;
330 
331 
332  typedef functional<Data> f_type;
333  typedef typename IF<_E_g!=0,
334  inequalities<Data>,
335  dummy_data<0> >::Result g_type;
336  typedef typename IF<_E_h!=0,
337  equalities<Data>,
338  dummy_data<1> >::Result h_type;
339 
341  problem();
342 
344  problem( value_type x_definitions[ _E_n ][ 3 ] );
345 
346  problem( problem<Data> const& p )
347  :
348  super( p )
349  {}
350 
351  problem( Data const& p )
352  :
353  super( p ),
354  M_xlower ( _E_n ),
355  M_xupper ( _E_n ),
356  M_x0( _E_n )
357  {
358  this->define_problem( p.defs() );
359  this->initialize_x0();
360  }
361 
363  ~problem();
364 
366  //void define_problem( value_type x_definitions[ _E_n ][3] );
367  template<typename Defs>
368  void define_problem( Defs );
369 
371  int n() const
372  {
373  return _E_n;
374  }
375 
377  int nA() const
378  {
379  return _E_nA;
380  }
381 
383  value_type x_l( unsigned int i ) const
384  {
385  return M_xlower( i );
386  }
387 
389  value_type x_u( unsigned int i ) const
390  {
391  return M_xupper( i );
392  }
393 
394  vector_type const& lowerBounds() const
395  {
396  return M_xlower;
397  }
398  vector_type const& upperBounds() const
399  {
400  return M_xupper;
401  }
402 
403  vector_type distanceToLowerBounds( vector_type const& __x ) const
404  {
405  return __x - M_xlower;
406  }
407  vector_type distanceToUpperBounds( vector_type const& __x ) const
408  {
409  return M_xupper - __x;
410  }
411 
416  bool isOn ( int __i ) const
417  {
418  return __x_definitions[__i][0] >= value_type( .5 );
419  }
420 
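 // Editorial note: value<Order> is the evaluation helper used by evaluate().
 // Its constructors take the problem, a point __x and an output container
 // (f_type, g_type or h_type) and fill values, gradients and hessians up to
 // derivative order Order through the automatic differentiation types
 // ad_0_type / ad_1_type / ad_2_type. For the inequalities it also appends the
 // signed distances to the bounds, x_l - x and x - x_u, after the _E_g user
 // inequalities.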
430  template<int Order>
431  struct value
432  {
433  enum
434  {
435  _E_n = problem_type::_E_n
436  };
437 
438  // defines the automatic differentiation depending on \c Order
439  typedef typename mpl::if_c<( Order == 0 ),
440  ad_0_type,
441  typename mpl::if_c<( Order == 1 ),
442  ad_1_type,
443  ad_2_type>::type >::type ad_type;
444 
445  typedef typename mpl::if_c<( _E_g!=0 ),
446  typename data_type::template inequalities<ad_type>::type,
447  dummy_data<0> >::type inequalities_array_type;
448 
449  typedef typename mpl::if_c<( _E_h!=0 ),
450  typename data_type::template equalities<ad_type>::type,
451  dummy_data<0> >::type equalities_array_type;
452 
453  //
454  // F : functional
455  // \todo : not yet completely fixed for multiple objective functions
456  //
457  value( problem_type* __pt, vector_type const& __x, f_type& __fx )
458  {
459  ad_type __adt;
460  // call data::f() : it is supposed to be static
461  __pt->f( __x, __adt );
462  // FIXME
463  to ( __adt, __fx, mpl::int_<Order>() );
464  }
465  void to ( ad_type const& __adt, f_type& __fx, mpl::int_<0> )
466  {
467  __fx.value( 0 ) = __adt.value();
468  }
469  void to ( ad_type const& __adt, f_type& __fx, mpl::int_<1> )
470  {
471  __fx.value( 0 ) = __adt.value();
472  __fx.gradient( 0 ) = __adt.grad();
473  }
474  void to ( ad_type const& __adt, f_type& __fx, mpl::int_<2> )
475  {
476  __fx.value( 0 ) = __adt.value();
477  __fx.gradient( 0 ) = __adt.grad();
478  __fx.hessian( 0 ) = __adt.hessian();
479  }
480 #if 1
481  //
482  // G : inequalities
483  //
484  value( problem_type* __pt, vector_type const& __x, g_type& __gx )
485  {
486  inequalities_array_type __adt;
487  // call data::g() : it is supposed to be static
488  __pt->g( __x, __adt );
489  to ( __adt, __gx, mpl::int_<Order>() );
490 
491  // in all cases add distances wrt the bounds
492  for ( int __i = 0; __i < _E_n; ++__i )
493  {
494  __gx.value( _E_g + __i ) = __pt->x_l( __i ) - __x( __i );
495  __gx.value( _E_g + _E_n + __i ) = __x( __i ) - __pt->x_u( __i );
496  }
497  }
498  void to ( inequalities_array_type const& __adt, g_type& __gx, mpl::int_<0> )
499  {
500  for ( int __i = 0; __i < _E_g; ++__i )
501  {
502  __gx.value( __i ) = __adt[ __i ].value();
503 
504 
505  }
506  }
507  void to ( inequalities_array_type const& __adt, g_type& __gx, mpl::int_<1> )
508  {
509  for ( int __i = 0; __i < _E_g; ++__i )
510  {
511  __gx.value( __i ) = __adt[ __i ].value();
512 
513  for ( int __j = 0; __j < _E_n; ++__j )
514  {
515 
516  __gx.gradient( __i,__j ) = __adt[ __i ].grad( __j );
517 
518  }
519  }
520  }
521  void to ( inequalities_array_type const& __adt, g_type& __gx, mpl::int_<2> )
522  {
523  for ( int __i = 0; __i < _E_g; ++__i )
524  {
525  __gx.value( __i ) = __adt[ __i ].value();
526 
527  for ( int __j = 0; __j < _E_n; ++__j )
528  {
529  __gx.gradient( __i, __j ) = __adt[ __i ].grad( __j );
530 
531  for ( int __k = 0; __k <= __j; ++__k )
532  {
533  __gx.hessian( __i, __k, __j ) =__adt[ __i ].hessian( __k, __j );
534  }
535  }
536  }
537  }
538  //
539  // H: equalities
540  //
541  value( problem_type* __pt, vector_type const& __x, h_type& __hx )
542  {
543  equalities_array_type __adt;
544  // call data::h() : it is supposed to be static
545  __pt->h( __x, __adt );
546  to ( __adt, __hx, mpl::int_<Order>() );
547  }
548  void to ( equalities_array_type const& __adt, h_type& __hx, mpl::int_<0> )
549  {
550  for ( int __i = 0; __i < _E_h; ++__i )
551  {
552  __hx.value( __i ) = __adt[ __i ].value();
553  }
554  }
555  void to ( equalities_array_type const& __adt, h_type& __hx, mpl::int_<1> )
556  {
557  for ( int __i = 0; __i < _E_h; ++__i )
558  {
559  __hx.value( __i ) = __adt[ __i ].value();
560 
561  for ( int __j = 0; __j < _E_n; ++__j )
562  {
563  __hx.gradient( __i,__j ) = __adt[ __i ].grad( __j );
564  }
565  }
566  }
567  void to ( equalities_array_type const& __adt, h_type& __hx, mpl::int_<2> )
568  {
569  for ( int __i = 0; __i < _E_h; ++__i )
570  {
571  __hx.value( __i ) = __adt[ __i ].value();
572 
573  for ( int __j = 0; __j < _E_n; ++__j )
574  {
575  __hx.gradient( __i, __j ) = __adt[ __i ].grad( __j );
576 
577  for ( int __k = 0; __k <= __j; ++__k )
578  {
579  __hx.hessian( __i, __k, __j ) =__adt[ __i ].hessian( __k, __j );
580  }
581  }
582  }
583  }
584 #endif
585  };
586 
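 // Editorial usage sketch for evaluate(): computing the objective together
 // with its first and second derivatives at a point x. "MyData" is a
 // hypothetical Data policy; a bounds table can alternatively be passed to the
 // problem( value_type[_E_n][3] ) constructor.
 // \code
 // problem<MyData> p;
 // problem<MyData>::vector_type x( p.n() );
 // problem<MyData>::f_type fx;
 // p.evaluate( x, fx, diff_order<2>() ); // fills fx.value(0), fx.gradient(0), fx.hessian(0)
 // \endcode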
594  template<int Order, typename Type>
595  void evaluate( vector_type const& __x, Type& __fx, diff_order<Order> ) const
596  {
597  value<Order> ( const_cast<problem*>( this ), __x, __fx );
598  }
599 
601  void copy_x0_to_x( vector_type& _x );
602 
604  void copy_x_to_x0( vector_type const& _x );
605 
607  void initialize_x0();
608 
610  void custom_initialize_x0( vector_type& __custom );
611 
613  void print( vector_type const& _x ) const;
614  void print_complete( std::string const&, vector_type const& _x ) const;
615  void print_complete( vector_type const& _x, vector_type const& _s ) const;
616  void print_bound_constraints() const;
617  void print_stationary_x( vector_type const& _x,
618  vector_type const& _lambda_l,
619  vector_type const& _lambda_u ) const;
620  void print_stationaryN_x( vector_type const& _x,
621  vector_type const& _s,
622  vector_type const& _lambda_h,
623  vector_type const& _lambda_g ) const;
624 
625 private:
626 
627  vector_type M_xlower;
628  vector_type M_xupper;
629  value_type __x_definitions[_E_n][3];
630 
631  vector_type M_x0;
632 
633  size_t M_n;
634  size_t M_nA;
635 
636 };
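// Editorial note: as used above, the Data policy must provide the compile-time
// sizes _E_n, _E_g and _E_h, a value_type, the automatic differentiation types
// ad_0_type, ad_1_type and ad_2_type, the nested templates
// inequalities<AD>::type and equalities<AD>::type, the evaluation hooks
// f( x, fx ), g( x, gx ) and h( x, hx ), and, for the problem( Data const& )
// constructor, a defs() table indexable as defs()[i][0..2]
// (on/off flag, lower bound, upper bound).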
637 
638 template<typename Data>
639 problem<Data>::problem()
640  :
641  Data(),
642  M_xlower ( _E_n ),
643  M_xupper ( _E_n ),
644  M_x0( _E_n )
645 {
646  M_xlower = ublas::scalar_vector<value_type>( M_xlower.size(), -inf );
647  M_xupper = ublas::scalar_vector<value_type>( M_xupper.size(), +inf );
648 }
649 
650 
651 template<typename Data>
652 problem<Data>::problem( value_type x_definitions[ _E_n][3 ] )
653  :
654  Data(),
655  M_xlower ( _E_n ),
656  M_xupper ( _E_n ),
657  M_x0( _E_n )
658 {
659  this->define_problem( x_definitions );
660  this->initialize_x0();
661 }
662 
663 
664 template<typename Data>
665 problem<Data>::~problem()
666 {
667 }
668 
669 template<typename Data>
670 template<typename Defs>
671 void
672 problem<Data>::define_problem( Defs x_definitions )
673 {
674  /* if ( !x_definitions )
675  {
676  for( int i = 0; i < _E_n; i++ )
677  {
678  __x_definitions[ i][0 ] = 1;
679  __x_definitions[ i][1 ] = -gopt::inf;
680  __x_definitions[ i][2 ] = +gopt::inf;
681  M_xlower( i ) = -gopt::inf;
682  M_xupper( i ) = +gopt::inf;
683  }
684  }
685  else*/
686  {
687  //_E_n = 0;
688  for ( int i = 0; i < _E_n; i++ )
689  {
690  __x_definitions[ i][0 ] = x_definitions[ i][0 ];
691  __x_definitions[ i][1 ] = x_definitions[ i][1 ];
692  __x_definitions[ i][2 ] = x_definitions[ i][2 ];
693 
694  // if( isOn ( i ) )
695  {
696  M_xlower( i ) = __x_definitions[ i][1 ];
697  M_xupper( i ) = __x_definitions[ i][2 ];
698  //_E_n++;
699  }
700  }
701 
702  //_E_nA = _E_n + _E_n * (_E_n - 1) / 2;
703  }
704 }
705 
706 template<typename Data>
707 void
708 problem<Data>::copy_x0_to_x( vector_type& __x )
709 {
710  int offset = 0;
711 
712  for ( int i = 0; i < _E_n; i++ )
713  {
714  if ( isOn( i ) )
715  __x( i-offset ) = M_x0( i );
716 
717  else
718  offset++;
719  }
720 }
721 
722 
723 template<typename Data>
724 void
725 problem<Data>::copy_x_to_x0( vector_type const& __x )
726 {
727  int offset = 0;
728 
729  for ( int i = 0; i < _E_n; i++ )
730  {
731  if ( isOn ( i ) )
732  M_x0( i ) = __x( i-offset );
733 
734  else
735  offset++;
736  }
737 }
738 
739 
740 template<typename Data>
741 void
742 problem<Data>::initialize_x0()
743 {
744  for ( int i = 0; i < _E_n; i++ )
745  {
746  if ( isOn ( i ) )
747  {
748  if ( __x_definitions[ i][1 ] <= -inf && __x_definitions[ i][2 ] >= inf )
749  M_x0( i ) = 0;
750 
751  if ( __x_definitions[ i][1 ] <= -inf && __x_definitions[ i][2 ] < inf )
752  M_x0( i ) = __x_definitions[ i][2 ] - value_type( 10 );
753 
754  if ( __x_definitions[ i][1 ] > -inf && __x_definitions[ i][2 ] >= inf )
755  M_x0( i ) = __x_definitions[ i][1 ] + value_type( 10 );
756 
757  if ( __x_definitions[ i][1 ] > -inf && __x_definitions[ i][2 ] < inf )
758  M_x0( i ) = 0.5 * ( __x_definitions[ i][1 ] + __x_definitions[ i][2 ] );
759  }
760 
761  else
762  M_x0( i ) = __x_definitions[ i][1 ];
763  }
764 }
765 
766 
767 template<typename Data>
768 void
769 problem<Data>::custom_initialize_x0( vector_type& __custom )
770 {
771  for ( int i = 0; i < _E_n; i++ )
772  {
773  if ( __custom( i ) < __x_definitions[ i][1 ] )
774  {
775  std::ostringstream __ex;
776  __ex << "Initialization Error: initial x^0[ " << i << " ] < xlower[ " << i << " ]\n"
777  << " * x^0[ " << i << " ] = " << __custom( i ) << "\n"
778  << " * xlower[ " << i << " ] = " << __x_definitions[ i][1 ] << "\n";
779 
780  throw std::invalid_argument( __ex.str() );
781  }
782 
783  if ( __custom( i ) > __x_definitions[ i][2 ] )
784  {
785  std::ostringstream __ex;
786  __ex << "Initialization Error: initial x^0[ " << i << " ] > xupper[ " << i << " ]\n"
787  << " * x^0[ " << i << " ] = " << __custom( i ) << "\n"
788  << " * xupper[ " << i << " ] = " << __x_definitions[ i][2 ] << "\n";
789 
790  throw std::invalid_argument( __ex.str() );
791  }
792 
793  M_x0( i ) = __custom( i );
794  }
795 }
796 
797 
798 
799 //
800 // Printing routines
801 //
802 
803 template<typename Data>
804 void
805 problem<Data>::print( vector_type const& _x ) const
806 {
807  int offset = 0;
808  std::cout << "x:\n";
809 
810  for ( int i = 0; i < _E_n; i++ )
811  {
812  if ( isOn ( i ) )
813  std::cout << " x [ " << i << " ] = " << _x( i-offset );
814 
815  else
816  {
817  std::cout << " x [ " << i << " ] = " << __x_definitions[ i][1 ] << " ( variable \"off\" )";
818  offset++;
819  }
820 
821  std::cout << "\n";
822  }
823 }
824 
825 template<typename Data>
826 void
827 problem<Data>::print_bound_constraints() const
828 {
829  //std::cerr << "\nBound constraints with N = " << N() << "\n";
830  for ( int i = 0; i < _E_n; i++ )
831  {
832  if ( isOn ( i ) )
833  {
834  if ( __x_definitions[ i][1 ] == -inf )
835  std::cerr << std::setw( 8 ) << std::right << " -Inf" << " <= ";
836 
837  else if ( std::abs( __x_definitions[ i][1 ] ) < 1e4 )
838  std::cerr << std::setw( 8 ) << std::right << __x_definitions[ i][1 ] << " <= ";
839 
840  else
841  std::cerr << std::setw( 8 ) << std::right << __x_definitions[ i][1 ] << " <= ";
842 
843  std::cerr << "x[" << i << "] <= ";
844 
845  if ( __x_definitions[ i][2 ] == inf )
846  std::cerr << std::left << "+Inf";
847 
848  else if ( fabs( __x_definitions[ i][2 ] ) < 1e4 )
849  std::cerr << std::left << __x_definitions[ i][2 ];
850 
851  else
852  std::cerr << std::left << __x_definitions[ i][2 ];
853  }
854 
855  else
856  {
857  std::cerr << "x[" << i << " ] := " << __x_definitions[ i][1 ];
858  }
859 
860  std::cerr << "\n";
861  }
862 }
863 
864 template<typename Data>
865 void
866 problem<Data>::print_complete( std::string const& __mesg, vector_type const& _x ) const
867 {
868  std::cout << __mesg << "\n";
869  print( _x );
870 
871  print_bound_constraints();
872 }
873 
874 template<typename Data>
875 void
876 problem<Data>::print_complete( vector_type const& _x,
877  vector_type const& _s ) const
878 {
879  print_bound_constraints();
880  std::cerr << "\n";
881  print( _x );
882 
883  f_type __fx;
884  this->evaluate ( _x, __fx, diff_order<2>() );
885 
886  std::cout << "fx.gradient:\n"
887  << __fx.gradient( 0 ) << "\n";
888 
889  std::cout << "fx.hessian:\n"
890  << __fx.hessian( 0 ) << "\n";
891 
892 
893  g_type __gx;
894  this->evaluate ( _x, __gx, diff_order<2>() );
895 
896  for ( int m = 0; m < _E_g; m++ )
897  {
898  std::cout << "gx(" << m << "):\n"
899  << __gx.value( m ) << "\n";
900 
901  std::cout << "gx(" << m << ").gradient:\n"
902  << __gx.gradient( m ) << "\n";
903 
904  std::cout << "gx(" << m << ").hessian:\n"
905  << __gx.hessian( m ) << "\n";
906  }
907 
908  h_type __hx;
909  this->evaluate ( _x, __hx, diff_order<2>() );
910 
911  for ( int m = 0; m < _E_h; m++ )
912  {
913  std::cout << "hx(" << m << "):\n"
914  << __hx.value( m ) << "\n";
915 
916  std::cout << "hx(" << m << ").gradient:\n"
917  << __hx.gradient( m ) << "\n";
918 
919  std::cout << "hx(" << m << ").hessian:\n"
920  << __hx.hessian( m ) << "\n";
921  }
922 }
923 
924 
925 template<typename Data>
926 void
927 problem<Data>::print_stationary_x( vector_type const& _x,
928  vector_type const& _lambda_l,
929  vector_type const& _lambda_u ) const
930 {
931 
932  vector_type __sum( _E_n );
933  value_type __prod;
934 
935  this->print_bound_constraints();
936 
937  std::cerr.precision ( 5 );
938 
939  std::cerr << "\n\nSolution:\n";
940  int offset = 0;
941 
942  for ( int i = 0; i < _E_n; i++ )
943  {
944  if ( isOn ( i ) )
945  {
946  std::cerr << " x*[ " << i << " ] = " << std::setw( 10 ) << _x( i-offset );
947 
948  if ( std::abs( _lambda_l( i-offset ) ) > 1e-6 )
949  std::cerr << " ( lower bound active )";
950 
951  if ( std::abs( _lambda_u( i-offset ) ) > 1e-6 )
952  std::cerr << " ( upper bound active )";
953  }
954 
955  else
956  {
957  std::cerr << " x*[ " << i << " ] = " << std::setw( 10 ) << __x_definitions[ i][1 ];
958  std::cerr << " ( variable \"off\" ) ";
959  offset++;
960  }
961 
962  std::cerr << "\n";
963  }
964 
965  f_type __f_x;
966  this->evaluate( _x, __f_x, diff_order<2>() );
967  std::cerr << "f(x*) = " << __f_x.value( 0 ) << "\n"
968  << "f'(x*) = " << __f_x.gradient( 0 ) << "\n"
969  << "f''(x*) = " << __f_x.hessian( 0 ) << "\n";
970 
971  std::cerr << "\nStationarity conditions (LS):\n"
972  << " \\nabla f - \\lambda_l + \\lambda_u = residual\n"
973  << "----------------------------------------------------------\n";
974 
975  offset = 0;
976 
977  for ( int i = 0; i < _E_n; i++ )
978  if ( isOn ( i ) )
979  {
980  __sum( i-offset ) = __f_x.gradient( 0, i-offset ) - _lambda_l( i-offset ) + _lambda_u( i-offset );
981 
982  std::cerr << " " << std::setw( 10 ) << __f_x.gradient( 0, i-offset ) << " - "
983  << " " << std::setw( 10 ) << _lambda_l( i-offset ) << " + "
984  << " " << std::setw( 10 ) << _lambda_u( i-offset ) << " = "
985  << " " << std::setw( 10 ) << __sum( i-offset ) << "\n";
986 
987  }
988 
989  else
990  {
991  std::cerr << " x x x x x x x x x x x x x x x x x x x x x x x x x x x\n";
992  offset++;
993  }
994 
995  std::cerr << "\n =========================================================";
996  std::cerr << "\n # Norm of residuals ------------------------> " ;
997  std::cerr << " " << std::setprecision( 2 ) << std::setw( 8 ) << norm_2( __sum ) << " #";
998  std::cerr << "\n =========================================================\n";
999 
1000  std::cerr << "\nComplementarity conditions (LS):\n"
1001  << " \\lambda_l * ( l - x ) = 0.000000 | \\lambda_u * ( x - u ) = 0.00000\n"
1002  << "----------------------------------------------|----------------------------------------------\n";
1003 
1004  offset = 0;
1005 
1006  for ( int i = 0; i < _E_n; i++ )
1007  if ( isOn ( i ) )
1008  {
1009  __prod = _lambda_l( i-offset ) * ( __x_definitions[ i][1 ] - _x( i-offset ) );
1010 
1011  std::cerr << " " << std::setw( 9 ) << _lambda_l( i-offset ) << " * "
1012  << " " << std::setw( 15 ) << __x_definitions[ i][1 ] - _x( i-offset ) << " = "
1013  << " " << std::setw( 9 ) << __prod << " |";
1014 
1015  __prod = _lambda_u( i-offset ) * ( _x( i-offset ) - __x_definitions[ i][2 ] );
1016 
1017  std::cerr << " " << std::setw( 9 ) << _lambda_u( i-offset ) << " * "
1018  << " " << std::setw( 15 ) << _x( i-offset )-__x_definitions[ i][2 ] << " = "
1019  << " " << std::setw( 9 ) << __prod << "\n";
1020 
1021  }
1022 
1023  else
1024  {
1025  std::cerr << " x x x x x x x x x x x x x x x x x x x x x x |"
1026  << " x x x x x x x x x x x x x x x x x x x x x x\n";
1027  offset++;
1028  }
1029 }
1030 
1031 
1032 template<typename Data>
1033 void
1034 problem<Data>::print_stationaryN_x( vector_type const& _x,
1035  vector_type const& _s,
1036  vector_type const& _lambda_h,
1037  vector_type const& _lambda_g ) const
1038 {
1039  std::cerr << "\n\nVariables and (variable) constraints:\n";
1040 
1041  this->print_bound_constraints();
1042 
1043  std::cerr << "\n\nSolution:\n\n";
1044 
1045  std::cerr.precision ( 5 );
1046 
1047  int offset = 0;
1048 
1049  for ( int i = 0; i < _E_n; i++ )
1050  {
1051  if ( isOn ( i ) )
1052  {
1053  std::cerr << " x*[ " << i << " ] = " << std::setw( 10 ) << _x( i-offset );
1054 
1055  if ( _x( i-offset ) < M_xlower( i-offset ) ||
1056  _x( i-offset ) > M_xupper( i-offset ) )
1057  {
1058  std::cerr << " <- ( violated ! )";
1059  }
1060 
1061  else
1062  {
1063  value_type lam_l = _lambda_g( _E_g+i-offset );
1064  value_type lam_u = _lambda_g( _E_g+_E_n+i-offset );
1065 
1066  if ( std::abs( lam_l ) > 1e-4 )
1067  std::cerr << " ( lower bound active, lambda_l " << std::setw( 10 ) << lam_l;
1068 
1069  if ( std::abs( lam_u ) > 1e-4 )
1070  std::cerr << " ( upper bound active, lambda_u " << std::setw( 10 ) << lam_u;
1071  }
1072  }
1073 
1074  else
1075  {
1076  std::cerr << " x*[ " << i << " ] = " << std::setw( 10 ) << __x_definitions[ i][1 ];
1077  std::cerr << " ( variable \"off\" ) ";
1078  offset++;
1079  }
1080 
1081  std::cerr << "\n";
1082  }
1083 
1084  // Functional
1085  f_type __fx;
1086  this->evaluate ( _x, __fx, diff_order<1>() );
1087 
1088  std::cerr << " f(x*) = " << std::setw( 10 ) << __fx.value( 0 ) << "\n";
1089 
1090  // inequalities
1091  g_type __gx;
1092  this->evaluate ( _x, __gx, diff_order<0>() );
1093 
1094  for ( int m = 0; m < _E_g; m++ )
1095  {
1096  if ( __gx.value( m ) < 0 )
1097  {
1098  if ( std::abs( _lambda_g( m ) ) > 1e-3 )
1099  {
1100  std::cerr << " g[" << m << "](x*) = " << std::setw( 10 ) << __gx.value( m )
1101  << " ( satisfied - active, lambda = " << std::setw( 10 ) << _lambda_g( m );
1102  }
1103 
1104  else
1105  {
1106  std::cerr << " g[" << m << "](x*) = " << std::setw( 10 ) << __gx.value( m )
1107  << " ( satisfied - inactive )";
1108 
1109  }
1110  }
1111 
1112  else
1113  {
1114  std::cerr << " g[" << m << "](x*) = " << std::setw( 10 ) << __gx.value( m )
1115  << " <- ( violated ! )";
1116  }
1117 
1118  std::cerr << "\n";
1119  }
1120 
1121  // equalities
1122  h_type __hx;
1123  this->evaluate ( _x, __hx, diff_order<0>() );
1124 
1125  for ( int m = 0; m < _E_h; m++ )
1126  {
1127  std::cerr << " h[" << m << "](x*) = " << std::setw( 10 ) << __hx.value( m );
1128 
1129  if ( std::abs( __hx.value( m ) ) < 1e-3 )
1130  std::cerr << " ( satisfied )";
1131 
1132  else
1133  std::cerr << " <- ( violated ! )";
1134 
1135  std::cerr << "\n";
1136  }
1137 }
1138 }
1139 
1140 
1141 #endif
1142 