1 @c -----------------------------------------------------------------------------
2 @c File : augmented_lagrangian.de.texi
3 @c License : GNU General Public License (GPL)
5 @c Original : augmented_lagrangian.texi revision 10.10.2010
7 @c Revision : 11.05.2011
9 @c This file is part of Maxima -- GPL CAS based on DOE-MACSYMA
10 @c -----------------------------------------------------------------------------
@menu
* Functions and Variables for augmented_lagrangian::
@end menu
16 @c -----------------------------------------------------------------------------
17 @node Functions and Variables for augmented_lagrangian, , augmented_lagrangian, augmented_lagrangian
18 @section Functions and Variables for augmented_lagrangian
19 @c -----------------------------------------------------------------------------
21 @c -----------------------------------------------------------------------------
22 @anchor{augmented_lagrangian_method}
23 @deffn {Function} augmented_lagrangian_method (@var{FOM}, @var{xx}, @var{C}, @var{yy})
24 @deffnx {Function} augmented_lagrangian_method (@var{FOM}, @var{xx}, @var{C}, @var{yy}, optional_args)
25 @deffnx {Function} augmented_lagrangian_method ([@var{FOM}, @var{grad}], @var{xx}, @var{C}, @var{yy})
26 @deffnx {Function} augmented_lagrangian_method ([@var{FOM}, @var{grad}], @var{xx}, @var{C}, @var{yy}, optional_args)
28 Returns an approximate minimum of the expression @var{FOM} with respect to the
29 variables @var{xx}, holding the constraints @var{C} equal to zero. @var{yy} is
30 a list of initial guesses for @var{xx}. The method employed is the augmented
31 Lagrangian method (see Refs [1] and [2]).
33 @var{grad}, if present, is the gradient of @var{FOM} with respect to @var{xx},
34 represented as a list of expressions, one for each variable in @var{xx}.
35 If not present, the gradient is constructed automatically.
37 @var{FOM} and each element of @var{grad}, if present, must be ordinary
38 expressions, not names of functions or lambda expressions.
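
For instance, a gradient list matching @var{xx} can be built directly from the
objective expression. This is only a sketch, using the objective from the
examples below:

@example
FOM: x^2 + 2*y^2$
xx: [x, y]$
/* one derivative per variable in xx; yields [2*x, 4*y] */
grad: map (lambda ([v], diff (FOM, v)), xx);
@end example
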
40 @code{optional_args} represents additional arguments, specified as
41 @code{@var{symbol} = @var{value}}. The optional arguments recognized are:

@table @code
@item niter
Number of iterations of the augmented Lagrangian algorithm
@item lbfgs_tolerance
Tolerance supplied to LBFGS
@item iprint
IPRINT parameter (a list of two integers which controls verbosity) supplied to
LBFGS
@item %lambda
Initial value of @code{%lambda} to be used for calculating the augmented
Lagrangian
@end table
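
For example, the defaults can be overridden in a call like the following; the
particular values are arbitrary illustrations, not recommended settings:

@example
load ("lbfgs")$
load ("augmented_lagrangian")$
FOM: x^2 + 2*y^2$
xx: [x, y]$
C: [x + y - 1]$
yy: [1, 1]$
augmented_lagrangian_method (FOM, xx, C, yy,
    niter = 20, lbfgs_tolerance = 1e-6, iprint = [-1, 0]);
@end example
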
59 This implementation minimizes the augmented Lagrangian by applying the
60 limited-memory BFGS (LBFGS) algorithm, which is a quasi-Newton algorithm.
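
In outline, each outer iteration minimizes, via LBFGS, an expression of the
common textbook form sketched below, and then updates the multipliers from the
constraint residuals. The symbols @code{lam} and @code{mu} are purely
illustrative; the package's internal sign and penalty conventions may differ.

@example
FOM: x^2 + 2*y^2$
C: [x + y - 1]$
/* objective + multiplier terms + quadratic penalty (sketch) */
L_A: FOM + sum (lam[i]*C[i] + (mu/2)*C[i]^2, i, 1, length (C));
@end example
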
62 @code{load("augmented_lagrangian")} loads this function.
64 See also @code{lbfgs}.

References:

[1] @url{http://www-fp.mcs.anl.gov/otc/Guide/OptWeb/continuous/constrained/nonlinearcon/auglag.html}

[2] @url{http://www.cs.ubc.ca/spider/ascher/542/chap10.pdf}

Examples:

@example
(%i1) load ("lbfgs");
(%o1) /maxima/share/lbfgs/lbfgs.mac
(%i2) load ("augmented_lagrangian");
(%o2) /maxima/share/contrib/augmented_lagrangian.mac
(%i3) FOM: x^2 + 2*y^2$
(%i4) xx: [x, y]$
(%i5) C: [x + y - 1]$
(%i6) yy: [1, 1]$
(%i7) augmented_lagrangian_method(FOM, xx, C, yy, iprint=[-1,0]);
(%o7) [[x = 0.66665984108002, y = 0.33334027245545],
                              %lambda = [- 1.333337940892525]]
@end example
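
As a consistency check, this small problem can also be solved symbolically;
the value reported in @code{%lambda} approximates the classical Lagrange
multiplier. The symbol @code{lam} below is only an illustrative name:

@example
FOM: x^2 + 2*y^2$
C: [x + y - 1]$
/* stationarity of the ordinary Lagrangian plus the constraint */
solve ([diff (FOM + lam*C[1], x), diff (FOM + lam*C[1], y), C[1] = 0],
       [x, y, lam]);
@end example

This yields @code{x = 2/3}, @code{y = 1/3} and @code{lam = -4/3}, in agreement
with the numerical result above.
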
105 Same example as before, but this time the gradient is supplied as an argument.

@example
(%i1) load ("lbfgs")$
(%i2) load ("augmented_lagrangian")$
(%i3) FOM: x^2 + 2*y^2$
(%i4) xx: [x, y]$
(%i5) grad : [2*x, 4*y]$
(%i6) C: [x + y - 1]$
(%i7) yy: [1, 1]$
(%i8) augmented_lagrangian_method ([FOM, grad], xx, C, yy,
                                   iprint = [-1, 0]);
(%o8) [[x = 0.666659841080025, y = .3333402724554462],
                               %lambda = [- 1.333337940892543]]
@end example

@end deffn
@c --- End of file augmented_lagrangian.de.texi --------------------------------