/* Original version of this file copyright 1999 by Michael Wester,
 * and retrieved from http://www.math.unm.edu/~wester/demos/MatrixTheory/problems.macsyma
 *
 * Released under the terms of the GNU General Public License, version 2,
 * per message dated 2007-06-03 from Michael Wester to Robert Dodier
 * (contained in the file wester-gpl-permission-message.txt).
 *
 * See: "A Critique of the Mathematical Abilities of CA Systems"
 * by Michael Wester, pp 25--60 in
 * "Computer Algebra Systems: A Practical Guide", edited by Michael J. Wester
 * and published by John Wiley and Sons, Chichester, United Kingdom, 1999.
 */
/* ----------[ M a c s y m a ]---------- */
/* ---------- Initialization ---------- */
/* ---------- Matrix Theory ---------- */
/* Extract the superdiagonal => [2, 6] */
mat_diag(matrix([1, 2, 3], [4, 5, 6], [7, 8, 9]), 1);
/* (2, 3)-minor => [[1, 2], [7, 8]] */
minor(matrix([1, 2, 3], [4, 5, 6], [7, 8, 9]), 2, 3);
/* Create the 7 x 6 matrix B from rearrangements of the elements of the 4 x 4
   matrix A (this is easiest to do with a MATLAB style notation):
   B = [A(1:3,2:4), A([1,2,4],[3,1,4]); A, [A(1:2,3:4); A([4,1],[3,2])]]
   => [[12 13 14|13 11 14],
       [22 23 24|23 21 24],
       [32 33 34|43 41 44],
       [-----------------],
       [11 12 13 14|13 14],
       [21 22 23 24|23 24],
       [31 32 33 34|43 42],
       [41 42 43 44|13 12]].  See Michael James Wester, _Symbolic Calculation
   and Expression Swell Analysis of Matrix Determinants and Eigenstuff_, Ph.D.
   dissertation, University of New Mexico, Albuquerque, New Mexico, December
   1992. */
A: matrix([11, 12, 13, 14],
          [21, 22, 23, 24],
          [31, 32, 33, 34],
          [41, 42, 43, 44])$
[< A[1..3,2..4], A[[1,2,4],[3,1,4]]; A, [A[1..2,3..4]; A[[4,1],[3,2]]] >];
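/* An extra cross-check with generic functions: submat below is a small helper
   defined here (not a built-in) that extracts the submatrix of A picked out by
   a list of row indices and a list of column indices; addcol/addrow then glue
   the four blocks of B together. */
submat(A, rr, cc) := genmatrix(lambda([i, j], A[rr[i], cc[j]]),
                               length(rr), length(cc))$
addrow(addcol(submat(A, [1, 2, 3], [2, 3, 4]), submat(A, [1, 2, 4], [3, 1, 4])),
       addcol(A, addrow(submat(A, [1, 2], [3, 4]), submat(A, [4, 1], [3, 2]))));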
/* Create a block diagonal matrix */
mat_diag([matrix([a, 1], [0, a]), b, matrix([c, 1, 0], [0, c, 1], [0, 0, c])]);
/* Reduce the matrix elements modulo 2 => [[1 1], [1 0]] */
modulus: 2$
rat(matrix([7, 11], [3, 8]));
modulus: false$
/* => [[-cos t, -sin t], [sin t, -cos t]] */
matrix([cos(t), sin(t)], [-sin(t), cos(t)]);
diff(%, t, 2);
/* => [[(a + 7) x + (2 a - 8) y, (3 a - 9) x + (4 a + 10) y,
       (5 a + 11) x + (6 a - 12) y]] */
matrix([x, y]) . (a*matrix([1, 3, 5], [2, 4, 6])
                  + matrix([7, -9, 11], [-8, 10, -12]));
/* Matrix norms: infinity norm => 7 */
mat_norm(matrix([1, -2*%i], [-3*%i, 4]), 'inf);
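/* A quick arithmetic cross-check, assuming only cabs and max: the infinity
   norm is the largest row sum of absolute values, max(1 + 2, 3 + 4). */
max(cabs(1) + cabs(-2*%i), cabs(-3*%i) + cabs(4));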
/* Frobenius norm => (a^2 + b^2 + c^2)/(|a| |b| |c|)   (a, b, c real) */
mat_norm(matrix([a/(b*c), 1/c, 1/b], [1/c, b/(a*c), 1/a], [1/b, 1/a, c/(a*b)]),
         'frobenius);
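/* Supporting check using only factor: the sum of the squared entries collapses
   to (a^2 + b^2 + c^2)^2/(a^2 b^2 c^2), whose square root is the quoted norm
   for real a, b, c. */
factor((a/(b*c))^2 + 2/c^2 + 2/b^2 + (b/(a*c))^2 + 2/a^2 + (c/(a*b))^2);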
/* Hermitian (complex conjugate transpose) => [[1, f(4 + 5 i)], [2 - 3 i, 6]]
   (This assumes f is a real valued function.  In general, the (1, 2) entry
   will be conjugate[f(4 - 5 i)] = conjugate(f)(4 + 5 i).) */
hermitian(matrix([1, 2 + 3*%i], [f(4 - 5*%i), 6]));
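/* A cross-check with generic operations (conjugate and transpose only): with f
   undeclared, the (1, 2) entry stays as an unevaluated conjugate. */
conjugate(transpose(matrix([1, 2 + 3*%i], [f(4 - 5*%i), 6])));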
m: matrix([a, b], [1, a*b]);
/* Invert the matrix => 1/(a^2 - 1)  [[a, -1], [-1/b, a/b]] */
m^^(-1);
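/* Quick verification, assuming only ratsimp: the product of m and the inverse
   just computed should simplify to the identity matrix. */
ratsimp(m . %);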
/* Inverse of a triangular partitioned (or block) matrix
   => [[A_11^(-1), -A_11^(-1) A_12 A_22^(-1)], [0, A_22^(-1)]].
   See Charles G. Cullen, _Matrices and Linear Transformations_, Second
   Edition, Dover Publications Inc., 1990, p. 35. */
declare([A11, A12, A22], nonscalar)$
matrix([A11, A12], [0, A22])^^(-1);
remove([A11, A12, A22], nonscalar)$
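/* A numeric spot-check of the block formula with throwaway names f11, f12, f22
   (stand-ins for A11, A12, A22), using only addrow, addcol and zeromatrix:
   assembling the formula from the block inverses and multiplying by the block
   matrix should give the 4 x 4 identity. */
f11: matrix([1, 2], [3, 4])$
f12: matrix([0, 1], [1, 0])$
f22: matrix([2, 0], [1, 3])$
blocktri: addrow(addcol(f11, f12), addcol(zeromatrix(2, 2), f22))$
candidate: addrow(addcol(f11^^(-1), -f11^^(-1) . f12 . f22^^(-1)),
                  addcol(zeromatrix(2, 2), f22^^(-1)))$
ratsimp(blocktri . candidate);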
/* LU decomposition of a symbolic matrix [David Wood]
   [ 1   0  0] [1 x-2 x-3]   [ 1      x-2        x-3     ]
   [x-1  1  0] [0  4  x-5] = [x-1  x^2-3x+6   x^2-3x-2   ]
   [x-2 x-3 1] [0  0  x-7]   [x-2   x^2-8    2x^2-12x+14 ] */
matrix([ 1,    x-2,       x-3           ],
       [x-1,   x^2-3*x+6, x^2-3*x-2     ],
       [x-2,   x^2-8,     2*x^2-12*x+14 ])$
ratsimp(lu_decomp_symb(%));
lu_matrices(%, 1..mat_nrows(%));
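/* A cross-check using only expand: multiplying the L and U factors quoted in
   the comment above reproduces the original matrix. */
expand(matrix([1, 0, 0], [x - 1, 1, 0], [x - 2, x - 3, 1])
       . matrix([1, x - 2, x - 3], [0, 4, x - 5], [0, 0, x - 7]));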
/* Reduced row echelon form [Cullen, p. 43]
   => [[1 0 -1 0 2], [0 1 2 0 -1], [0 0 0 1 3], [0 0 0 0 0]] */
matrix([1, 2, 3, 1, 3],
       [3, 2, 1, 1, 7],
       [0, 2, 4, 1, 1],
       [1, 1, 1, 1, 4]);
/* => 2.  See Gerald L. Bradley, _A Primer of Linear Algebra_, Prentice-Hall,
   Inc., 1975, p. 135. */
rank(matrix([-1, 3, 7, -5], [4, -2, 1, 3], [2, 4, 15, -7]));
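/* Why the rank is 2: the third row is a linear combination of the first two,
   as this elementwise list arithmetic shows. */
2*[-1, 3, 7, -5] + [4, -2, 1, 3] - [2, 4, 15, -7];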
/* => 1 */
rank(matrix([2*sqrt(2), 8], [6*sqrt(6), 24*sqrt(3)]));
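/* Supporting check, assuming radcan: the determinant vanishes once the
   radicals are combined, so the rank drops to 1. */
radcan(determinant(matrix([2*sqrt(2), 8], [6*sqrt(6), 24*sqrt(3)])));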
/* => 1 */
rank(matrix([sin(2*t), cos(2*t)],
            [2*(1 - cos(t)^2)*cos(t), (1 - 2*sin(t)^2)*sin(t)]));
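/* Supporting check, assuming trigexpand and trigsimp: the determinant is
   identically zero under the standard trigonometric identities, so the rows
   are dependent. */
trigsimp(trigexpand(determinant(matrix([sin(2*t), cos(2*t)],
                                       [2*(1 - cos(t)^2)*cos(t),
                                        (1 - 2*sin(t)^2)*sin(t)]))));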
/* Null space => [[2 4 1 0], [0 -3 0 1]]^T or variant [Bradley, p. 207] */
nullspace(matrix([1, 0, -2, 0], [-2, 1, 0, 3], [-1, 2, -6, 6]));
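/* Quick check that both quoted basis vectors really lie in the null space:
   the product below should be the 3 x 2 zero matrix. */
matrix([1, 0, -2, 0], [-2, 1, 0, 3], [-1, 2, -6, 6]) .
  transpose(matrix([2, 4, 1, 0], [0, -3, 0, 1]));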
/* Define a Vandermonde matrix (useful for doing polynomial interpolations) */
matrix([1,   1,   1,   1  ],
       [w,   x,   y,   z  ],
       [w^2, x^2, y^2, z^2],
       [w^3, x^3, y^3, z^3]);
/* The following formula implies a general result:
   => (w - x) (w - y) (w - z) (x - y) (x - z) (y - z) */
factor(determinant(%));
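/* Verification against the quoted product, using only ratsimp: the difference
   should simplify to 0. */
ratsimp(% - (w - x)*(w - y)*(w - z)*(x - y)*(x - z)*(y - z));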
/* Minimum polynomial => (lambda - 1)^2 (lambda + 1) [Cullen, p. 181] */
matrix([17,  -8, -12, 14],
       [46, -22, -35, 41],
       [-2,   1,   4, -4],
       [ 4,  -2,  -2,  3]);
/* Compute the eigenvalues of a matrix from its characteristic polynomial
   => lambda = {1, -2, 3} */
m: matrix([ 5, -3, -7],
          [-2,  1,  2],
          [ 2, -3, -4])$
charpoly(m, lambda);
solve(% = 0, lambda);
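/* Optional cross-check via the built-in eigenvalue routine. */
eigenvalues(m);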
/* In theory, an easy eigenvalue problem! => lambda = {2 - a} for k = 1..100 */
eigenvalues((2 - a)*ident(100));
/* => lambda = {4 sin^2(pi k/[2 (n + 1)])} for k = 1..n for an n x n matrix.
   For n = 5, lambda = {2 - sqrt(3), 1, 2, 3, 2 + sqrt(3)}
   See J. H. Wilkinson, _The Algebraic Eigenvalue Problem_, Oxford University
   Press, 1965, p. 307. */
matrix([2, 1, 0, 0, 0],
       [1, 2, 1, 0, 0],
       [0, 1, 2, 1, 0],
       [0, 0, 1, 2, 1],
       [0, 0, 0, 1, 2])$
eigenvalues(%);
/* Eigenvalues of the Rosser matrix.  This matrix is notorious for causing
   numerical eigenvalue routines to fail.  [Wester, p. 146 (Cleve Moler)]
   => {-10 sqrt(10405), 0, 510 - 100 sqrt(26), 1000, 1000,
       510 + 100 sqrt(26), 1020, 10 sqrt(10405)} =
      {-1020.049, 0, 0.098, 1000, 1000, 1019.902, 1020, 1020.049} */
rosser: matrix([ 611,  196, -192,  407,   -8,  -52,  -49,   29],
               [ 196,  899,  113, -192,  -71,  -43,   -8,  -44],
               [-192,  113,  899,  196,   61,   49,    8,   52],
               [ 407, -192,  196,  611,    8,   44,   59,  -23],
               [  -8,  -71,   61,    8,  411, -599,  208,  208],
               [ -52,  -43,   49,   44, -599,  411,  208,  208],
               [ -49,   -8,    8,   59,  208,  208,   99, -911],
               [  29,  -44,   52,  -23,  208,  208, -911,   99])$
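/* An exact spot-check using only determinant and ident: 1020 is one of the
   exact eigenvalues quoted above, so this determinant must be 0. */
determinant(rosser - 1020*ident(8));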
errcatch(eigenvalues(sfloat(rosser)));
eigenvalues_by_schur(sfloat(rosser));
/* Eigenvalues of the generalized hypercompanion matrix of
   (x^5 + a4*x^4 + a3*x^3 + a2*x^2 + a1*x + a0)*(x^2 + x + 1)^2
   => {[-1 +- sqrt(3) i]/2, [-1 +- sqrt(3) i]/2,
       RootsOf(x^5 + a4*x^4 + a3*x^3 + a2*x^2 + a1*x + a0)} */
matrix([-a4, -a3, -a2, -a1, -a0,  0,  0,  0,  0],
       [  1,   0,   0,   0,   0,  0,  0,  0,  0],
       [  0,   1,   0,   0,   0,  0,  0,  0,  0],
       [  0,   0,   1,   0,   0,  0,  0,  0,  0],
       [  0,   0,   0,   1,   0,  0,  0,  0,  0],
       [  0,   0,   0,   0,   0, -1, -1,  0,  0],
       [  0,   0,   0,   0,   0,  1,  0,  0,  0],
       [  0,   0,   0,   0,   0,  0,  1, -1, -1],
       [  0,   0,   0,   0,   0,  0,  0,  1,  0])$
eigenvalues(%);
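/* Supporting check, assuming charpoly and the %th output reference: the
   characteristic polynomial of the matrix (two outputs back) should factor as
   (x^2 + x + 1)^2 times the general quintic. */
factor(charpoly(%th(2), x));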
/* Eigenvalues and eigenvectors => lambda = {a, a, a, 1 - i, 1 + i},
   eigenvectors = [[1 0 0 0 0], [0 0 1 0 0], [0 0 0 1 0],
   [0, (1 + i)/2, 0, 0, 1], [0, (1 - i)/2, 0, 0, 1]]^T */
matrix([a,  0, 0, 0, 0],
       [0,  0, 0, 0, 1],
       [0,  0, a, 0, 0],
       [0,  0, 0, a, 0],
       [0, -2, 0, 0, 2])$
eigenvectors(%);
/* Eigenvalues and generalized eigenvectors [Johnson and Riess, p. 193]
   => lambda = {1, 1, 1}, eigenvectors = [[4 -1 4], [1 -1 2], [3 -1 3]]^T */
/* Eigenvalues and generalized eigenvectors [Johnson and Riess, p. 199]
   => lambda = {1, 1, 1, 1, 2, 2}, eigenvectors =
      [[1 -1 0 0 0 0], [-1 0 0 1 0 0], [0 0 1 -1 0 -1],
       [0 0 -1 -2 -1 3], [0 2 0 0 0 0], [2 0 1 1 0 0]]^T */
matrix([1, 0, 1, 1, 0, 1],
/* Jordan form => diag([[1 1],[0 1]], [[1 1],[0 1]], -1) [Gantmacher, p. 172] */
matrix([1,  0,  0,  1, -1],
       [0,  1, -2,  3, -3],
       [0,  0, -1,  2, -2],
       [1, -1,  1,  0,  1],
       [1, -1,  1, -1,  2]);
/* Smith normal form => [[1, 0], [0, x^4 - x^2 + 1]] [Cullen, p. 230] */
matrix([x^2, x - 1], [x + 1, x^2]);
/* Matrix exponential => e [[cos 2, -sin 2], [sin 2, cos 2]] */
mat_expm(matrix([1, -2], [2, 1]));
/* Matrix exponential [Rick Niles] =>
   [[1, 4 sin(w t)/w - 3 t,   6 [w t - sin(w t)], 2/w [1 - cos(w t)]],
    [0, 4 cos(w t) - 3,       6 w [1 - cos(w t)], 2 sin(w t)        ],
    [0, -2/w [1 - cos(w t)],  4 - 3 cos(w t),     sin(w t)/w        ],
    [0, -2 sin(w t),          3 w sin(w t),       cos(w t)          ]] */
matrix([0,    1,     0,   0  ],
       [0,    0,     0,   2*w],
       [0,    0,     0,   1  ],
       [0, -2*w, 3*w^2,   0  ])$
rectform(matrix_exp(%, t));
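/* Verification of the closed form quoted above, using only diff, ratsimp and
   subst; sysA and Phi are local names for the system matrix and the quoted
   solution.  The first result should be the zero matrix (Phi'(t) = sysA . Phi)
   and the second the identity (Phi(0) = I). */
sysA: matrix([0, 1, 0, 0], [0, 0, 0, 2*w], [0, 0, 0, 1], [0, -2*w, 3*w^2, 0])$
Phi: matrix([1, 4*sin(w*t)/w - 3*t,  6*(w*t - sin(w*t)), 2/w*(1 - cos(w*t))],
            [0, 4*cos(w*t) - 3,      6*w*(1 - cos(w*t)), 2*sin(w*t)        ],
            [0, -2/w*(1 - cos(w*t)), 4 - 3*cos(w*t),     sin(w*t)/w        ],
            [0, -2*sin(w*t),         3*w*sin(w*t),       cos(w*t)          ])$
[ratsimp(diff(Phi, t) - sysA . Phi), subst(t = 0, Phi)];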
/* Sine of a Jordan matrix => diag([[sin a, cos a],[0, sin a]], sin b,
   [[sin c, cos c, -sin(c)/2],[0, sin c, cos c],[0, 0, sin c]])
   See F. R. Gantmacher, _The Theory of Matrices_, Volume One, Chelsea
   Publishing Company, 1977, p. 100 to see how to do a general function. */
matrix([a, 1, 0, 0, 0, 0],
       [0, a, 0, 0, 0, 0],
       [0, 0, b, 0, 0, 0],
       [0, 0, 0, c, 1, 0],
       [0, 0, 0, 0, c, 1],
       [0, 0, 0, 0, 0, c])$
/* Sine of a matrix => [[1 0 0], [0 1 0], [0 0 1]] [Cullen, p. 261] */
%pi/2*matrix([2, 1, 1], [2, 3, 2], [1, 1, 2])$
/* Matrix square root => {+-[[3 1], [1 4]], +-1/sqrt(5) [[-1 7], [7 6]]} */
matrix([10, 7], [7, 17]);
mat_sqrtm(%);
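/* Quick verification that both quoted roots square back to the matrix. */
[matrix([3, 1], [1, 4]) . matrix([3, 1], [1, 4]),
 expand((1/sqrt(5)*matrix([-1, 7], [7, 6])) . (1/sqrt(5)*matrix([-1, 7], [7, 6])))];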
/* Square root of a non-singular matrix [Gantmacher, p. 233]
   => [[e, (e - n) v w + e/2, (n - e) v], [0, e, 0], [0, (e - n) w, n]]
   for arbitrary v and w with arbitrary signs e and n = +-1 */
matrix([1, 1, 0], [0, 1, 0], [0, 0, 1])$
mat_sqrtm(%);
/* Square root of a singular matrix [Gantmacher, p. 239]
   => [[0 a b], [0 0 0], [0 1/b 0]] for arbitrary a and b */
matrix([0, 1, 0], [0, 0, 0], [0, 0, 0])$
errcatch(mat_sqrtm(%));
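/* Check of the quoted parametrized answer with symbolic a and b (b # 0):
   squaring it does reproduce the singular matrix above. */
matrix([0, a, b], [0, 0, 0], [0, 1/b, 0]) .
  matrix([0, a, b], [0, 0, 0], [0, 1/b, 0]);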
/* Singular value decomposition
   => [1/sqrt(14)  3/sqrt(10)  1/sqrt(35)] [2 sqrt(7) 0] [1/sqrt(2)  1/sqrt(2)]
      [2/sqrt(14)      0       -sqrt(5/7)] [0         0] [1/sqrt(2) -1/sqrt(2)]
      [3/sqrt(14) -1/sqrt(10)  3/sqrt(35)] [0         0]
      = U Sigma V^T --- singular values are [2 sqrt(7), 0] */
matrix([1, 1], [2, 2], [3, 3]);
[u, s, vT]: svd_symb(%);
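/* Reconstruction check of the quoted factors, assuming matrixmap and radcan:
   U . Sigma . transpose(V) should give back the original matrix. */
matrixmap(radcan,
          matrix([1/sqrt(14), 3/sqrt(10), 1/sqrt(35)],
                 [2/sqrt(14), 0, -sqrt(5/7)],
                 [3/sqrt(14), -1/sqrt(10), 3/sqrt(35)])
          . matrix([2*sqrt(7), 0], [0, 0], [0, 0])
          . transpose(matrix([1/sqrt(2), 1/sqrt(2)], [1/sqrt(2), -1/sqrt(2)])));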
/* Jacobian of (r cos t, r sin t) => [[cos t, -r sin t], [sin t, r cos t]] */
jacobian([r*cos(t), r*sin(t)], [r, t]);
/* Hessian of r^2 sin t => [[2 sin t, 2 r cos t], [2 r cos t, -r^2 sin t]] */
hessian(r^2*sin(t), [r, t]);
/* Wronskian of (cos t, sin t) => [[cos t, sin t], [-sin t, cos t]] */
wronskian([cos(t), sin(t)], t);
/* How easy is it to define functions to do the last three operations?
   Jacobian of (r cos t, r sin t) => [[cos t, -r sin t], [sin t, r cos t]] */
MYjacobian(f, x):= block([n: length(x)],
   genmatrix(lambda([i, j], diff(f[i], x[j])), n, n))$
MYjacobian([r*cos(t), r*sin(t)], [r, t]);
/* Hessian of r^2 sin t => [[2 sin t, 2 r cos t], [2 r cos t, -r^2 sin t]] */
MYhessian(f, x):= block([n: length(x)],
   genmatrix(lambda([i, j], diff(f, x[i], 1, x[j], 1)), n, n))$
MYhessian(r^2*sin(t), [r, t]);
/* Wronskian of (cos t, sin t) => [[cos t, sin t], [-sin t, cos t]] */
MYwronskian(f, x):= block([n: length(f)],
   genmatrix(lambda([i, j], diff(f[j], x, i-1)), n, n))$
MYwronskian([cos(t), sin(t)], t);