if (U.is_square () && fact.regular ())
DEFUN (lu, args, nargout,
@deftypefn {Built-in Function} {[@var{L}, @var{U}] =} lu (@var{A})\n\
@deftypefnx {Built-in Function} {[@var{L}, @var{U}, @var{P}] =} lu (@var{A})\n\
@deftypefnx {Built-in Function} {[@var{L}, @var{U}, @var{P}, @var{Q}] =} lu (@var{S})\n\
@deftypefnx {Built-in Function} {[@var{L}, @var{U}, @var{P}, @var{Q}, @var{R}] =} lu (@var{S})\n\
@deftypefnx {Built-in Function} {[@dots{}] =} lu (@var{S}, @var{thres})\n\
@deftypefnx {Built-in Function} {@var{y} =} lu (@dots{})\n\
@deftypefnx {Built-in Function} {[@dots{}] =} lu (@dots{}, \"vector\")\n\
@cindex LU decomposition\n\
Compute the LU@tie{}decomposition of @var{A}.  If @var{A} is full,\n\
subroutines from @sc{lapack} are used, and if @var{A} is sparse then\n\
@sc{umfpack} is used.  The result is returned in a permuted form, according\n\
to the optional return value @var{P}.  For example, given the matrix\n\
@code{a = [1, 2; 3, 4]},\n\
\n\
@example\n\
[l, u, p] = lu (@var{a})\n\
@end example\n\
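\n\
@noindent\n\
where partial pivoting exchanges the two rows, so the returned factors are\n\
approximately\n\
\n\
@example\n\
@group\n\
l =\n\
\n\
   1.00000   0.00000\n\
   0.33333   1.00000\n\
\n\
u =\n\
\n\
   3.00000   4.00000\n\
   0.00000   0.66667\n\
\n\
p =\n\
\n\
   0   1\n\
   1   0\n\
@end group\n\
@end example\n\
\n\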
The matrix is not required to be square.\n\
\n\
When called with two or three output arguments and a sparse input matrix,\n\
@code{lu} does not attempt to perform sparsity preserving column\n\
permutations.  Called with a fourth output argument, the sparsity\n\
preserving column permutation @var{Q} is returned, such that\n\
@code{@var{P} * @var{A} * @var{Q} = @var{L} * @var{U}}.\n\
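\n\
For instance, the relation can be checked numerically with a small random\n\
sparse matrix (an illustrative sketch; the factors themselves depend on the\n\
ordering chosen by @sc{umfpack}):\n\
\n\
@example\n\
@group\n\
S = sprand (100, 100, 0.05) + speye (100);\n\
[L, U, P, Q] = lu (S);\n\
norm (P * S * Q - L * U, \"fro\")   # close to zero\n\
@end group\n\
@end example\n\
\n\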
Called with a fifth output argument and a sparse input matrix,\n\
@code{lu} attempts to use a scaling factor @var{R} on the input matrix\n\
such that\n\
@code{@var{P} * (@var{R} \\ @var{A}) * @var{Q} = @var{L} * @var{U}}.\n\
This typically leads to a sparser and more stable factorization.\n\
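\n\
Continuing the sketch above, the scaled relation can be verified in the\n\
same way:\n\
\n\
@example\n\
@group\n\
[L, U, P, Q, R] = lu (S);\n\
norm (P * (R \\ S) * Q - L * U, \"fro\")   # close to zero\n\
@end group\n\
@end example\n\
\n\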
An additional input argument @var{thres}, which defines the pivoting\n\
threshold, can be given.  @var{thres} can be a scalar, in which case\n\
it defines the @sc{umfpack} pivoting tolerance for both symmetric and\n\
unsymmetric cases.  If @var{thres} is a 2-element vector, then the first\n\
element defines the pivoting tolerance for the unsymmetric @sc{umfpack}\n\
pivoting strategy and the second for the symmetric strategy.  By default,\n\
the values defined by @code{spparms} are used ([0.1, 0.001]).\n\
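\n\
For example, a scalar threshold of 1 requests the strictest pivoting\n\
@sc{umfpack} offers, usually at the cost of additional fill-in:\n\
\n\
@example\n\
[L, U, P, Q] = lu (S, 1.0);\n\
@end example\n\
\n\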
Given the string argument @qcode{\"vector\"}, @code{lu} returns the values\n\
of @var{P} and @var{Q} as permutation vectors, such that for a full matrix,\n\
@code{@var{A}(@var{P},:) = @var{L} * @var{U}}, and for a sparse matrix,\n\
@code{@var{R}(@var{P},:) * @var{A}(:, @var{Q}) = @var{L} * @var{U}}.\n\
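\n\
For instance, with a full matrix @code{A} the row permutation can then be\n\
applied by indexing:\n\
\n\
@example\n\
@group\n\
[L, U, p] = lu (A, \"vector\");\n\
norm (A(p,:) - L * U)   # close to zero\n\
@end group\n\
@end example\n\
\n\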
With two output arguments, @code{lu} returns the upper triangular factor\n\
and a row-permuted form of the lower triangular factor, such that\n\
@code{@var{A} = @var{L} * @var{U}}.\n\
With one output argument @var{y}, the matrix returned by the @sc{lapack}\n\
routines is returned.  If the input matrix is sparse then the matrix @var{L}\n\
is embedded into @var{U} to give a return value similar to the full case.\n\
For both full and sparse matrices, @code{lu} loses the permutation\n\
information.\n\
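\n\
For instance, for a full matrix @code{A}:\n\
\n\
@example\n\
@group\n\
[L, U] = lu (A);   # L is row permuted, so A == L * U directly\n\
y = lu (A);        # single matrix as returned by the @sc{lapack} routines\n\
@end group\n\
@end example\n\
\n\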
@seealso{luupdate, chol, hess, qr, qz, schur, svd}\n\
int nargin = args.length ();
bool issparse = (nargin > 0 && args(0).is_sparse_type ());
bool scale = (nargout == 5);
if (nargin < 1 || (issparse && (nargin > 3 || nargout > 5))
    || (!issparse && (nargin > 2 || nargout > 3)))
if (args (n).is_string ())
std::string tmp = args(n++).string_value ();
if (tmp.compare ("vector") == 0)
error ("lu: unrecognized string argument");
Matrix tmp = args(n++).matrix_value ();
error ("lu: cannot define pivoting threshold THRES for full matrices");
else if (tmp.nelem () == 1)
else if (tmp.nelem () == 2)
error ("lu: expecting 2-element vector for THRES");
int arg_is_empty = empty_arg ("lu", nr, nc);
if (arg_is_empty < 0)
else if (arg_is_empty > 0)
SparseLU fact (m, Qinit, thres, false, true);
retval(0) = fact.Y ();
SparseLU fact (m, Qinit, thres, false, true);
retval(2) = fact.Pr_vec ();
retval(2) = fact.Pr_mat ();
retval(4) = fact.R ();
retval(3) = fact.Pc_vec ();
retval(2) = fact.Pr_vec ();
retval(3) = fact.Pc_mat ();
retval(2) = fact.Pr_mat ();
retval(0) = fact.Y ();
retval(2) = fact.Pr_vec ();
retval(2) = fact.Pr_mat ();
retval(4) = fact.R ();
retval(3) = fact.Pc_vec ();
retval(2) = fact.Pr_vec ();
retval(3) = fact.Pc_mat ();
retval(2) = fact.Pr_mat ();
if (arg_is_empty < 0)
else if (arg_is_empty > 0)
retval(0) = fact.Y ();
retval(2) = fact.P_vec ();
retval(2) = fact.P ();
retval(0) = fact.Y ();
retval(2) = fact.P_vec ();
retval(2) = fact.P ();
retval(0) = fact.Y ();
retval(2) = fact.P_vec ();
retval(2) = fact.P ();
retval(0) = fact.Y ();
retval(2) = fact.P_vec ();
retval(2) = fact.P ();
DEFUN (luupdate, args, ,
@deftypefn {Built-in Function} {[@var{L}, @var{U}] =} luupdate (@var{L}, @var{U}, @var{x}, @var{y})\n\
@deftypefnx {Built-in Function} {[@var{L}, @var{U}, @var{P}] =} luupdate (@var{L}, @var{U}, @var{P}, @var{x}, @var{y})\n\
Given an LU@tie{}factorization of a real or complex matrix\n\
@w{@var{A} = @var{L}*@var{U}}, @var{L}@tie{}lower unit trapezoidal and\n\
@var{U}@tie{}upper trapezoidal, return the LU@tie{}factorization\n\
of @w{@var{A} + @var{x}*@var{y}.'}, where @var{x} and @var{y} are\n\
column vectors (rank-1 update) or matrices with an equal number of columns\n\
(rank-k update).\n\
\n\
Optionally, row-pivoted updating can be used by supplying\n\
a row permutation (pivoting) matrix @var{P};\n\
in that case, an updated permutation matrix is returned.\n\
Note that if @var{L}, @var{U}, @var{P} is a pivoted LU@tie{}factorization\n\
as obtained by @code{lu}:\n\
\n\
@example\n\
[@var{L}, @var{U}, @var{P}] = lu (@var{A});\n\
@end example\n\
\n\
@noindent\n\
then a factorization of @tcode{@var{A}+@var{x}*@var{y}.'} can be obtained\n\
either as\n\
\n\
@example\n\
[@var{L1}, @var{U1}] = luupdate (@var{L}, @var{U}, @var{P}*@var{x}, @var{y})\n\
@end example\n\
\n\
@noindent\n\
or\n\
\n\
@example\n\
[@var{L1}, @var{U1}, @var{P1}] = luupdate (@var{L}, @var{U}, @var{P}, @var{x}, @var{y})\n\
@end example\n\
\n\
The first form uses the unpivoted algorithm, which is faster, but less\n\
stable.  The second form uses a slower pivoted algorithm, which is more\n\
stable.\n\
\n\
The matrix case is done as a sequence of rank-1 updates;\n\
thus, for large enough k, it will be both faster and more accurate to\n\
recompute the factorization from scratch.\n\
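\n\
For instance, a minimal sketch with random data:\n\
\n\
@example\n\
@group\n\
A = rand (5);  x = rand (5, 1);  y = rand (5, 1);\n\
[L, U, P] = lu (A);\n\
[L1, U1, P1] = luupdate (L, U, P, x, y);\n\
norm (P1' * L1 * U1 - (A + x*y.'), \"fro\")   # close to zero\n\
@end group\n\
@end example\n\
\n\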
@seealso{lu, cholupdate, qrupdate}\n\
bool pivoted = nargin == 5;
if (nargin != 4 && nargin != 5)
retval(2) = fact.P ();
retval(2) = fact.P ();
retval(2) = fact.P ();
retval(2) = fact.P ();
error ("luupdate: dimension mismatch");
error ("luupdate: L, U, X, and Y must be numeric");