GNU Octave  9.1.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
sparse-qr.cc
1 ////////////////////////////////////////////////////////////////////////
2 //
3 // Copyright (C) 2005-2024 The Octave Project Developers
4 //
5 // See the file COPYRIGHT.md in the top-level directory of this
6 // distribution or <https://octave.org/copyright/>.
7 //
8 // This file is part of Octave.
9 //
10 // Octave is free software: you can redistribute it and/or modify it
11 // under the terms of the GNU General Public License as published by
12 // the Free Software Foundation, either version 3 of the License, or
13 // (at your option) any later version.
14 //
15 // Octave is distributed in the hope that it will be useful, but
16 // WITHOUT ANY WARRANTY; without even the implied warranty of
17 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 // GNU General Public License for more details.
19 //
20 // You should have received a copy of the GNU General Public License
21 // along with Octave; see the file COPYING. If not, see
22 // <https://www.gnu.org/licenses/>.
23 //
24 ////////////////////////////////////////////////////////////////////////
25 
26 #if defined (HAVE_CONFIG_H)
27 # include "config.h"
28 #endif
29 
30 #include "CMatrix.h"
31 #include "CSparse.h"
32 #include "MArray.h"
33 #include "dColVector.h"
34 #include "dMatrix.h"
35 #include "dSparse.h"
36 #include "lo-error.h"
37 #include "oct-locbuf.h"
38 #include "oct-sparse.h"
39 #include "quit.h"
40 #include "sparse-qr.h"
41 
42 OCTAVE_BEGIN_NAMESPACE(octave)
43 
44 OCTAVE_BEGIN_NAMESPACE(math)
45 
46 #if defined (HAVE_CXSPARSE)
47 template <typename SPARSE_T>
48 class
49 cxsparse_types
50 { };
51 
52 template <>
53 class
54 cxsparse_types<SparseMatrix>
55 {
56 public:
57  typedef CXSPARSE_DNAME (s) symbolic_type;
58  typedef CXSPARSE_DNAME (n) numeric_type;
59 };
60 
61 template <>
62 class
63 cxsparse_types<SparseComplexMatrix>
64 {
65 public:
66  typedef CXSPARSE_ZNAME (s) symbolic_type;
67  typedef CXSPARSE_ZNAME (n) numeric_type;
68 };
69 #endif
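// Editorial note (not in the original file): cxsparse_types is a traits
// class that maps each Octave sparse matrix type to the CXSparse symbolic
// ('s') and numeric ('n') factorization structs of the matching element
// type, so the generic sparse_qr_rep code below can name both uniformly,
// for example:
//
//   typename cxsparse_types<SparseMatrix>::symbolic_type *S;         // real
//   typename cxsparse_types<SparseComplexMatrix>::numeric_type *Nz;  // complex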
70 
71 template <typename SPARSE_T>
72 class sparse_qr<SPARSE_T>::sparse_qr_rep
73 {
74 public:
75 
76  sparse_qr_rep (const SPARSE_T& a, int order);
77 
78  OCTAVE_DISABLE_CONSTRUCT_COPY_MOVE (sparse_qr_rep)
79 
80  ~sparse_qr_rep ();
81 
82  bool ok () const
83  {
84 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
85  return (m_H && m_Htau && m_HPinv && m_R && m_E);
86 #elif defined (HAVE_CXSPARSE)
87  return (N && S);
88 #else
89  return false;
90 #endif
91  }
92 
93  SPARSE_T V () const;
94 
95  ColumnVector Pinv () const;
96 
97  ColumnVector P () const;
98 
99  ColumnVector E () const;
100 
101  SPARSE_T R (bool econ) const;
102 
103  typename SPARSE_T::dense_matrix_type
104  C (const typename SPARSE_T::dense_matrix_type& b, bool econ = false);
105 
106  typename SPARSE_T::dense_matrix_type Q (bool econ = false);
107 
108  octave_idx_type nrows;
109  octave_idx_type ncols;
110 
111 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
112 
113  template <typename RHS_T, typename RET_T>
114  RET_T solve (const RHS_T& b, octave_idx_type& info) const;
115 
116 #endif
117 
118 #if defined (HAVE_CXSPARSE)
119 
120  typename cxsparse_types<SPARSE_T>::symbolic_type *S;
121  typename cxsparse_types<SPARSE_T>::numeric_type *N;
122 
123 #endif
124 
125  template <typename RHS_T, typename RET_T>
126  RET_T tall_solve (const RHS_T& b, octave_idx_type& info);
127 
128  template <typename RHS_T, typename RET_T>
129  RET_T wide_solve (const RHS_T& b, octave_idx_type& info) const;
130 
131 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
132 
133 private:
134 
135  cholmod_common m_cc;
136  cholmod_sparse *m_R; // R factor
137  // Column permutation for A. Fill-reducing ordering.
138  SuiteSparse_long *m_E;
139  cholmod_sparse *m_H; // Householder vectors
140  cholmod_dense *m_Htau; // beta scalars
141  SuiteSparse_long *m_HPinv;
142 
143 #endif
144 };
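// Editorial note (not in the original file): tall_solve is used for the
// overdetermined (least-squares) case, while wide_solve handles
// underdetermined systems.  As the comments in the specializations below
// state, the caller (sparse_qr<T>::solve) factors the transposed matrix
// before calling wide_solve, which is why nrows and ncols appear swapped
// inside it.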
145 
146 template <typename SPARSE_T>
147 ColumnVector
148 sparse_qr<SPARSE_T>::sparse_qr_rep::Pinv () const
149 {
150 #if defined (HAVE_CXSPARSE)
151 
152  ColumnVector ret (N->L->m);
153 
154  for (octave_idx_type i = 0; i < N->L->m; i++)
155  ret.xelem (i) = S->pinv[i];
156 
157  return ret;
158 
159 #else
160 
161  return ColumnVector ();
162 
163 #endif
164 }
165 
166 template <typename SPARSE_T>
167 ColumnVector
168 sparse_qr<SPARSE_T>::sparse_qr_rep::P () const
169 {
170 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
171 
172  ColumnVector ret (nrows);
173 
174  // FIXME: Is ret.xelem (m_HPinv[i]) = i + 1 correct?
175  for (octave_idx_type i = 0; i < nrows; i++)
176  ret.xelem (from_suitesparse_long (m_HPinv[i])) = i + 1;
177 
178  return ret;
179 
180 #elif defined (HAVE_CXSPARSE)
181 
182  ColumnVector ret (N->L->m);
183 
184  for (octave_idx_type i = 0; i < N->L->m; i++)
185  ret.xelem (S->pinv[i]) = i;
186 
187  return ret;
188 
189 #else
190 
191  return ColumnVector ();
192 
193 #endif
194 }
195 
196 template <typename SPARSE_T>
197 ColumnVector
198 sparse_qr<SPARSE_T>::sparse_qr_rep::E () const
199 {
200 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
201 
202  ColumnVector ret (ncols);
203 
204  if (m_E)
205  for (octave_idx_type i = 0; i < ncols; i++)
206  ret(i) = from_suitesparse_long (m_E[i]) + 1;
207  else
208  for (octave_idx_type i = 0; i < ncols; i++)
209  ret(i) = i + 1;
210 
211  return ret;
212 
213 #else
214 
215  return ColumnVector ();
216 
217 #endif
218 }
219 
220 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
221 
222 // Convert real sparse octave matrix to real sparse cholmod matrix.
223 // Returns a "shallow" copy of a.
224 static cholmod_sparse
225 ros2rcs (const SparseMatrix& a)
226 {
227  cholmod_sparse A;
228 
229  octave_idx_type ncols = a.cols ();
230  octave_idx_type nnz = a.nnz ();
231 
232  A.ncol = ncols;
233  A.nrow = a.rows ();
234  A.itype = CHOLMOD_LONG;
235  A.nzmax = nnz;
236  A.sorted = 0;
237  A.packed = 1;
238  A.stype = 0;
239  A.xtype = CHOLMOD_REAL;
240  A.dtype = CHOLMOD_DOUBLE;
241  A.nz = nullptr;
242  A.z = nullptr;
243  if (sizeof (octave_idx_type) == sizeof (SuiteSparse_long))
244  {
245  A.p = reinterpret_cast<SuiteSparse_long *> (a.cidx ());
246  A.i = reinterpret_cast<SuiteSparse_long *> (a.ridx ());
247  }
248  else
249  {
250  SuiteSparse_long *A_p;
251  A_p = new SuiteSparse_long[ncols+1];
252  for (octave_idx_type i = 0; i < ncols+1; i++)
253  A_p[i] = a.cidx (i);
254  A.p = A_p;
255  SuiteSparse_long *A_i;
256  A_i = new SuiteSparse_long[nnz];
257  for (octave_idx_type i = 0; i < nnz; i++)
258  A_i[i] = a.ridx (i);
259  A.i = A_i;
260  }
261  A.x = const_cast<double *> (a.data ());
262 
263  return A;
264 }
265 
266 // Convert complex sparse octave matrix to complex sparse cholmod matrix.
267 // Returns a "shallow" copy of a.
268 static cholmod_sparse
269 cos2ccs (const SparseComplexMatrix& a)
270 {
271  cholmod_sparse A;
272 
273  octave_idx_type ncols = a.cols ();
274  octave_idx_type nnz = a.nnz ();
275 
276  A.ncol = ncols;
277  A.nrow = a.rows ();
278  A.itype = CHOLMOD_LONG;
279  A.nzmax = nnz;
280  A.sorted = 0;
281  A.packed = 1;
282  A.stype = 0;
283  A.xtype = CHOLMOD_COMPLEX;
284  A.dtype = CHOLMOD_DOUBLE;
285  A.nz = nullptr;
286  A.z = nullptr;
287  if (sizeof (octave_idx_type) == sizeof (SuiteSparse_long))
288  {
289  A.p = reinterpret_cast<SuiteSparse_long *> (a.cidx ());
290  A.i = reinterpret_cast<SuiteSparse_long *> (a.ridx ());
291  }
292  else
293  {
294  SuiteSparse_long *A_p;
295  A_p = new SuiteSparse_long[ncols+1];
296  for (octave_idx_type i = 0; i < ncols+1; i++)
297  A_p[i] = a.cidx (i);
298  A.p = A_p;
299  SuiteSparse_long *A_i;
300  A_i = new SuiteSparse_long[nnz];
301  for (octave_idx_type i = 0; i < nnz; i++)
302  A_i[i] = a.ridx (i);
303  A.i = A_i;
304  }
305  A.x = const_cast<Complex *>
306  (reinterpret_cast<const Complex *> (a.data ()));
307 
308  return A;
309 }
310 
311 // Convert real dense octave matrix to complex dense cholmod matrix.
312 // Returns a "deep" copy of a.
313 static cholmod_dense *
314 rod2ccd (const MArray<double>& a, cholmod_common *cc1)
315 {
316  cholmod_dense *A
317  = cholmod_l_allocate_dense (a.rows (), a.cols (), a.rows(),
318  CHOLMOD_COMPLEX, cc1);
319 
320  const double *a_x = a.data ();
321 
322  Complex *A_x = reinterpret_cast<Complex *> (A->x);
323  for (octave_idx_type j = 0; j < a.cols() * a.rows() ; j++)
324  A_x[j] = Complex (a_x[j], 0.0);
325 
326  return A;
327 }
328 
329 // Convert real dense octave matrix to real dense cholmod matrix.
330 // Returns a "shallow" copy of a.
331 static cholmod_dense
332 rod2rcd (const MArray<double>& a)
333 {
334  cholmod_dense A;
335 
336  A.ncol = a.cols ();
337  A.nrow = a.rows ();
338  A.nzmax = a.cols () * a.rows ();
339  A.xtype = CHOLMOD_REAL;
340  A.dtype = CHOLMOD_DOUBLE;
341  A.z = nullptr;
342  A.d = a.rows ();
343  A.x = const_cast<double *> (a.data ());
344 
345  return A;
346 }
347 
348 // Convert complex dense octave matrix to complex dense cholmod matrix.
349 // Returns a "shallow" copy of a.
350 static cholmod_dense
351 cod2ccd (const ComplexMatrix& a)
352 {
353  cholmod_dense A;
354 
355  A.ncol = a.cols ();
356  A.nrow = a.rows ();
357  A.nzmax = a.cols () * a.rows ();
358  A.xtype = CHOLMOD_COMPLEX;
359  A.dtype = CHOLMOD_DOUBLE;
360  A.z = nullptr;
361  A.d = a.rows ();
362  A.x = const_cast<Complex *> (reinterpret_cast<const Complex *> (a.data ()));
363 
364  return A;
365 }
366 
367 // Convert real sparse cholmod matrix to real sparse octave matrix.
368 // Returns a "deep" copy of y.
369 static SparseMatrix
370 rcs2ros (cholmod_sparse *y, const cholmod_common *cc1)
371 {
372  octave_idx_type nrow = from_size_t (y->nrow);
373  octave_idx_type ncol = from_size_t (y->ncol);
374  octave_idx_type nz
375  = from_suitesparse_long
376  (cholmod_l_nnz (y, const_cast<cholmod_common *> (cc1)));
377  SparseMatrix ret (nrow, ncol, nz);
378 
379  SuiteSparse_long *y_p = reinterpret_cast<SuiteSparse_long *> (y->p);
380  for (octave_idx_type j = 0; j < ncol + 1; j++)
381  ret.xcidx (j) = from_suitesparse_long (y_p[j]);
382 
383  SuiteSparse_long *y_i = reinterpret_cast<SuiteSparse_long *> (y->i);
384  double *y_x = reinterpret_cast<double *> (y->x);
385  for (octave_idx_type j = 0; j < nz; j++)
386  {
387  ret.xridx (j) = from_suitesparse_long (y_i[j]);
388  ret.xdata (j) = y_x[j];
389  }
390 
391  return ret;
392 }
393 
394 // Convert complex sparse cholmod matrix to complex sparse octave matrix.
395 // Returns a "deep" copy of a.
396 static SparseComplexMatrix
397 ccs2cos (cholmod_sparse *a, cholmod_common *cc1)
398 {
399  octave_idx_type nrow = from_size_t (a->nrow);
400  octave_idx_type ncol = from_size_t (a->ncol);
401  octave_idx_type nz = from_suitesparse_long (cholmod_l_nnz (a, cc1));
402  SparseComplexMatrix ret (nrow, ncol, nz);
403 
404  SuiteSparse_long *a_p = reinterpret_cast<SuiteSparse_long *> (a->p);
405  for (octave_idx_type j = 0; j < ncol + 1; j++)
406  ret.xcidx(j) = from_suitesparse_long (a_p[j]);
407 
408  SuiteSparse_long *a_i = reinterpret_cast<SuiteSparse_long *> (a->i);
409  Complex *a_x = reinterpret_cast<Complex *> (a->x);
410  for (octave_idx_type j = 0; j < nz; j++)
411  {
412  ret.xridx(j) = from_suitesparse_long (a_i[j]);
413  ret.xdata(j) = a_x[j];
414  }
415 
416  return ret;
417 }
418 
419 // Convert real sparse octave matrix to complex sparse cholmod matrix.
420 // Returns a "deep" copy of a.
421 static cholmod_sparse *
422 ros2ccs (const SparseMatrix& a, cholmod_common *cc)
423 {
424  cholmod_sparse *A
425  = cholmod_l_allocate_sparse (a.rows (), a.cols (), a.nnz (), 0, 1, 0,
426  CHOLMOD_COMPLEX, cc);
427 
428  octave_idx_type ncol = a.cols ();
429  SuiteSparse_long *A_p = reinterpret_cast<SuiteSparse_long *> (A->p);
430  for (octave_idx_type j = 0; j < ncol + 1; j++)
431  A_p[j] = a.cidx(j);
432 
433  const double *a_x = a.data ();
434  Complex *A_x = reinterpret_cast<Complex *> (A->x);
435  SuiteSparse_long *A_i = reinterpret_cast<SuiteSparse_long *> (A->i);
436  for (octave_idx_type j = 0; j < a.nnz (); j++)
437  {
438  A_x[j] = Complex (a_x[j], 0.0);
439  A_i[j] = a.ridx(j);
440  }
441  return A;
442 }
443 
444 static suitesparse_integer
445 suitesparse_long_to_suitesparse_integer (SuiteSparse_long x)
446 {
449  (*current_liboctave_error_handler)
450  ("integer dimension or index out of range for SuiteSparse's indexing type");
451 
452  return static_cast<suitesparse_integer> (x);
453 }
454 
455 static void
456 spqr_error_handler (const cholmod_common *cc)
457 {
458  if (cc->status >= 0)
459  return;
460 
461  switch (cc->status)
462  {
463  case CHOLMOD_OUT_OF_MEMORY:
464  (*current_liboctave_error_handler)
465  ("sparse_qr: sparse matrix QR factorization failed"
466  " - out of memory");
467  case CHOLMOD_TOO_LARGE:
468  (*current_liboctave_error_handler)
469  ("sparse_qr: sparse matrix QR factorization failed"
470  " - integer overflow occurred");
471  default:
472  (*current_liboctave_error_handler)
473  ("sparse_qr: sparse matrix QR factorization failed"
474  " - error %d", cc->status);
475  }
476 
477  // FIXME: Free memory?
478  // FIXME: Can cc->status > 0 (CHOLMOD_NOT_POSDEF, CHOLMOD_DSMALL) occur?
479 }
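// Editorial note (not in the original file): the switch above has no
// 'break' statements.  This relies on current_liboctave_error_handler not
// returning here (it raises an Octave error), so only the first matching
// message is ever issued.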
480 #endif
481 
482 // Specializations.
483 
484 // Real-valued matrices.
485 
486 // Arguments for parameter order (taken from SuiteSparseQR documentation).
487 // 0: fixed ordering 0 (no permutation of columns)
488 // 1: natural ordering 1 (only singleton columns are permuted to the left of
489 // the matrix)
490 // 2: colamd
491 // 3:
492 // 4: CHOLMOD best-effort (COLAMD, METIS,...)
493 // 5: AMD(a'*a)
494 // 6: metis(a'*a)
495 // 7: SuiteSparseQR default ordering
496 // 8: try COLAMD, AMD, and METIS; pick best
497 // 9: try COLAMD and AMD; pick best
498 //FIXME: What is order = 3?
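// Editorial sketch (not in the original file): the 'order' argument is
// forwarded unchanged to SPQR (or to CXSparse's *_sqr) by the constructors
// below.  Assuming the public sparse_qr<T> wrapper declared in sparse-qr.h,
// a caller could request the COLAMD ordering roughly like this:
//
//   SparseMatrix a = ...;                               // m-by-n input
//   octave::math::sparse_qr<SparseMatrix> fact (a, 2);  // 2: colamd
//   SparseMatrix R = fact.R (false);                    // full-size R factor
//   ColumnVector e = fact.E ();                         // column permutation (SPQR path)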
499 template <>
500 sparse_qr<SparseMatrix>::sparse_qr_rep::sparse_qr_rep
501 (const SparseMatrix& a, int order)
502  : nrows (a.rows ()), ncols (a.columns ())
503 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
504  , m_cc (), m_R (nullptr), m_E (nullptr), m_H (nullptr), m_Htau (nullptr),
505  m_HPinv (nullptr)
506 {
507  octave_idx_type nr = a.rows ();
508  octave_idx_type nc = a.cols ();
509 
510  if (nr < 0 || nc < 0)
511  (*current_liboctave_error_handler)
512  ("matrix dimension with negative size");
513 
514  if (order < 0 || order > 9)
515  (*current_liboctave_error_handler)
516  ("ordering %d is not supported by SPQR", order);
517 
518  cholmod_l_start (&m_cc);
519  cholmod_sparse A = ros2rcs (a);
520 
521  SuiteSparseQR<double> (order, static_cast<double> (SPQR_DEFAULT_TOL),
522  static_cast<SuiteSparse_long> (A.nrow),
523  &A, &m_R, &m_E, &m_H, &m_HPinv, &m_Htau, &m_cc);
524  spqr_error_handler (&m_cc);
525 
526  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
527  {
528  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
529  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
530  }
531 }
532 
533 #elif defined (HAVE_CXSPARSE)
534  , S (nullptr), N (nullptr)
535 {
536  CXSPARSE_DNAME () A;
537 
538  A.nzmax = a.nnz ();
539  A.m = nrows;
540  A.n = ncols;
541  // Cast away const on A, with full knowledge that CSparse won't touch it
542  // Prevents the methods below making a copy of the data.
543  A.p = const_cast<suitesparse_integer *>
544  (to_suitesparse_intptr (a.cidx ()));
545  A.i = const_cast<suitesparse_integer *>
546  (to_suitesparse_intptr (a.ridx ()));
547  A.x = const_cast<double *> (a.data ());
548  A.nz = -1;
549 
550  S = CXSPARSE_DNAME (_sqr) (order, &A, 1);
551  N = CXSPARSE_DNAME (_qr) (&A, S);
552 
553  if (! N)
554  (*current_liboctave_error_handler)
555  ("sparse_qr: sparse matrix QR factorization failed");
556 
557 }
558 
559 #else
560 
561 {
562  octave_unused_parameter (order);
563 
564  (*current_liboctave_error_handler)
565  ("sparse_qr: support for SPQR or CXSparse was unavailable or disabled when liboctave was built");
566 }
567 
568 #endif
569 
570 template <>
571 sparse_qr<SparseMatrix>::sparse_qr_rep::~sparse_qr_rep ()
572 {
573 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
574 
575  cholmod_l_free_sparse (&m_R, &m_cc);
576  cholmod_l_free_sparse (&m_H, &m_cc);
577  cholmod_l_free_dense (&m_Htau, &m_cc);
578  free (m_E); // FIXME: use cholmod_l_free
579  free (m_HPinv);
580  cholmod_l_finish (&m_cc);
581 
582 #elif defined (HAVE_CXSPARSE)
583 
584  CXSPARSE_DNAME (_sfree) (S);
585  CXSPARSE_DNAME (_nfree) (N);
586 
587 #endif
588 }
589 
590 template <>
591 SparseMatrix
592 sparse_qr<SparseMatrix>::sparse_qr_rep::V () const
593 {
594 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
595 
596  return rcs2ros (m_H, &m_cc);
597 
598 #elif defined (HAVE_CXSPARSE)
599 
600  // Drop zeros from V and sort
601  // FIXME: Is the double transpose to sort necessary?
602 
603  CXSPARSE_DNAME (_dropzeros) (N->L);
604  CXSPARSE_DNAME () *D = CXSPARSE_DNAME (_transpose) (N->L, 1);
605  CXSPARSE_DNAME (_spfree) (N->L);
606  N->L = CXSPARSE_DNAME (_transpose) (D, 1);
607  CXSPARSE_DNAME (_spfree) (D);
608 
609  octave_idx_type nc = N->L->n;
610  octave_idx_type nz = N->L->p[nc];
611  SparseMatrix ret (N->L->m, nc, nz);
612 
613  for (octave_idx_type j = 0; j < nc+1; j++)
614  ret.xcidx (j) = N->L->p[j];
615 
616  for (octave_idx_type j = 0; j < nz; j++)
617  {
618  ret.xridx (j) = N->L->i[j];
619  ret.xdata (j) = N->L->x[j];
620  }
621 
622  return ret;
623 
624 #else
625 
626  return SparseMatrix ();
627 
628 #endif
629 }
630 
631 template <>
632 SparseMatrix
633 sparse_qr<SparseMatrix>::sparse_qr_rep::R (bool econ) const
634 {
635 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
636 
637  octave_idx_type nr = from_size_t (m_R->nrow);
638  octave_idx_type nc = from_size_t (m_R->ncol);
639  octave_idx_type nz
640  = from_suitesparse_long
641  (cholmod_l_nnz (m_R, const_cast<cholmod_common *> (&m_cc)));
642 
643  // FIXME: Does this work if econ = true?
644  SparseMatrix ret ((econ ? (nc > nr ? nr : nc) : nr), nc, nz);
645  SuiteSparse_long *Rp = reinterpret_cast<SuiteSparse_long *> (m_R->p);
646  SuiteSparse_long *Ri = reinterpret_cast<SuiteSparse_long *> (m_R->i);
647 
648  for (octave_idx_type j = 0; j < nc + 1; j++)
649  ret.xcidx (j) = from_suitesparse_long (Rp[j]);
650 
651  for (octave_idx_type j = 0; j < nz; j++)
652  {
653  ret.xridx (j) = from_suitesparse_long (Ri[j]);
654  ret.xdata (j) = (reinterpret_cast<double *> (m_R->x))[j];
655  }
656 
657  return ret;
658 
659 #elif defined (HAVE_CXSPARSE)
660 
661  // Drop zeros from R and sort
662  // FIXME: Is the double transpose to sort necessary?
663 
664  CXSPARSE_DNAME (_dropzeros) (N->U);
665  CXSPARSE_DNAME () *D = CXSPARSE_DNAME (_transpose) (N->U, 1);
666  CXSPARSE_DNAME (_spfree) (N->U);
667  N->U = CXSPARSE_DNAME (_transpose) (D, 1);
668  CXSPARSE_DNAME (_spfree) (D);
669 
670  octave_idx_type nc = N->U->n;
671  octave_idx_type nz = N->U->p[nc];
672 
673  SparseMatrix ret ((econ ? (nc > nrows ? nrows : nc) : nrows), nc, nz);
674 
675  for (octave_idx_type j = 0; j < nc+1; j++)
676  ret.xcidx (j) = N->U->p[j];
677 
678  for (octave_idx_type j = 0; j < nz; j++)
679  {
680  ret.xridx (j) = N->U->i[j];
681  ret.xdata (j) = N->U->x[j];
682  }
683 
684  return ret;
685 
686 #else
687 
688  octave_unused_parameter (econ);
689 
690  return SparseMatrix ();
691 
692 #endif
693 }
694 
695 template <>
696 Matrix
697 sparse_qr<SparseMatrix>::sparse_qr_rep::C (const Matrix& b, bool econ)
698 {
699 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
700  octave_idx_type nr = (econ
701  ? (ncols > nrows ? nrows : ncols)
702  : nrows);
703  octave_idx_type b_nr = b.rows ();
704  octave_idx_type b_nc = b.cols ();
705  Matrix ret (nr, b_nc);
706 
707  if (nrows != b_nr)
708  (*current_liboctave_error_handler)
709  ("sparse_qr: matrix dimension mismatch");
710  else if (b_nc < 0 || b_nr < 0)
711  (*current_liboctave_error_handler)
712  ("sparse_qr: matrix dimension with negative size");
713 
714  cholmod_dense *QTB; // Q' * B
715  cholmod_dense B = rod2rcd (b);
716 
717  QTB = SuiteSparseQR_qmult<double> (SPQR_QTX, m_H, m_Htau, m_HPinv, &B,
718  &m_cc);
719  spqr_error_handler (&m_cc);
720 
721  // copy QTB into ret
722  double *QTB_x = reinterpret_cast<double *> (QTB->x);
723  double *ret_vec = reinterpret_cast<double *> (ret.fortran_vec ());
724  for (octave_idx_type j = 0; j < b_nc; j++)
725  for (octave_idx_type i = 0; i < nr; i++)
726  ret_vec[j * nr + i] = QTB_x[j * b_nr + i];
727 
728  cholmod_l_free_dense (&QTB, &m_cc);
729 
730  return ret;
731 
732 #elif defined (HAVE_CXSPARSE)
733 
734  if (econ)
735  (*current_liboctave_error_handler)
736  ("sparse-qr: economy mode with CXSparse not supported");
737 
738  octave_idx_type b_nr = b.rows ();
739  octave_idx_type b_nc = b.cols ();
740 
741  octave_idx_type nc = N->L->n;
742  octave_idx_type nr = nrows;
743 
744  const double *bvec = b.data ();
745 
746  Matrix ret (b_nr, b_nc);
747  double *vec = ret.fortran_vec ();
748 
749  if (nr < 0 || nc < 0 || nr != b_nr)
750  (*current_liboctave_error_handler) ("matrix dimension mismatch");
751 
752  if (nr == 0 || nc == 0 || b_nc == 0)
753  ret = Matrix (nc, b_nc, 0.0);
754  else
755  {
756  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
757 
758  for (volatile octave_idx_type j = 0, idx = 0;
759  j < b_nc;
760  j++, idx += b_nr)
761  {
762  octave_quit ();
763 
764  for (octave_idx_type i = nr; i < S->m2; i++)
765  buf[i] = 0.;
766 
767  volatile octave_idx_type nm = (nr < nc ? nr : nc);
768 
769  CXSPARSE_DNAME (_ipvec) (S->pinv, bvec + idx, buf, b_nr);
770 
771  for (volatile octave_idx_type i = 0; i < nm; i++)
772  {
773  octave_quit ();
774 
775  CXSPARSE_DNAME (_happly) (N->L, i, N->B[i], buf);
776  }
777 
778  for (octave_idx_type i = 0; i < b_nr; i++)
779  vec[i+idx] = buf[i];
780  }
781  }
782 
783  return ret;
784 
785 #else
786 
787  octave_unused_parameter (b);
788  octave_unused_parameter (econ);
789 
790  return Matrix ();
791 
792 #endif
793 }
794 
795 template <>
796 Matrix
797 sparse_qr<SparseMatrix>::sparse_qr_rep::Q (bool econ)
798 {
799 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
800 
801  octave_idx_type nc = (econ
802  ? (ncols > nrows ? nrows : ncols)
803  : nrows);
804  Matrix ret (nrows, nc);
805  cholmod_dense *q;
806 
807  // I is nrows x nrows identity matrix
808  cholmod_dense *I_mat
809  = cholmod_l_allocate_dense (nrows, nrows, nrows, CHOLMOD_REAL, &m_cc);
810 
811  for (octave_idx_type i = 0; i < nrows * nrows; i++)
812  (reinterpret_cast<double *> (I_mat->x))[i] = 0.0;
813 
814  for (octave_idx_type i = 0; i < nrows; i++)
815  (reinterpret_cast<double *> (I_mat->x))[i * nrows + i] = 1.0;
816 
817  q = SuiteSparseQR_qmult<double> (SPQR_QX, m_H, m_Htau, m_HPinv, I_mat,
818  &m_cc);
819  spqr_error_handler (&m_cc);
820 
821  double *q_x = reinterpret_cast<double *> (q->x);
822  double *ret_vec = const_cast<double *> (ret.fortran_vec ());
823  for (octave_idx_type j = 0; j < nc; j++)
824  for (octave_idx_type i = 0; i < nrows; i++)
825  ret_vec[j * nrows + i] = q_x[j * nrows + i];
826 
827  cholmod_l_free_dense (&q, &m_cc);
828  cholmod_l_free_dense (&I_mat, &m_cc);
829 
830  return ret;
831 
832 #elif defined (HAVE_CXSPARSE)
833 
834  if (econ)
835  (*current_liboctave_error_handler)
836  ("sparse-qr: economy mode with CXSparse not supported");
837 
838  octave_idx_type nc = N->L->n;
839  octave_idx_type nr = nrows;
840  Matrix ret (nr, nr);
841  double *ret_vec = ret.fortran_vec ();
842 
843  if (nr < 0 || nc < 0)
844  (*current_liboctave_error_handler) ("matrix dimension mismatch");
845 
846  if (nr == 0 || nc == 0)
847  ret = Matrix (nc, nr, 0.0);
848  else
849  {
850  OCTAVE_LOCAL_BUFFER (double, bvec, nr + 1);
851 
852  for (octave_idx_type i = 0; i < nr; i++)
853  bvec[i] = 0.;
854 
855  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
856 
857  for (volatile octave_idx_type j = 0, idx = 0; j < nr; j++, idx += nr)
858  {
859  octave_quit ();
860 
861  bvec[j] = 1.0;
862  for (octave_idx_type i = nr; i < S->m2; i++)
863  buf[i] = 0.;
864 
865  volatile octave_idx_type nm = (nr < nc ? nr : nc);
866 
867  CXSPARSE_DNAME (_ipvec) (S->pinv, bvec, buf, nr);
868 
869  for (volatile octave_idx_type i = 0; i < nm; i++)
870  {
871  octave_quit ();
872 
873  CXSPARSE_DNAME (_happly) (N->L, i, N->B[i], buf);
874  }
875 
876  for (octave_idx_type i = 0; i < nr; i++)
877  ret_vec[i+idx] = buf[i];
878 
879  bvec[j] = 0.0;
880  }
881  }
882 
883  return ret.transpose ();
884 
885 #else
886 
887  octave_unused_parameter (econ);
888 
889  return Matrix ();
890 
891 #endif
892 }
893 
894 template <>
895 template <>
896 Matrix
897 sparse_qr<SparseMatrix>::sparse_qr_rep::tall_solve<MArray<double>, Matrix>
898 (const MArray<double>& b, octave_idx_type& info)
899 {
900  info = -1;
901 
902 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD) && defined (HAVE_CXSPARSE))
903 
904  octave_idx_type b_nr = b.rows ();
905  octave_idx_type b_nc = b.cols ();
906  Matrix x (ncols, b_nc); // X = m_E'*(m_R\(Q'*B))
907 
908  if (nrows < 0 || ncols < 0 || b_nc < 0 || b_nr < 0)
909  (*current_liboctave_error_handler)
910  ("matrix dimension with negative size");
911 
912  if (nrows < 0 || ncols < 0 || nrows != b_nr)
913  (*current_liboctave_error_handler) ("matrix dimension mismatch");
914 
915  cholmod_dense *QTB; // Q' * B
916  cholmod_dense B = rod2rcd (b);
917 
918  // FIXME: Process b column by column as in the CXSPARSE version below.
919  // This avoids a large dense matrix Q' * B in memory.
920  QTB = SuiteSparseQR_qmult<double> (SPQR_QTX, m_H, m_Htau, m_HPinv, &B,
921  &m_cc);
922 
923  spqr_error_handler (&m_cc);
924 
925  // convert m_R into CXSPARSE matrix R2
926  CXSPARSE_DNAME (_sparse) R2;
927  R2.n = ncols;
928  R2.m = ncols;
929  R2.nzmax = m_R->nzmax;
930  R2.x = reinterpret_cast<double *> (m_R->x);
931  suitesparse_integer *R2_p;
932  suitesparse_integer *R2_i;
933  if (sizeof (suitesparse_integer) == sizeof (SuiteSparse_long))
934  {
935  R2.p = reinterpret_cast<suitesparse_integer *> (m_R->p);
936  R2.i = reinterpret_cast<suitesparse_integer *> (m_R->i);
937  }
938  else
939  {
940  R2_p = new suitesparse_integer[ncols+1];
941  SuiteSparse_long *R_p = reinterpret_cast<SuiteSparse_long *> (m_R->p);
942  for (octave_idx_type i = 0; i < ncols+1; i++)
943  R2_p[i] = suitesparse_long_to_suitesparse_integer (R_p[i]);
944  R2.p = R2_p;
945  octave_idx_type nnz = from_suitesparse_long (cholmod_l_nnz (m_R, &m_cc));
946  R2_i = new suitesparse_integer[nnz];
947  SuiteSparse_long *R_i = reinterpret_cast<SuiteSparse_long *> (m_R->i);
948  for (octave_idx_type i = 0; i < nnz; i++)
949  R2_i[i] = suitesparse_long_to_suitesparse_integer (R_i[i]);
950  R2.i = R2_i;
951  }
952  R2.nz = -1;
953  double *x_vec = const_cast<double *> (x.fortran_vec ());
954  suitesparse_integer *E;
955  if (sizeof (suitesparse_integer) != sizeof (SuiteSparse_long))
956  {
957  E = new suitesparse_integer [ncols];
958  for (octave_idx_type i = 0; i < ncols; i++)
959  E[i] = suitesparse_long_to_suitesparse_integer (m_E[i]);
960  }
961  else
962  E = reinterpret_cast<suitesparse_integer *> (m_E);
963  for (volatile octave_idx_type j = 0; j < b_nc; j++)
964  {
965  // fill x(:,j)
966  // solve (m_R\(Q'*B(:,j)) and store result in QTB(:,j)
967  CXSPARSE_DNAME (_usolve)
968  (&R2, &(reinterpret_cast<double *> (QTB->x)[j * b_nr]));
969  // x(:,j) = m_E' * (m_R\(Q'*B(:,j))
970  CXSPARSE_DNAME (_ipvec)
971  (E, &(reinterpret_cast<double *> (QTB->x)[j * b_nr]),
972  &x_vec[j * ncols], ncols);
973  }
974 
975  if (sizeof (suitesparse_integer) != sizeof (SuiteSparse_long))
976  {
977  delete [] R2_p;
978  delete [] R2_i;
979  delete [] E;
980  }
981  cholmod_l_free_dense (&QTB, &m_cc);
982 
983  info = 0;
984 
985  return x;
986 
987 #elif defined (HAVE_CXSPARSE)
988 
989  octave_idx_type nr = nrows;
990  octave_idx_type nc = ncols;
991 
992  octave_idx_type b_nc = b.cols ();
993  octave_idx_type b_nr = b.rows ();
994 
995  const double *bvec = b.data ();
996 
997  Matrix x (nc, b_nc);
998  double *vec = x.fortran_vec ();
999 
1000  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
1001 
1002  for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc;
1003  i++, idx+=nc, bidx+=b_nr)
1004  {
1005  octave_quit ();
1006 
1007  for (octave_idx_type j = nr; j < S->m2; j++)
1008  buf[j] = 0.;
1009 
1010  CXSPARSE_DNAME (_ipvec) (S->pinv, bvec + bidx, buf, nr);
1011 
1012  for (volatile octave_idx_type j = 0; j < nc; j++)
1013  {
1014  octave_quit ();
1015 
1016  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1017  }
1018 
1019  CXSPARSE_DNAME (_usolve) (N->U, buf);
1020  CXSPARSE_DNAME (_ipvec) (S->q, buf, vec + idx, nc);
1021  }
1022 
1023  info = 0;
1024 
1025  return x;
1026 
1027 #else
1028 
1029  octave_unused_parameter (b);
1030 
1031  return Matrix ();
1032 
1033 #endif
1034 }
1035 
1036 template <>
1037 template <>
1038 Matrix
1039 sparse_qr<SparseMatrix>::sparse_qr_rep::wide_solve<MArray<double>, Matrix>
1040 (const MArray<double>& b, octave_idx_type& info) const
1041 {
1042  info = -1;
1043 #if defined (HAVE_CXSPARSE)
1044 
1045  // These are swapped because the original matrix was transposed in
1046  // sparse_qr<SparseMatrix>::solve.
1047 
1048  octave_idx_type nr = ncols;
1049  octave_idx_type nc = nrows;
1050 
1051  octave_idx_type b_nc = b.cols ();
1052  octave_idx_type b_nr = b.rows ();
1053 
1054  const double *bvec = b.data ();
1055 
1056  Matrix x (nc, b_nc);
1057  double *vec = x.fortran_vec ();
1058 
1059  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
1060 
1061  OCTAVE_LOCAL_BUFFER (double, buf, nbuf);
1062 
1063  for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc;
1064  i++, idx+=nc, bidx+=b_nr)
1065  {
1066  octave_quit ();
1067 
1068  for (octave_idx_type j = nr; j < nbuf; j++)
1069  buf[j] = 0.;
1070 
1071  CXSPARSE_DNAME (_pvec) (S->q, bvec + bidx, buf, nr);
1072  CXSPARSE_DNAME (_utsolve) (N->U, buf);
1073 
1074  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
1075  {
1076  octave_quit ();
1077 
1078  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1079  }
1080 
1081  CXSPARSE_DNAME (_pvec) (S->pinv, buf, vec + idx, nc);
1082  }
1083 
1084  info = 0;
1085 
1086  return x;
1087 
1088 #else
1089  octave_unused_parameter (b);
1090 
1091  return Matrix ();
1092 
1093 #endif
1094 }
1095 
1096 template <>
1097 template <>
1098 SparseMatrix
1099 sparse_qr<SparseMatrix>::sparse_qr_rep::tall_solve<SparseMatrix, SparseMatrix>
1100 (const SparseMatrix& b, octave_idx_type& info)
1101 {
1102  info = -1;
1103 
1104 #if defined (HAVE_CXSPARSE)
1105 
1106  octave_idx_type nr = nrows;
1107  octave_idx_type nc = ncols;
1108 
1109  octave_idx_type b_nr = b.rows ();
1110  octave_idx_type b_nc = b.cols ();
1111 
1112  SparseMatrix x (nc, b_nc, b.nnz ());
1113  x.xcidx (0) = 0;
1114 
1115  volatile octave_idx_type x_nz = b.nnz ();
1116  volatile octave_idx_type ii = 0;
1117 
1118  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1119  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
1120 
1121  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1122  {
1123  octave_quit ();
1124 
1125  for (octave_idx_type j = 0; j < b_nr; j++)
1126  Xx[j] = b.xelem (j, i);
1127 
1128  for (octave_idx_type j = nr; j < S->m2; j++)
1129  buf[j] = 0.;
1130 
1131  CXSPARSE_DNAME (_ipvec) (S->pinv, Xx, buf, nr);
1132 
1133  for (volatile octave_idx_type j = 0; j < nc; j++)
1134  {
1135  octave_quit ();
1136 
1137  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1138  }
1139 
1140  CXSPARSE_DNAME (_usolve) (N->U, buf);
1141  CXSPARSE_DNAME (_ipvec) (S->q, buf, Xx, nc);
1142 
1143  for (octave_idx_type j = 0; j < nc; j++)
1144  {
1145  double tmp = Xx[j];
1146 
1147  if (tmp != 0.0)
1148  {
1149  if (ii == x_nz)
1150  {
1151  // Resize the sparse matrix
1152  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
1153  sz = (sz > 10 ? sz : 10) + x_nz;
1154  x.change_capacity (sz);
1155  x_nz = sz;
1156  }
1157 
1158  x.xdata (ii) = tmp;
1159  x.xridx (ii++) = j;
1160  }
1161  }
1162 
1163  x.xcidx (i+1) = ii;
1164  }
1165 
1166  info = 0;
1167 
1168  return x;
1169 
1170 #else
1171 
1172  octave_unused_parameter (b);
1173 
1174  return SparseMatrix ();
1175 
1176 #endif
1177 }
1178 
1179 template <>
1180 template <>
1181 SparseMatrix
1182 sparse_qr<SparseMatrix>::sparse_qr_rep::wide_solve<SparseMatrix, SparseMatrix>
1183 (const SparseMatrix& b, octave_idx_type& info) const
1184 {
1185  info = -1;
1186 
1187 #if defined (HAVE_CXSPARSE)
1188 
1189  // These are swapped because the original matrix was transposed in
1190  // sparse_qr<SparseMatrix>::solve.
1191 
1192  octave_idx_type nr = ncols;
1193  octave_idx_type nc = nrows;
1194 
1195  octave_idx_type b_nr = b.rows ();
1196  octave_idx_type b_nc = b.cols ();
1197 
1198  SparseMatrix x (nc, b_nc, b.nnz ());
1199  x.xcidx (0) = 0;
1200 
1201  volatile octave_idx_type x_nz = b.nnz ();
1202  volatile octave_idx_type ii = 0;
1203  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
1204 
1205  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1206  OCTAVE_LOCAL_BUFFER (double, buf, nbuf);
1207 
1208  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1209  {
1210  octave_quit ();
1211 
1212  for (octave_idx_type j = 0; j < b_nr; j++)
1213  Xx[j] = b.xelem (j, i);
1214 
1215  for (octave_idx_type j = nr; j < nbuf; j++)
1216  buf[j] = 0.;
1217 
1218  CXSPARSE_DNAME (_pvec) (S->q, Xx, buf, nr);
1219  CXSPARSE_DNAME (_utsolve) (N->U, buf);
1220 
1221  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
1222  {
1223  octave_quit ();
1224 
1225  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1226  }
1227 
1228  CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xx, nc);
1229 
1230  for (octave_idx_type j = 0; j < nc; j++)
1231  {
1232  double tmp = Xx[j];
1233 
1234  if (tmp != 0.0)
1235  {
1236  if (ii == x_nz)
1237  {
1238  // Resize the sparse matrix
1239  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
1240  sz = (sz > 10 ? sz : 10) + x_nz;
1241  x.change_capacity (sz);
1242  x_nz = sz;
1243  }
1244 
1245  x.xdata (ii) = tmp;
1246  x.xridx (ii++) = j;
1247  }
1248  }
1249 
1250  x.xcidx (i+1) = ii;
1251  }
1252 
1253  info = 0;
1254 
1255  x.maybe_compress ();
1256 
1257  return x;
1258 
1259 #else
1260 
1261  octave_unused_parameter (b);
1262 
1263  return SparseMatrix ();
1264 
1265 #endif
1266 }
1267 
1268 template <>
1269 template <>
1270 ComplexMatrix
1271 sparse_qr<SparseMatrix>::sparse_qr_rep::tall_solve<MArray<Complex>, ComplexMatrix>
1272 (const MArray<Complex>& b, octave_idx_type& info)
1273 {
1274  info = -1;
1275 
1276 #if defined (HAVE_CXSPARSE)
1277 
1278  octave_idx_type nr = nrows;
1279  octave_idx_type nc = ncols;
1280 
1281  octave_idx_type b_nc = b.cols ();
1282  octave_idx_type b_nr = b.rows ();
1283 
1284  ComplexMatrix x (nc, b_nc);
1285  Complex *vec = x.fortran_vec ();
1286 
1287  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1288  OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? b_nr : nc));
1289  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
1290 
1291  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1292  {
1293  octave_quit ();
1294 
1295  for (octave_idx_type j = 0; j < b_nr; j++)
1296  {
1297  Complex c = b.xelem (j, i);
1298  Xx[j] = c.real ();
1299  Xz[j] = c.imag ();
1300  }
1301 
1302  for (octave_idx_type j = nr; j < S->m2; j++)
1303  buf[j] = 0.;
1304 
1305  CXSPARSE_DNAME (_ipvec) (S->pinv, Xx, buf, nr);
1306 
1307  for (volatile octave_idx_type j = 0; j < nc; j++)
1308  {
1309  octave_quit ();
1310 
1311  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1312  }
1313 
1314  CXSPARSE_DNAME (_usolve) (N->U, buf);
1315  CXSPARSE_DNAME (_ipvec) (S->q, buf, Xx, nc);
1316 
1317  for (octave_idx_type j = nr; j < S->m2; j++)
1318  buf[j] = 0.;
1319 
1320  CXSPARSE_DNAME (_ipvec) (S->pinv, Xz, buf, nr);
1321 
1322  for (volatile octave_idx_type j = 0; j < nc; j++)
1323  {
1324  octave_quit ();
1325 
1326  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1327  }
1328 
1329  CXSPARSE_DNAME (_usolve) (N->U, buf);
1330  CXSPARSE_DNAME (_ipvec) (S->q, buf, Xz, nc);
1331 
1332  for (octave_idx_type j = 0; j < nc; j++)
1333  vec[j+idx] = Complex (Xx[j], Xz[j]);
1334  }
1335 
1336  info = 0;
1337 
1338  return x;
1339 
1340 #else
1341 
1342  octave_unused_parameter (b);
1343 
1344  return ComplexMatrix ();
1345 
1346 #endif
1347 }
1348 
1349 template <>
1350 template <>
1351 ComplexMatrix
1352 sparse_qr<SparseMatrix>::sparse_qr_rep::wide_solve<MArray<Complex>, ComplexMatrix>
1353 (const MArray<Complex>& b, octave_idx_type& info) const
1354 {
1355  info = -1;
1356 
1357 #if defined (HAVE_CXSPARSE)
1358 
1359  // These are swapped because the original matrix was transposed in
1360  // sparse_qr<SparseMatrix>::solve.
1361 
1362  octave_idx_type nr = ncols;
1363  octave_idx_type nc = nrows;
1364 
1365  octave_idx_type b_nc = b.cols ();
1366  octave_idx_type b_nr = b.rows ();
1367 
1368  ComplexMatrix x (nc, b_nc);
1369  Complex *vec = x.fortran_vec ();
1370 
1371  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
1372 
1373  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1374  OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? b_nr : nc));
1375  OCTAVE_LOCAL_BUFFER (double, buf, nbuf);
1376 
1377  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1378  {
1379  octave_quit ();
1380 
1381  for (octave_idx_type j = 0; j < b_nr; j++)
1382  {
1383  Complex c = b.xelem (j, i);
1384  Xx[j] = c.real ();
1385  Xz[j] = c.imag ();
1386  }
1387 
1388  for (octave_idx_type j = nr; j < nbuf; j++)
1389  buf[j] = 0.;
1390 
1391  CXSPARSE_DNAME (_pvec) (S->q, Xx, buf, nr);
1392  CXSPARSE_DNAME (_utsolve) (N->U, buf);
1393 
1394  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
1395  {
1396  octave_quit ();
1397 
1398  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1399  }
1400 
1401  CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xx, nc);
1402 
1403  for (octave_idx_type j = nr; j < nbuf; j++)
1404  buf[j] = 0.;
1405 
1406  CXSPARSE_DNAME (_pvec) (S->q, Xz, buf, nr);
1407  CXSPARSE_DNAME (_utsolve) (N->U, buf);
1408 
1409  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
1410  {
1411  octave_quit ();
1412 
1413  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1414  }
1415 
1416  CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xz, nc);
1417 
1418  for (octave_idx_type j = 0; j < nc; j++)
1419  vec[j+idx] = Complex (Xx[j], Xz[j]);
1420  }
1421 
1422  info = 0;
1423 
1424  return x;
1425 
1426 #else
1427 
1428  octave_unused_parameter (b);
1429 
1430  return ComplexMatrix ();
1431 
1432 #endif
1433 }
1434 
1435 // Complex-valued matrices.
1436 
1437 template <>
1438 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::sparse_qr_rep
1439 (const SparseComplexMatrix& a, int order)
1440  : nrows (a.rows ()), ncols (a.columns ())
1441 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
1442  , m_cc (), m_R (nullptr), m_E (nullptr), m_H (nullptr),
1443  m_Htau (nullptr), m_HPinv (nullptr)
1444 {
1445  octave_idx_type nr = a.rows ();
1446  octave_idx_type nc = a.cols ();
1447 
1448  if (nr < 0 || nc < 0)
1449  (*current_liboctave_error_handler)
1450  ("matrix dimension with negative size");
1451 
1452  if (order < 0 || order > 9)
1453  (*current_liboctave_error_handler)
1454  ("ordering %d is not supported by SPQR", order);
1455 
1456  cholmod_l_start (&m_cc);
1457  cholmod_sparse A = cos2ccs (a);
1458 
1459  SuiteSparseQR<Complex> (order, static_cast<double> (SPQR_DEFAULT_TOL),
1460  static_cast<SuiteSparse_long> (A.nrow),
1461  &A, &m_R, &m_E, &m_H,
1462  &m_HPinv, &m_Htau, &m_cc);
1463  spqr_error_handler (&m_cc);
1464 
1465  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
1466  {
1467  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
1468  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
1469  }
1470 }
1471 
1472 #elif defined (HAVE_CXSPARSE)
1473  , S (nullptr), N (nullptr)
1474 {
1475  CXSPARSE_ZNAME () A;
1476 
1477  A.nzmax = a.nnz ();
1478  A.m = nrows;
1479  A.n = ncols;
1480  // Cast away const on A, with full knowledge that CSparse won't touch it
1481  // Prevents the methods below making a copy of the data.
1482  A.p = const_cast<suitesparse_integer *>
1483  (to_suitesparse_intptr (a.cidx ()));
1484  A.i = const_cast<suitesparse_integer *>
1485  (to_suitesparse_intptr (a.ridx ()));
1486  A.x = const_cast<cs_complex_t *>
1487  (reinterpret_cast<const cs_complex_t *> (a.data ()));
1488  A.nz = -1;
1489 
1490  S = CXSPARSE_ZNAME (_sqr) (order, &A, 1);
1491  N = CXSPARSE_ZNAME (_qr) (&A, S);
1492 
1493  if (! N)
1494  (*current_liboctave_error_handler)
1495  ("sparse_qr: sparse matrix QR factorization failed");
1496 
1497 }
1498 
1499 #else
1500 
1501 {
1502  octave_unused_parameter (order);
1503 
1504  (*current_liboctave_error_handler)
1505  ("sparse_qr: support for CXSparse was unavailable or disabled when liboctave was built");
1506 }
1507 
1508 #endif
1509 
1510 template <>
1511 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::~sparse_qr_rep ()
1512 {
1513 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
1514 
1515  cholmod_l_free_sparse (&m_R, &m_cc);
1516  cholmod_l_free_sparse (&m_H, &m_cc);
1517  cholmod_l_free_dense (&m_Htau, &m_cc);
1518  free (m_E); // FIXME: use cholmod_l_free
1519  free (m_HPinv);
1520  cholmod_l_finish (&m_cc);
1521 
1522 #elif defined (HAVE_CXSPARSE)
1523 
1524  CXSPARSE_ZNAME (_sfree) (S);
1525  CXSPARSE_ZNAME (_nfree) (N);
1526 
1527 #endif
1528 }
1529 
1530 template <>
1531 SparseComplexMatrix
1532 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::V () const
1533 {
1534 #if defined (HAVE_CXSPARSE)
1535  // Drop zeros from V and sort
1536  // FIXME: Is the double transpose to sort necessary?
1537 
1538  CXSPARSE_ZNAME (_dropzeros) (N->L);
1539  CXSPARSE_ZNAME () *D = CXSPARSE_ZNAME (_transpose) (N->L, 1);
1540  CXSPARSE_ZNAME (_spfree) (N->L);
1541  N->L = CXSPARSE_ZNAME (_transpose) (D, 1);
1542  CXSPARSE_ZNAME (_spfree) (D);
1543 
1544  octave_idx_type nc = N->L->n;
1545  octave_idx_type nz = N->L->p[nc];
1546  SparseComplexMatrix ret (N->L->m, nc, nz);
1547 
1548  for (octave_idx_type j = 0; j < nc+1; j++)
1549  ret.xcidx (j) = N->L->p[j];
1550 
1551  for (octave_idx_type j = 0; j < nz; j++)
1552  {
1553  ret.xridx (j) = N->L->i[j];
1554  ret.xdata (j) = reinterpret_cast<Complex *> (N->L->x)[j];
1555  }
1556 
1557  return ret;
1558 
1559 #else
1560 
1561  return SparseComplexMatrix ();
1562 
1563 #endif
1564 }
1565 
1566 template <>
1567 SparseComplexMatrix
1568 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::R (bool econ) const
1569 {
1570 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
1571 
1572  octave_idx_type nr = from_size_t (m_R->nrow);
1573  octave_idx_type nc = from_size_t (m_R->ncol);
1574  octave_idx_type nz
1575  = from_suitesparse_long
1576  (cholmod_l_nnz (m_R, const_cast<cholmod_common *> (&m_cc)));
1577 
1578  // FIXME: Does this work if econ = true?
1579  SparseComplexMatrix ret ((econ ? (nc > nr ? nr : nc) : nr), nc, nz);
1580  SuiteSparse_long *Rp = reinterpret_cast<SuiteSparse_long *> (m_R->p);
1581  SuiteSparse_long *Ri = reinterpret_cast<SuiteSparse_long *> (m_R->i);
1582 
1583  for (octave_idx_type j = 0; j < nc + 1; j++)
1584  ret.xcidx (j) = from_suitesparse_long (Rp[j]);
1585 
1586  for (octave_idx_type j = 0; j < nz; j++)
1587  {
1588  ret.xridx (j) = from_suitesparse_long (Ri[j]);
1589  ret.xdata (j) = (reinterpret_cast<Complex *> (m_R->x))[j];
1590  }
1591 
1592  return ret;
1593 
1594 #elif defined (HAVE_CXSPARSE)
1595 
1596  // Drop zeros from R and sort
1597  // FIXME: Is the double transpose to sort necessary?
1598 
1599  CXSPARSE_ZNAME (_dropzeros) (N->U);
1600  CXSPARSE_ZNAME () *D = CXSPARSE_ZNAME (_transpose) (N->U, 1);
1601  CXSPARSE_ZNAME (_spfree) (N->U);
1602  N->U = CXSPARSE_ZNAME (_transpose) (D, 1);
1603  CXSPARSE_ZNAME (_spfree) (D);
1604 
1605  octave_idx_type nc = N->U->n;
1606  octave_idx_type nz = N->U->p[nc];
1607 
1608  SparseComplexMatrix ret ((econ ? (nc > nrows ? nrows : nc) : nrows),
1609  nc, nz);
1610 
1611  for (octave_idx_type j = 0; j < nc+1; j++)
1612  ret.xcidx (j) = N->U->p[j];
1613 
1614  for (octave_idx_type j = 0; j < nz; j++)
1615  {
1616  ret.xridx (j) = N->U->i[j];
1617  ret.xdata (j) = reinterpret_cast<Complex *> (N->U->x)[j];
1618  }
1619 
1620  return ret;
1621 
1622 #else
1623 
1624  octave_unused_parameter (econ);
1625 
1626  return SparseComplexMatrix ();
1627 
1628 #endif
1629 }
1630 
1631 template <>
1632 ComplexMatrix
1633 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::C
1634 (const ComplexMatrix& b, bool econ)
1635 {
1636 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
1637 
1638  // FIXME: not tested
1639  octave_idx_type nr = (econ
1640  ? (ncols > nrows ? nrows : ncols)
1641  : nrows);
1642  octave_idx_type b_nr = b.rows ();
1643  octave_idx_type b_nc = b.cols ();
1644  ComplexMatrix ret (nr, b_nc);
1645 
1646  if (nrows != b_nr)
1647  (*current_liboctave_error_handler) ("matrix dimension mismatch");
1648 
1649  if (b_nc < 0 || b_nr < 0)
1650  (*current_liboctave_error_handler)
1651  ("matrix dimension with negative size");
1652 
1653  cholmod_dense *QTB; // Q' * B
1654  cholmod_dense B = cod2ccd (b);
1655 
1656  QTB = SuiteSparseQR_qmult<Complex> (SPQR_QTX, m_H, m_Htau, m_HPinv, &B,
1657  &m_cc);
1658  spqr_error_handler (&m_cc);
1659 
1660  // copy QTB into ret
1661  Complex *QTB_x = reinterpret_cast<Complex *> (QTB->x);
1662  Complex *ret_vec = reinterpret_cast<Complex *> (ret.fortran_vec ());
1663  for (octave_idx_type j = 0; j < b_nc; j++)
1664  for (octave_idx_type i = 0; i < nr; i++)
1665  ret_vec[j * nr + i] = QTB_x[j * b_nr + i];
1666 
1667  cholmod_l_free_dense (&QTB, &m_cc);
1668 
1669  return ret;
1670 
1671 #elif defined (HAVE_CXSPARSE)
1672 
1673  if (econ)
1674  (*current_liboctave_error_handler)
1675  ("sparse-qr: economy mode with CXSparse not supported");
1676 
1677  octave_idx_type b_nr = b.rows ();
1678  octave_idx_type b_nc = b.cols ();
1679  octave_idx_type nc = N->L->n;
1680  octave_idx_type nr = nrows;
1681  const cs_complex_t *bvec
1682  = reinterpret_cast<const cs_complex_t *> (b.data ());
1683  ComplexMatrix ret (b_nr, b_nc);
1684  Complex *vec = ret.fortran_vec ();
1685 
1686  if (nr < 0 || nc < 0 || nr != b_nr)
1687  (*current_liboctave_error_handler) ("matrix dimension mismatch");
1688 
1689  if (nr == 0 || nc == 0 || b_nc == 0)
1690  ret = ComplexMatrix (nc, b_nc, Complex (0.0, 0.0));
1691  else
1692  {
1693  OCTAVE_LOCAL_BUFFER (Complex, buf, S->m2);
1694 
1695  for (volatile octave_idx_type j = 0, idx = 0;
1696  j < b_nc;
1697  j++, idx += b_nr)
1698  {
1699  octave_quit ();
1700 
1701  volatile octave_idx_type nm = (nr < nc ? nr : nc);
1702 
1703  CXSPARSE_ZNAME (_ipvec) (S->pinv, bvec + idx,
1704  reinterpret_cast<cs_complex_t *> (buf),
1705  b_nr);
1706 
1707  for (volatile octave_idx_type i = 0; i < nm; i++)
1708  {
1709  octave_quit ();
1710 
1711  CXSPARSE_ZNAME (_happly) (N->L, i, N->B[i],
1712  reinterpret_cast<cs_complex_t *> (buf));
1713  }
1714 
1715  for (octave_idx_type i = 0; i < b_nr; i++)
1716  vec[i+idx] = buf[i];
1717  }
1718  }
1719 
1720  return ret;
1721 
1722 #else
1723 
1724  octave_unused_parameter (b);
1725  octave_unused_parameter (econ);
1726 
1727  return ComplexMatrix ();
1728 
1729 #endif
1730 }
1731 
1732 template <>
1733 ComplexMatrix
1734 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::Q (bool econ)
1735 {
1736 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
1737 
1738  octave_idx_type nc = (econ
1739  ? (ncols > nrows ? nrows : ncols)
1740  : nrows);
1741  ComplexMatrix ret (nrows, nc);
1742  cholmod_dense *q;
1743 
1744  // I is nrows x nrows identity matrix
1745  cholmod_dense *I_mat
1746  = reinterpret_cast<cholmod_dense *>
1747  (cholmod_l_allocate_dense (nrows, nrows, nrows, CHOLMOD_COMPLEX, &m_cc));
1748 
1749  for (octave_idx_type i = 0; i < nrows * nrows; i++)
1750  (reinterpret_cast<Complex *> (I_mat->x))[i] = 0.0;
1751 
1752  for (octave_idx_type i = 0; i < nrows; i++)
1753  (reinterpret_cast<Complex *> (I_mat->x))[i * nrows + i] = 1.0;
1754 
1755  q = SuiteSparseQR_qmult<Complex> (SPQR_QX, m_H, m_Htau, m_HPinv, I_mat,
1756  &m_cc);
1757  spqr_error_handler (&m_cc);
1758 
1759  Complex *q_x = reinterpret_cast<Complex *> (q->x);
1760  Complex *ret_vec = const_cast<Complex *> (ret.fortran_vec ());
1761 
1762  for (octave_idx_type j = 0; j < nc; j++)
1763  for (octave_idx_type i = 0; i < nrows; i++)
1764  ret_vec[j * nrows + i] = q_x[j * nrows + i];
1765 
1766  cholmod_l_free_dense (&q, &m_cc);
1767  cholmod_l_free_dense (&I_mat, &m_cc);
1768 
1769  return ret;
1770 
1771 #elif defined (HAVE_CXSPARSE)
1772 
1773  if (econ)
1774  (*current_liboctave_error_handler)
1775  ("sparse-qr: economy mode with CXSparse not supported");
1776 
1777  octave_idx_type nc = N->L->n;
1778  octave_idx_type nr = nrows;
1779  ComplexMatrix ret (nr, nr);
1780  Complex *vec = ret.fortran_vec ();
1781 
1782  if (nr < 0 || nc < 0)
1783  (*current_liboctave_error_handler) ("matrix dimension mismatch");
1784 
1785  if (nr == 0 || nc == 0)
1786  ret = ComplexMatrix (nc, nr, Complex (0.0, 0.0));
1787  else
1788  {
1789  OCTAVE_LOCAL_BUFFER (cs_complex_t, bvec, nr);
1790 
1791  for (octave_idx_type i = 0; i < nr; i++)
1792  bvec[i] = 0.0;
1793 
1794  OCTAVE_LOCAL_BUFFER (Complex, buf, S->m2);
1795 
1796  for (volatile octave_idx_type j = 0, idx = 0; j < nr; j++, idx+=nr)
1797  {
1798  octave_quit ();
1799 
1800  bvec[j] = 1.0;
1801 
1802  volatile octave_idx_type nm = (nr < nc ? nr : nc);
1803 
1804  CXSPARSE_ZNAME (_ipvec) (S->pinv, bvec,
1805  reinterpret_cast<cs_complex_t *> (buf),
1806  nr);
1807 
1808  for (volatile octave_idx_type i = 0; i < nm; i++)
1809  {
1810  octave_quit ();
1811 
1812  CXSPARSE_ZNAME (_happly) (N->L, i, N->B[i],
1813  reinterpret_cast<cs_complex_t *> (buf));
1814  }
1815 
1816  for (octave_idx_type i = 0; i < nr; i++)
1817  vec[i+idx] = buf[i];
1818 
1819  bvec[j] = 0.0;
1820  }
1821  }
1822 
1823  return ret.hermitian ();
1824 
1825 #else
1826 
1827  octave_unused_parameter (econ);
1828 
1829  return ComplexMatrix ();
1830 
1831 #endif
1832 }
1833 
1834 template <>
1835 template <>
1836 SparseComplexMatrix
1837 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<SparseComplexMatrix,
1838  SparseComplexMatrix>
1839  (const SparseComplexMatrix& b, octave_idx_type& info)
1840 {
1841  info = -1;
1842 
1843 #if defined (HAVE_CXSPARSE)
1844 
1845  octave_idx_type nr = nrows;
1846  octave_idx_type nc = ncols;
1847 
1848  octave_idx_type b_nr = b.rows ();
1849  octave_idx_type b_nc = b.cols ();
1850 
1851  SparseComplexMatrix x (nc, b_nc, b.nnz ());
1852  x.xcidx (0) = 0;
1853 
1854  volatile octave_idx_type x_nz = b.nnz ();
1855  volatile octave_idx_type ii = 0;
1856 
1857  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1858  OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? b_nr : nc));
1859  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
1860 
1861  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1862  {
1863  octave_quit ();
1864 
1865  for (octave_idx_type j = 0; j < b_nr; j++)
1866  {
1867  Complex c = b.xelem (j, i);
1868  Xx[j] = c.real ();
1869  Xz[j] = c.imag ();
1870  }
1871 
1872  for (octave_idx_type j = nr; j < S->m2; j++)
1873  buf[j] = 0.;
1874 
1875  CXSPARSE_DNAME (_ipvec) (S->pinv, Xx, buf, nr);
1876 
1877  for (volatile octave_idx_type j = 0; j < nc; j++)
1878  {
1879  octave_quit ();
1880 
1881  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1882  }
1883 
1884  CXSPARSE_DNAME (_usolve) (N->U, buf);
1885  CXSPARSE_DNAME (_ipvec) (S->q, buf, Xx, nc);
1886 
1887  for (octave_idx_type j = nr; j < S->m2; j++)
1888  buf[j] = 0.;
1889 
1890  CXSPARSE_DNAME (_ipvec) (S->pinv, Xz, buf, nr);
1891 
1892  for (volatile octave_idx_type j = 0; j < nc; j++)
1893  {
1894  octave_quit ();
1895 
1896  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1897  }
1898 
1899  CXSPARSE_DNAME (_usolve) (N->U, buf);
1900  CXSPARSE_DNAME (_ipvec) (S->q, buf, Xz, nc);
1901 
1902  for (octave_idx_type j = 0; j < nc; j++)
1903  {
1904  Complex tmp = Complex (Xx[j], Xz[j]);
1905 
1906  if (tmp != 0.0)
1907  {
1908  if (ii == x_nz)
1909  {
1910  // Resize the sparse matrix
1911  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
1912  sz = (sz > 10 ? sz : 10) + x_nz;
1913  x.change_capacity (sz);
1914  x_nz = sz;
1915  }
1916 
1917  x.xdata (ii) = tmp;
1918  x.xridx (ii++) = j;
1919  }
1920  }
1921 
1922  x.xcidx (i+1) = ii;
1923  }
1924 
1925  info = 0;
1926 
1927  return x;
1928 
1929 #else
1930 
1931  octave_unused_parameter (b);
1932 
1933  return SparseComplexMatrix ();
1934 
1935 #endif
1936 }
1937 
1938 template <>
1939 template <>
1940 SparseComplexMatrix
1941 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<SparseComplexMatrix,
1942  SparseComplexMatrix>
1943  (const SparseComplexMatrix& b, octave_idx_type& info) const
1944 {
1945  info = -1;
1946 
1947 #if defined (HAVE_CXSPARSE)
1948 
1949  // These are swapped because the original matrix was transposed in
1950  // sparse_qr<SparseMatrix>::solve.
1951 
1952  octave_idx_type nr = ncols;
1953  octave_idx_type nc = nrows;
1954 
1955  octave_idx_type b_nr = b.rows ();
1956  octave_idx_type b_nc = b.cols ();
1957 
1958  SparseComplexMatrix x (nc, b_nc, b.nnz ());
1959  x.xcidx (0) = 0;
1960 
1961  volatile octave_idx_type x_nz = b.nnz ();
1962  volatile octave_idx_type ii = 0;
1963  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
1964 
1965  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1966  OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? b_nr : nc));
1967  OCTAVE_LOCAL_BUFFER (double, buf, nbuf);
1968 
1969  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1970  {
1971  octave_quit ();
1972 
1973  for (octave_idx_type j = 0; j < b_nr; j++)
1974  {
1975  Complex c = b.xelem (j, i);
1976  Xx[j] = c.real ();
1977  Xz[j] = c.imag ();
1978  }
1979 
1980  for (octave_idx_type j = nr; j < nbuf; j++)
1981  buf[j] = 0.;
1982 
1983  CXSPARSE_DNAME (_pvec) (S->q, Xx, buf, nr);
1984  CXSPARSE_DNAME (_utsolve) (N->U, buf);
1985 
1986  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
1987  {
1988  octave_quit ();
1989 
1990  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1991  }
1992 
1993  CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xx, nc);
1994 
1995  for (octave_idx_type j = nr; j < nbuf; j++)
1996  buf[j] = 0.;
1997 
1998  CXSPARSE_DNAME (_pvec) (S->q, Xz, buf, nr);
1999  CXSPARSE_DNAME (_utsolve) (N->U, buf);
2000 
2001  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
2002  {
2003  octave_quit ();
2004 
2005  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
2006  }
2007 
2008  CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xz, nc);
2009 
2010  for (octave_idx_type j = 0; j < nc; j++)
2011  {
2012  Complex tmp = Complex (Xx[j], Xz[j]);
2013 
2014  if (tmp != 0.0)
2015  {
2016  if (ii == x_nz)
2017  {
2018  // Resize the sparse matrix
2019  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
2020  sz = (sz > 10 ? sz : 10) + x_nz;
2021  x.change_capacity (sz);
2022  x_nz = sz;
2023  }
2024 
2025  x.xdata (ii) = tmp;
2026  x.xridx (ii++) = j;
2027  }
2028  }
2029 
2030  x.xcidx (i+1) = ii;
2031  }
2032 
2033  info = 0;
2034 
2035  x.maybe_compress ();
2036 
2037  return x;
2038 
2039 #else
2040 
2041  octave_unused_parameter (b);
2042 
2043  return SparseComplexMatrix ();
2044 
2045 #endif
2046 }
2047 
2048 template <>
2049 template <>
2050 ComplexMatrix
2051 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<MArray<double>,
2052  ComplexMatrix>
2053  (const MArray<double>& b, octave_idx_type& info)
2054 {
2055  info = -1;
2056 
2057 #if defined (HAVE_CXSPARSE)
2058 
2059  octave_idx_type nr = nrows;
2060  octave_idx_type nc = ncols;
2061 
2062  octave_idx_type b_nc = b.cols ();
2063  octave_idx_type b_nr = b.rows ();
2064 
2065  ComplexMatrix x (nc, b_nc);
2066  cs_complex_t *vec = reinterpret_cast<cs_complex_t *> (x.fortran_vec ());
2067 
2068  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2);
2069  OCTAVE_LOCAL_BUFFER (Complex, Xx, b_nr);
2070 
2071  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2072  {
2073  octave_quit ();
2074 
2075  for (octave_idx_type j = 0; j < b_nr; j++)
2076  Xx[j] = b.xelem (j, i);
2077 
2078  for (octave_idx_type j = nr; j < S->m2; j++)
2079  buf[j] = 0.0;
2080 
2081  CXSPARSE_ZNAME (_ipvec) (S->pinv,
2082  reinterpret_cast<cs_complex_t *> (Xx),
2083  buf, nr);
2084 
2085  for (volatile octave_idx_type j = 0; j < nc; j++)
2086  {
2087  octave_quit ();
2088 
2089  CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf);
2090  }
2091 
2092  CXSPARSE_ZNAME (_usolve) (N->U, buf);
2093  CXSPARSE_ZNAME (_ipvec) (S->q, buf, vec + idx, nc);
2094  }
2095 
2096  info = 0;
2097 
2098  return x;
2099 
2100 #else
2101 
2102  octave_unused_parameter (b);
2103 
2104  return ComplexMatrix ();
2105 
2106 #endif
2107 }
2108 
2109 template <>
2110 template <>
2111 ComplexMatrix
2112 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<MArray<double>,
2113  ComplexMatrix>
2114  (const MArray<double>& b, octave_idx_type& info) const
2115 {
2116  info = -1;
2117 
2118 #if defined (HAVE_CXSPARSE)
2119 
2120  // These are swapped because the original matrix was transposed in
2121  // sparse_qr<SparseComplexMatrix>::solve.
2122 
2123  octave_idx_type nr = ncols;
2124  octave_idx_type nc = nrows;
2125 
2126  octave_idx_type b_nc = b.cols ();
2127  octave_idx_type b_nr = b.rows ();
2128 
2129  ComplexMatrix x (nc, b_nc);
2130  cs_complex_t *vec = reinterpret_cast<cs_complex_t *> (x.fortran_vec ());
2131 
2132  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
2133 
2134  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf);
2135  OCTAVE_LOCAL_BUFFER (Complex, Xx, b_nr);
2136  OCTAVE_LOCAL_BUFFER (double, B, nr);
2137 
2138  for (octave_idx_type i = 0; i < nr; i++)
2139  B[i] = N->B[i];
2140 
2141  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2142  {
2143  octave_quit ();
2144 
2145  for (octave_idx_type j = 0; j < b_nr; j++)
2146  Xx[j] = b.xelem (j, i);
2147 
2148  for (octave_idx_type j = nr; j < nbuf; j++)
2149  buf[j] = 0.0;
2150 
2151  CXSPARSE_ZNAME (_pvec) (S->q, reinterpret_cast<cs_complex_t *> (Xx),
2152  buf, nr);
2153  CXSPARSE_ZNAME (_utsolve) (N->U, buf);
2154 
2155  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
2156  {
2157  octave_quit ();
2158 
2159  CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf);
2160  }
2161 
2162  CXSPARSE_ZNAME (_pvec) (S->pinv, buf, vec + idx, nc);
2163  }
2164 
2165  info = 0;
2166 
2167  return x;
2168 
2169 #else
2170 
2171  octave_unused_parameter (b);
2172 
2173  return ComplexMatrix ();
2174 
2175 #endif
2176 }
2177 
2178 template <>
2179 template <>
2180 SparseComplexMatrix
2181 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<SparseMatrix,
2182  SparseComplexMatrix>
2183  (const SparseMatrix& b, octave_idx_type& info)
2184 {
2185  info = -1;
2186 
2187 #if defined (HAVE_CXSPARSE)
2188 
2189  octave_idx_type nr = nrows;
2190  octave_idx_type nc = ncols;
2191 
2192  octave_idx_type b_nc = b.cols ();
2193  octave_idx_type b_nr = b.rows ();
2194 
2195  SparseComplexMatrix x (nc, b_nc, b.nnz ());
2196  x.xcidx (0) = 0;
2197 
2198  volatile octave_idx_type x_nz = b.nnz ();
2199  volatile octave_idx_type ii = 0;
2200 
2201  OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? b_nr : nc));
2202  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2);
2203 
2204  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2205  {
2206  octave_quit ();
2207 
2208  for (octave_idx_type j = 0; j < b_nr; j++)
2209  Xx[j] = b.xelem (j, i);
2210 
2211  for (octave_idx_type j = nr; j < S->m2; j++)
2212  buf[j] = 0.0;
2213 
2214  CXSPARSE_ZNAME (_ipvec) (S->pinv,
2215  reinterpret_cast<cs_complex_t *> (Xx),
2216  buf, nr);
2217 
2218  for (volatile octave_idx_type j = 0; j < nc; j++)
2219  {
2220  octave_quit ();
2221 
2222  CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf);
2223  }
2224 
2225  CXSPARSE_ZNAME (_usolve) (N->U, buf);
2226  CXSPARSE_ZNAME (_ipvec) (S->q, buf,
2227  reinterpret_cast<cs_complex_t *> (Xx),
2228  nc);
2229 
2230  for (octave_idx_type j = 0; j < nc; j++)
2231  {
2232  Complex tmp = Xx[j];
2233 
2234  if (tmp != 0.0)
2235  {
2236  if (ii == x_nz)
2237  {
2238  // Resize the sparse matrix
2239  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
2240  sz = (sz > 10 ? sz : 10) + x_nz;
2241  x.change_capacity (sz);
2242  x_nz = sz;
2243  }
2244 
2245  x.xdata (ii) = tmp;
2246  x.xridx (ii++) = j;
2247  }
2248  }
2249 
2250  x.xcidx (i+1) = ii;
2251  }
2252 
2253  info = 0;
2254 
2255  x.maybe_compress ();
2256 
2257  return x;
2258 
2259 #else
2260 
2261  octave_unused_parameter (b);
2262 
2263  return SparseComplexMatrix ();
2264 
2265 #endif
2266 }
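// Note: for the sparse right-hand-side variants the result is assembled
// column by column and the nonzero capacity is grown on demand.  The
// heuristic
//
//   sz = x_nz * (b_nc - i) / b_nc;      // extrapolate from the columns left
//   sz = (sz > 10 ? sz : 10) + x_nz;    // but grow by at least 10 entries
//
// scales the current allocation by the fraction of RHS columns still to be
// processed, so reallocations stay infrequent when column densities are
// roughly uniform; maybe_compress () trims any excess afterwards.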
2267 
2268 template <>
2269 template <>
2270 SparseComplexMatrix
2271 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<SparseMatrix,
2272                                                            SparseComplexMatrix>
2273  (const SparseMatrix& b, octave_idx_type& info) const
2274 {
2275  info = -1;
2276 
2277 #if defined (HAVE_CXSPARSE)
2278 
2279  // These are swapped because the original matrix was transposed in
2280  // sparse_qr<SparseComplexMatrix>::solve.
2281 
2282  octave_idx_type nr = ncols;
2283  octave_idx_type nc = nrows;
2284 
2285  octave_idx_type b_nc = b.cols ();
2286  octave_idx_type b_nr = b.rows ();
2287 
2288  SparseComplexMatrix x (nc, b_nc, b.nnz ());
2289  x.xcidx (0) = 0;
2290 
2291  volatile octave_idx_type x_nz = b.nnz ();
2292  volatile octave_idx_type ii = 0;
2293  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
2294 
2295  OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? b_nr : nc));
2296  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf);
2297  OCTAVE_LOCAL_BUFFER (double, B, nr);
2298 
2299  for (octave_idx_type i = 0; i < nr; i++)
2300  B[i] = N->B[i];
2301 
2302  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2303  {
2304  octave_quit ();
2305 
2306  for (octave_idx_type j = 0; j < b_nr; j++)
2307  Xx[j] = b.xelem (j, i);
2308 
2309  for (octave_idx_type j = nr; j < nbuf; j++)
2310  buf[j] = 0.0;
2311 
2312  CXSPARSE_ZNAME (_pvec) (S->q,
2313  reinterpret_cast<cs_complex_t *> (Xx),
2314  buf, nr);
2315  CXSPARSE_ZNAME (_utsolve) (N->U, buf);
2316 
2317  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
2318  {
2319  octave_quit ();
2320 
2321  CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf);
2322  }
2323 
2324  CXSPARSE_ZNAME (_pvec) (S->pinv, buf,
2325  reinterpret_cast<cs_complex_t *> (Xx),
2326  nc);
2327 
2328  for (octave_idx_type j = 0; j < nc; j++)
2329  {
2330  Complex tmp = Xx[j];
2331 
2332  if (tmp != 0.0)
2333  {
2334  if (ii == x_nz)
2335  {
2336  // Resize the sparse matrix
2337  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
2338  sz = (sz > 10 ? sz : 10) + x_nz;
2339  x.change_capacity (sz);
2340  x_nz = sz;
2341  }
2342 
2343  x.xdata (ii) = tmp;
2344  x.xridx (ii++) = j;
2345  }
2346  }
2347 
2348  x.xcidx (i+1) = ii;
2349  }
2350 
2351  info = 0;
2352 
2353  x.maybe_compress ();
2354 
2355  return x;
2356 
2357 #else
2358 
2359  octave_unused_parameter (b);
2360 
2361  return SparseComplexMatrix ();
2362 
2363 #endif
2364 }
2365 
2366 template <>
2367 template <>
2368 ComplexMatrix
2369 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<MArray<Complex>,
2370                                                            ComplexMatrix>
2371  (const MArray<Complex>& b, octave_idx_type& info)
2372 {
2373  info = -1;
2374 
2375 #if defined (HAVE_CXSPARSE)
2376 
2377  octave_idx_type nr = nrows;
2378  octave_idx_type nc = ncols;
2379 
2380  octave_idx_type b_nc = b.cols ();
2381  octave_idx_type b_nr = b.rows ();
2382 
2383  const cs_complex_t *bvec = reinterpret_cast<const cs_complex_t *>
2384  (b.data ());
2385 
2386  ComplexMatrix x (nc, b_nc);
2387  cs_complex_t *vec = reinterpret_cast<cs_complex_t *>
2388  (x.fortran_vec ());
2389 
2390  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2);
2391 
2392  for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc;
2393  i++, idx+=nc, bidx+=b_nr)
2394  {
2395  octave_quit ();
2396 
2397  for (octave_idx_type j = nr; j < S->m2; j++)
2398  buf[j] = 0.0;
2399 
2400  CXSPARSE_ZNAME (_ipvec) (S->pinv, bvec + bidx, buf, nr);
2401 
2402  for (volatile octave_idx_type j = 0; j < nc; j++)
2403  {
2404  octave_quit ();
2405 
2406  CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf);
2407  }
2408 
2409  CXSPARSE_ZNAME (_usolve) (N->U, buf);
2410  CXSPARSE_ZNAME (_ipvec) (S->q, buf, vec + idx, nc);
2411  }
2412 
2413  info = 0;
2414 
2415  return x;
2416 
2417 #else
2418 
2419  octave_unused_parameter (b);
2420 
2421  return ComplexMatrix ();
2422 
2423 #endif
2424 }
2425 
2426 template <>
2427 template <>
2428 ComplexMatrix
2429 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<MArray<Complex>,
2430                                                            ComplexMatrix>
2431  (const MArray<Complex>& b, octave_idx_type& info) const
2432 {
2433  info = -1;
2434 
2435 #if defined (HAVE_CXSPARSE)
2436 
2437  // These are swapped because the original matrix was transposed in
2438  // sparse_qr<SparseComplexMatrix>::solve.
2439 
2440  octave_idx_type nr = ncols;
2441  octave_idx_type nc = nrows;
2442 
2443  octave_idx_type b_nc = b.cols ();
2444  octave_idx_type b_nr = b.rows ();
2445 
2446  const cs_complex_t *bvec = reinterpret_cast<const cs_complex_t *>
2447  (b.data ());
2448 
2449  ComplexMatrix x (nc, b_nc);
2450  cs_complex_t *vec = reinterpret_cast<cs_complex_t *> (x.fortran_vec ());
2451 
2452  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
2453 
2454  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf);
2455  OCTAVE_LOCAL_BUFFER (double, B, nr);
2456 
2457  for (octave_idx_type i = 0; i < nr; i++)
2458  B[i] = N->B[i];
2459 
2460  for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc;
2461  i++, idx+=nc, bidx+=b_nr)
2462  {
2463  octave_quit ();
2464 
2465  for (octave_idx_type j = nr; j < nbuf; j++)
2466  buf[j] = 0.0;
2467 
2468  CXSPARSE_ZNAME (_pvec) (S->q, bvec + bidx, buf, nr);
2469  CXSPARSE_ZNAME (_utsolve) (N->U, buf);
2470 
2471  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
2472  {
2473  octave_quit ();
2474 
2475  CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf);
2476  }
2477 
2478  CXSPARSE_ZNAME (_pvec) (S->pinv, buf, vec + idx, nc);
2479  }
2480 
2481  info = 0;
2482 
2483  return x;
2484 
2485 #else
2486 
2487  octave_unused_parameter (b);
2488 
2489  return ComplexMatrix ();
2490 
2491 #endif
2492 }
2493 
2494 template <>
2495 template <>
2496 SparseComplexMatrix
2497 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<SparseComplexMatrix,
2498                                                            SparseComplexMatrix>
2499  (const SparseComplexMatrix& b, octave_idx_type& info)
2500 {
2501  info = -1;
2502 
2503 #if defined (HAVE_CXSPARSE)
2504 
2505  octave_idx_type nr = nrows;
2506  octave_idx_type nc = ncols;
2507 
2508  octave_idx_type b_nc = b.cols ();
2509  octave_idx_type b_nr = b.rows ();
2510 
2511  SparseComplexMatrix x (nc, b_nc, b.nnz ());
2512  x.xcidx (0) = 0;
2513 
2514  volatile octave_idx_type x_nz = b.nnz ();
2515  volatile octave_idx_type ii = 0;
2516 
2517  OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? b_nr : nc));
2518  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2);
2519 
2520  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2521  {
2522  octave_quit ();
2523 
2524  for (octave_idx_type j = 0; j < b_nr; j++)
2525  Xx[j] = b.xelem (j, i);
2526 
2527  for (octave_idx_type j = nr; j < S->m2; j++)
2528  buf[j] = 0.0;
2529 
2530  CXSPARSE_ZNAME (_ipvec) (S->pinv,
2531  reinterpret_cast<cs_complex_t *> (Xx),
2532  buf, nr);
2533 
2534  for (volatile octave_idx_type j = 0; j < nc; j++)
2535  {
2536  octave_quit ();
2537 
2538  CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf);
2539  }
2540 
2541  CXSPARSE_ZNAME (_usolve) (N->U, buf);
2542  CXSPARSE_ZNAME (_ipvec) (S->q, buf,
2543  reinterpret_cast<cs_complex_t *> (Xx),
2544  nc);
2545 
2546  for (octave_idx_type j = 0; j < nc; j++)
2547  {
2548  Complex tmp = Xx[j];
2549 
2550  if (tmp != 0.0)
2551  {
2552  if (ii == x_nz)
2553  {
2554  // Resize the sparse matrix
2555  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
2556  sz = (sz > 10 ? sz : 10) + x_nz;
2557  x.change_capacity (sz);
2558  x_nz = sz;
2559  }
2560 
2561  x.xdata (ii) = tmp;
2562  x.xridx (ii++) = j;
2563  }
2564  }
2565 
2566  x.xcidx (i+1) = ii;
2567  }
2568 
2569  info = 0;
2570 
2571  x.maybe_compress ();
2572 
2573  return x;
2574 
2575 #else
2576 
2577  octave_unused_parameter (b);
2578 
2579  return SparseComplexMatrix ();
2580 
2581 #endif
2582 }
2583 
2584 template <>
2585 template <>
2586 SparseComplexMatrix
2587 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<SparseComplexMatrix,
2588                                                            SparseComplexMatrix>
2589  (const SparseComplexMatrix& b, octave_idx_type& info) const
2590 {
2591  info = -1;
2592 
2593 #if defined (HAVE_CXSPARSE)
2594 
2595  // These are swapped because the original matrix was transposed in
2596  // sparse_qr<SparseComplexMatrix>::solve.
2597 
2598  octave_idx_type nr = ncols;
2599  octave_idx_type nc = nrows;
2600 
2601  octave_idx_type b_nc = b.cols ();
2602  octave_idx_type b_nr = b.rows ();
2603 
2604  SparseComplexMatrix x (nc, b_nc, b.nnz ());
2605  x.xcidx (0) = 0;
2606 
2607  volatile octave_idx_type x_nz = b.nnz ();
2608  volatile octave_idx_type ii = 0;
2609  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
2610 
2611  OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? b_nr : nc));
2612  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf);
2613  OCTAVE_LOCAL_BUFFER (double, B, nr);
2614 
2615  for (octave_idx_type i = 0; i < nr; i++)
2616  B[i] = N->B[i];
2617 
2618  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2619  {
2620  octave_quit ();
2621 
2622  for (octave_idx_type j = 0; j < b_nr; j++)
2623  Xx[j] = b.xelem (j, i);
2624 
2625  for (octave_idx_type j = nr; j < nbuf; j++)
2626  buf[j] = 0.0;
2627 
2628  CXSPARSE_ZNAME (_pvec) (S->q, reinterpret_cast<cs_complex_t *> (Xx),
2629  buf, nr);
2630  CXSPARSE_ZNAME (_utsolve) (N->U, buf);
2631 
2632  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
2633  {
2634  octave_quit ();
2635 
2636  CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf);
2637  }
2638 
2639  CXSPARSE_ZNAME (_pvec) (S->pinv, buf,
2640  reinterpret_cast<cs_complex_t *> (Xx), nc);
2641 
2642  for (octave_idx_type j = 0; j < nc; j++)
2643  {
2644  Complex tmp = Xx[j];
2645 
2646  if (tmp != 0.0)
2647  {
2648  if (ii == x_nz)
2649  {
2650  // Resize the sparse matrix
2651  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
2652  sz = (sz > 10 ? sz : 10) + x_nz;
2653  x.change_capacity (sz);
2654  x_nz = sz;
2655  }
2656 
2657  x.xdata (ii) = tmp;
2658  x.xridx (ii++) = j;
2659  }
2660  }
2661 
2662  x.xcidx (i+1) = ii;
2663  }
2664 
2665  info = 0;
2666 
2667  x.maybe_compress ();
2668 
2669  return x;
2670 
2671 #else
2672 
2673  octave_unused_parameter (b);
2674 
2675  return SparseComplexMatrix ();
2676 
2677 #endif
2678 }
2679 
2680 template <typename SPARSE_T>
2681 sparse_qr<SPARSE_T>::sparse_qr ()
2682  : m_rep (new sparse_qr_rep (SPARSE_T (), 0))
2683 { }
2684 
2685 template <typename SPARSE_T>
2686 sparse_qr<SPARSE_T>::sparse_qr (const SPARSE_T& a, int order)
2687  : m_rep (new sparse_qr_rep (a, order))
2688 { }
2689 
2690 template <typename SPARSE_T>
2691 bool
2692 sparse_qr<SPARSE_T>::ok () const
2693 {
2694  return m_rep->ok ();
2695 }
2696 
2697 template <typename SPARSE_T>
2698 SPARSE_T
2699 sparse_qr<SPARSE_T>::V () const
2700 {
2701  return m_rep->V ();
2702 }
2703 
2704 template <typename SPARSE_T>
2705 ColumnVector
2706 sparse_qr<SPARSE_T>::Pinv () const
2707 {
2708  return m_rep->P ();
2709 }
2710 
2711 template <typename SPARSE_T>
2712 ColumnVector
2713 sparse_qr<SPARSE_T>::P () const
2714 {
2715  return m_rep->P ();
2716 }
2717 
2718 template <typename SPARSE_T>
2719 ColumnVector
2720 sparse_qr<SPARSE_T>::E () const
2721 {
2722  return m_rep->E();
2723 }
2724 
2725 
2726 template <typename SPARSE_T>
2727 SparseMatrix
2728 sparse_qr<SPARSE_T>::E_MAT () const
2729 {
2730  ColumnVector perm = m_rep->E ();
2731  octave_idx_type nrows = perm.rows ();
2732  SparseMatrix ret (nrows, nrows, nrows);
2733  for (octave_idx_type i = 0; i < nrows; i++)
2734  ret(perm(i) - 1, i) = 1.0;
2735  return ret;
2736 }
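// E () returns the column permutation as a 1-based vector, while E_MAT ()
// builds the equivalent sparse permutation matrix.  A minimal usage sketch
// (the matrix 'a' here is hypothetical):
//
//   sparse_qr<SparseMatrix> fact (a, 0);
//   ColumnVector e = fact.E ();          // e(i) is a 1-based column index
//   SparseMatrix Emat = fact.E_MAT ();   // Emat(e(i)-1, i) == 1
//   // a * Emat permutes the columns of a exactly as indexing with e does,
//   // so, conceptually (up to rounding), fact.Q () * fact.R () reproduces
//   // a * Emat.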
2737 
2738 
2739 template <typename SPARSE_T>
2740 SPARSE_T
2741 sparse_qr<SPARSE_T>::R (bool econ) const
2742 {
2743  return m_rep->R (econ);
2744 }
2745 
2746 template <typename SPARSE_T>
2747 typename SPARSE_T::dense_matrix_type
2748 sparse_qr<SPARSE_T>::C (const typename SPARSE_T::dense_matrix_type& b,
2749  bool econ) const
2750 {
2751  return m_rep->C (b, econ);
2752 }
2753 
2754 template <typename SPARSE_T>
2755 typename SPARSE_T::dense_matrix_type
2756 sparse_qr<SPARSE_T>::Q (bool econ) const
2757 {
2758  return m_rep->Q (econ);
2759 }
2760 
2761 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
2762 //specializations of function min2norm_solve
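// Each specialization below follows the same pattern: convert the Octave
// operands to CHOLMOD views (ros2rcs, cos2ccs, rod2rcd, cod2ccd, ...), call
// SuiteSparseQR_min2norm with SPQR_DEFAULT_TOL, copy the result back into an
// Octave matrix, and free any index arrays that had to be duplicated because
// octave_idx_type and SuiteSparse_long differ in size.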
2763 template <>
2764 template <>
2765 OCTAVE_API Matrix
2766 sparse_qr<SparseMatrix>::min2norm_solve<MArray<double>, Matrix>
2767  (const SparseMatrix& a, const MArray<double>& b, octave_idx_type& info, int order)
2768 {
2769  info = -1;
2770 
2771  octave_idx_type b_nc = b.cols ();
2772  octave_idx_type nc = a.cols ();
2773  Matrix x (nc, b_nc);
2774  cholmod_common cc;
2775 
2776  cholmod_l_start (&cc);
2777  cholmod_sparse A = ros2rcs (a);
2778  cholmod_dense B = rod2rcd (b);
2779  cholmod_dense *X;
2780 
2781  X = SuiteSparseQR_min2norm<double> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);
2782  spqr_error_handler (&cc);
2783 
2784  double *xdata = x.fortran_vec ();
2785  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
2786  xdata[i] = reinterpret_cast<double *> (X->x)[i];
2787  info = 0;
2788 
2789  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
2790  {
2791  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
2792  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
2793  }
2794  cholmod_l_free_dense (&X, &cc);
2795  cholmod_l_finish (&cc);
2796 
2797  return x;
2798 }
2799 
2800 template <>
2801 template <>
2802 OCTAVE_API SparseMatrix
2803 sparse_qr<SparseMatrix>::min2norm_solve<SparseMatrix, SparseMatrix>
2804  (const SparseMatrix& a, const SparseMatrix& b, octave_idx_type& info, int order)
2805 {
2806  info = -1;
2807 
2808  SparseMatrix x;
2809  cholmod_common cc;
2810 
2811  cholmod_l_start (&cc);
2812  cholmod_sparse A = ros2rcs (a);
2813  cholmod_sparse B = ros2rcs (b);
2814  cholmod_sparse *X;
2815 
2816  X = SuiteSparseQR_min2norm<double> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);
2817  spqr_error_handler (&cc);
2818 
2819  x = rcs2ros (X, &cc);
2820  info = 0;
2821 
2822  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
2823  {
2824  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
2825  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
2826  delete [] reinterpret_cast<SuiteSparse_long *> (B.p);
2827  delete [] reinterpret_cast<SuiteSparse_long *> (B.i);
2828  }
2829  cholmod_l_free_sparse (&X, &cc);
2830  cholmod_l_finish (&cc);
2831 
2832  return x;
2833 }
2834 
2835 template <>
2836 template <>
2837 OCTAVE_API ComplexMatrix
2838 sparse_qr<SparseMatrix>::min2norm_solve<MArray<Complex>, ComplexMatrix>
2839  (const SparseMatrix& a, const MArray<Complex>& b, octave_idx_type& info, int order)
2840 {
2841  info = -1;
2842 
2843  octave_idx_type b_nc = b.cols ();
2844  octave_idx_type nc = a.cols ();
2845  ComplexMatrix x (nc, b_nc);
2846  cholmod_common cc;
2847 
2848  cholmod_l_start (&cc);
2849  cholmod_sparse *A = ros2ccs (a, &cc);
2850  cholmod_dense B = cod2ccd (b);
2851  cholmod_dense *X;
2852 
2853  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, A, &B, &cc);
2854  spqr_error_handler (&cc);
2855 
2856  Complex *xdata = x.fortran_vec ();
2857  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
2858  xdata[i] = reinterpret_cast<Complex *> (X->x)[i];
2859  info = 0;
2860 
2861  cholmod_l_free_sparse (&A, &cc);
2862  cholmod_l_free_dense (&X, &cc);
2863  cholmod_l_finish (&cc);
2864 
2865  return x;
2866 }
2867 
2868 template <>
2869 template <>
2870 OCTAVE_API SparseComplexMatrix
2871 sparse_qr<SparseMatrix>::min2norm_solve<SparseComplexMatrix, SparseComplexMatrix>
2872  (const SparseMatrix& a, const SparseComplexMatrix& b, octave_idx_type& info, int order)
2873 {
2874  info = -1;
2875 
2876  cholmod_common cc;
2877 
2878  cholmod_l_start (&cc);
2879  cholmod_sparse *A = ros2ccs (a, &cc);
2880  cholmod_sparse B = cos2ccs (b);
2881  cholmod_sparse *X;
2882 
2883  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, A, &B, &cc);
2884  spqr_error_handler (&cc);
2885 
2886  SparseComplexMatrix ret = ccs2cos (X, &cc);
2887  info = 0;
2888 
2889  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
2890  {
2891  delete [] reinterpret_cast<SuiteSparse_long *> (B.p);
2892  delete [] reinterpret_cast<SuiteSparse_long *> (B.i);
2893  }
2894  cholmod_l_free_sparse (&A, &cc);
2895  cholmod_l_free_sparse (&X, &cc);
2896  cholmod_l_finish (&cc);
2897 
2898  return ret;
2899 }
2900 
2901 template <>
2902 template <>
2903 OCTAVE_API ComplexMatrix
2904 sparse_qr<SparseComplexMatrix>::min2norm_solve<MArray<Complex>, ComplexMatrix>
2905  (const SparseComplexMatrix& a, const MArray<Complex>& b, octave_idx_type& info, int order)
2906 {
2907  info = -1;
2908 
2909  octave_idx_type b_nc = b.cols ();
2910  octave_idx_type nc = a.cols ();
2911  ComplexMatrix x (nc, b_nc);
2912  cholmod_common cc;
2913 
2914  cholmod_l_start (&cc);
2915  cholmod_sparse A = cos2ccs (a);
2916  cholmod_dense B = cod2ccd (b);
2917  cholmod_dense *X;
2918 
2919  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);
2920  spqr_error_handler (&cc);
2921 
2922  Complex *xdata = x.fortran_vec ();
2923  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
2924  xdata[i] = reinterpret_cast<Complex *> (X->x)[i];
2925  info = 0;
2926 
2927  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
2928  {
2929  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
2930  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
2931  }
2932  cholmod_l_free_dense (&X, &cc);
2933  cholmod_l_finish (&cc);
2934 
2935  return x;
2936 }
2937 
2938 // FIXME: 2024/01/07: This template specialization does not appear to be
2939 // reachable from current Octave code calling qrsolve from sparse-dmsolve.cc.
2940 template <>
2941 template <>
2942 OCTAVE_API ComplexMatrix
2943 sparse_qr<SparseComplexMatrix>::min2norm_solve<MArray<double>, ComplexMatrix>
2944  (const SparseComplexMatrix& a, const MArray<double>& b, octave_idx_type& info, int order)
2945 {
2946  info = -1;
2947 
2948  octave_idx_type b_nc = b.cols ();
2949  octave_idx_type nc = a.cols ();
2950  ComplexMatrix x (nc, b_nc);
2951  cholmod_common cc;
2952 
2953  cholmod_l_start (&cc);
2954  cholmod_sparse A = cos2ccs (a);
2955  cholmod_dense *B = rod2ccd (b, &cc);
2956  cholmod_dense *X;
2957 
2958  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, B, &cc);
2959  spqr_error_handler (&cc);
2960 
2961  Complex *xdata = x.fortran_vec ();
2962  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
2963  xdata[i] = reinterpret_cast<Complex *> (X->x)[i];
2964  info = 0;
2965 
2966  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
2967  {
2968  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
2969  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
2970  }
2971  cholmod_l_free_dense (&B, &cc);
2972  cholmod_l_free_dense (&X, &cc);
2973  cholmod_l_finish (&cc);
2974 
2975  return x;
2976 }
2977 
2978 template <>
2979 template <>
2980 OCTAVE_API SparseComplexMatrix
2981 sparse_qr<SparseComplexMatrix>::min2norm_solve<SparseComplexMatrix, SparseComplexMatrix>
2982  (const SparseComplexMatrix& a, const SparseComplexMatrix& b, octave_idx_type& info, int order)
2983 {
2984  info = -1;
2985 
2986  cholmod_common cc;
2987 
2988  cholmod_l_start (&cc);
2989  cholmod_sparse A = cos2ccs (a);
2990  cholmod_sparse B = cos2ccs (b);
2991  cholmod_sparse *X;
2992 
2993  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);
2994  spqr_error_handler (&cc);
2995 
2996  SparseComplexMatrix ret = ccs2cos (X, &cc);
2997  info = 0;
2998 
2999  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
3000  {
3001  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
3002  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
3003  delete [] reinterpret_cast<SuiteSparse_long *> (B.p);
3004  delete [] reinterpret_cast<SuiteSparse_long *> (B.i);
3005  }
3006  cholmod_l_free_sparse (&X, &cc);
3007  cholmod_l_finish (&cc);
3008 
3009  return ret;
3010 }
3011 
3012 // FIXME: 2024/01/07: This template specialization does not appear to be
3013 // reachable from current Octave code calling qrsolve from sparse-dmsolve.cc.
3014 template <>
3015 template <>
3016 OCTAVE_API SparseComplexMatrix
3017 sparse_qr<SparseComplexMatrix>::min2norm_solve<SparseMatrix, SparseComplexMatrix>
3018  (const SparseComplexMatrix& a, const SparseMatrix& b, octave_idx_type& info, int order)
3019 {
3020  info = -1;
3021 
3022  cholmod_common cc;
3023 
3024  cholmod_l_start (&cc);
3025  cholmod_sparse A = cos2ccs (a);
3026  cholmod_sparse *B = ros2ccs (b, &cc);
3027  cholmod_sparse *X;
3028 
3029  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, B, &cc);
3030  spqr_error_handler (&cc);
3031 
3032  SparseComplexMatrix ret = ccs2cos (X, &cc);
3033  info = 0;
3034 
3035  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
3036  {
3037  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
3038  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
3039  }
3040  cholmod_l_free_sparse (&B, &cc);
3041  cholmod_l_free_sparse (&X, &cc);
3042  cholmod_l_finish (&cc);
3043 
3044  return ret;
3045 }
3046 #endif
3047 
3048 // FIXME: Why is the "order" of the QR calculation as used in the
3049 // CXSparse function sqr 3 for real matrices and 2 for complex? These
3050 // values seem to be required but there was no explanation in David
3051 // Bateman's original code.
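// For reference (an assumption drawn from the CSparse documentation of
// cs_sqr/cs_amd, not from the original author): the "order" argument selects
// the fill-reducing ordering, where 3 requests amd(A'*A), the ordering
// usually recommended for QR, and 2 requests amd(S'*S) with dense rows
// dropped first, which can behave better when a few rows are nearly full.
// Why the real and complex paths differ is still undocumented here.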
3052 
3053 template <typename SPARSE_T>
3054 class
3055 cxsparse_defaults
3056 {
3057 public:
3058  enum { order = -1 };
3059 };
3060 
3061 template <>
3062 class
3063 cxsparse_defaults<SparseMatrix>
3064 {
3065 public:
3066 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
3067  enum { order = SPQR_ORDERING_DEFAULT };
3068 #elif defined (HAVE_CXSPARSE)
3069  enum { order = 3 };
3070 #endif
3071 };
3072 
3073 template <>
3074 class
3075 cxsparse_defaults<SparseComplexMatrix>
3076 {
3077 public:
3078 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
3079  enum { order = SPQR_ORDERING_DEFAULT };
3080 #elif defined (HAVE_CXSPARSE)
3081  enum { order = 2 };
3082 #endif
3083 };
3084 
3085 template <typename SPARSE_T>
3086 template <typename RHS_T, typename RET_T>
3087 RET_T
3088 sparse_qr<SPARSE_T>::solve (const SPARSE_T& a, const RHS_T& b,
3089  octave_idx_type& info)
3090 {
3091 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
3092 
3093  info = -1;
3094 
3095  octave_idx_type nr = a.rows ();
3096  octave_idx_type nc = a.cols ();
3097 
3098  octave_idx_type b_nc = b.cols ();
3099  octave_idx_type b_nr = b.rows ();
3100 
3101  int order = cxsparse_defaults<SPARSE_T>::order;
3102 
3103  if (nr < 0 || nc < 0 || b_nc < 0 || b_nr < 0)
3104  (*current_liboctave_error_handler)
3105  ("matrix dimension with negative size");
3106 
3107  if ( nr != b_nr)
3108  (*current_liboctave_error_handler)
3109  ("matrix dimension mismatch in solution of minimum norm problem");
3110 
3111  info = 0;
3112 
3113  return min2norm_solve<RHS_T, RET_T> (a, b, info, order);
3114 
3115 #elif defined (HAVE_CXSPARSE)
3116 
3117  info = -1;
3118 
3119  octave_idx_type nr = a.rows ();
3120  octave_idx_type nc = a.cols ();
3121 
3122  octave_idx_type b_nc = b.cols ();
3123  octave_idx_type b_nr = b.rows ();
3124 
3125  int order = cxsparse_defaults<SPARSE_T>::order;
3126 
3127  if (nr < 0 || nc < 0 || nr != b_nr)
3128  (*current_liboctave_error_handler)
3129  ("matrix dimension mismatch in solution of minimum norm problem");
3130 
3131  if (nr == 0 || nc == 0 || b_nc == 0)
3132  {
3133  info = 0;
3134 
3135  return RET_T (nc, b_nc, 0.0);
3136  }
3137  else if (nr >= nc)
3138  {
3139  sparse_qr<SPARSE_T> q (a, order);
3140 
3141  return q.ok () ? q.tall_solve<RHS_T, RET_T> (b, info) : RET_T ();
3142  }
3143  else
3144  {
3145  sparse_qr<SPARSE_T> q (a.hermitian (), order);
3146 
3147  return q.ok () ? q.wide_solve<RHS_T, RET_T> (b, info) : RET_T ();
3148  }
3149 
3150 #else
3151 
3152  octave_unused_parameter (a);
3153  octave_unused_parameter (b);
3154  octave_unused_parameter (info);
3155 
3156  return RET_T ();
3157 
3158 #endif
3159 }
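// Dispatch summary: with SPQR + CHOLMOD available, every system is handed to
// the min2norm_solve specializations above; with only CXSparse, square and
// tall systems (nr >= nc) go through tall_solve on the factorization of a,
// while wide systems go through wide_solve on the factorization of
// a.hermitian ().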
3160 
3161 //explicit instantiations of static member function solve
3162 template
3163 OCTAVE_API Matrix
3164 sparse_qr<SparseMatrix>::solve<MArray<double>, Matrix>
3165 (const SparseMatrix& a, const MArray<double>& b, octave_idx_type& info);
3166 
3167 template
3168 OCTAVE_API SparseMatrix
3169 sparse_qr<SparseMatrix>::solve<SparseMatrix, SparseMatrix>
3170 (const SparseMatrix& a, const SparseMatrix& b, octave_idx_type& info);
3171 
3172 template
3173 OCTAVE_API ComplexMatrix
3174 sparse_qr<SparseMatrix>::solve<MArray<Complex>, ComplexMatrix>
3175 (const SparseMatrix& a, const MArray<Complex>& b, octave_idx_type& info);
3176 
3177 template
3178 OCTAVE_API SparseComplexMatrix
3179 sparse_qr<SparseMatrix>::solve<SparseComplexMatrix, SparseComplexMatrix>
3180 (const SparseMatrix& a, const SparseComplexMatrix& b,
3181  octave_idx_type& info);
3182 
3183 template
3184 OCTAVE_API ComplexMatrix
3185 sparse_qr<SparseComplexMatrix>::solve<MArray<Complex>, ComplexMatrix>
3186 (const SparseComplexMatrix& a, const MArray<Complex>& b,
3187  octave_idx_type& info);
3188 
3189 template
3190 OCTAVE_API SparseComplexMatrix
3191 sparse_qr<SparseComplexMatrix>::solve<SparseComplexMatrix,
3192                                       SparseComplexMatrix>
3193 (const SparseComplexMatrix& a, const SparseComplexMatrix& b,
3194  octave_idx_type& info);
3195 
3196 template
3197 OCTAVE_API ComplexMatrix
3198 sparse_qr<SparseComplexMatrix>::solve<MArray<double>, ComplexMatrix>
3199 (const SparseComplexMatrix& a, const MArray<double>& b,
3200  octave_idx_type& info);
3201 
3202 template
3203 OCTAVE_API SparseComplexMatrix
3204 sparse_qr<SparseComplexMatrix>::solve<SparseMatrix, SparseComplexMatrix>
3205 (const SparseComplexMatrix& a, const SparseMatrix& b,
3206  octave_idx_type& info);
3207 
3208 //explicit instantiations of member function E_MAT
3209 template
3210 OCTAVE_API SparseMatrix
3211 sparse_qr<SparseMatrix>::E_MAT () const;
3212 
3213 template
3214 OCTAVE_API SparseMatrix
3215 sparse_qr<SparseComplexMatrix>::E_MAT () const;
3216 
3217 template <typename SPARSE_T>
3218 template <typename RHS_T, typename RET_T>
3219 RET_T
3220 sparse_qr<SPARSE_T>::tall_solve (const RHS_T& b, octave_idx_type& info) const
3221 {
3222  return m_rep->template tall_solve<RHS_T, RET_T> (b, info);
3223 }
3224 
3225 template <typename SPARSE_T>
3226 template <typename RHS_T, typename RET_T>
3227 RET_T
3228 sparse_qr<SPARSE_T>::wide_solve (const RHS_T& b, octave_idx_type& info) const
3229 {
3230  return m_rep->template wide_solve<RHS_T, RET_T> (b, info);
3231 }
3232 
3233 // Explicitly instantiate all member functions
3234 
3235 template OCTAVE_API sparse_qr<SparseMatrix>::sparse_qr ();
3236 template OCTAVE_API
3237 sparse_qr<SparseMatrix>::sparse_qr (const SparseMatrix& a, int order);
3238 template OCTAVE_API bool sparse_qr<SparseMatrix>::ok () const;
3239 template OCTAVE_API ColumnVector sparse_qr<SparseMatrix>::Pinv () const;
3240 template OCTAVE_API SparseMatrix sparse_qr<SparseMatrix>::V () const;
3241 template OCTAVE_API ColumnVector sparse_qr<SparseMatrix>::P () const;
3242 template OCTAVE_API ColumnVector sparse_qr<SparseMatrix>::E () const;
3243 template OCTAVE_API SparseMatrix
3244 sparse_qr<SparseMatrix>::R (bool econ) const;
3245 template OCTAVE_API Matrix
3246 sparse_qr<SparseMatrix>::C (const Matrix& b, bool econ) const;
3247 template OCTAVE_API Matrix sparse_qr<SparseMatrix>::Q (bool econ) const;
3248 
3249 template OCTAVE_API sparse_qr<SparseComplexMatrix>::sparse_qr ();
3250 template OCTAVE_API
3251 sparse_qr<SparseComplexMatrix>::sparse_qr
3252 (const SparseComplexMatrix& a, int order);
3253 template OCTAVE_API bool sparse_qr<SparseComplexMatrix>::ok () const;
3254 template OCTAVE_API ColumnVector
3255 sparse_qr<SparseComplexMatrix>::Pinv () const;
3256 template OCTAVE_API SparseComplexMatrix
3257 sparse_qr<SparseComplexMatrix>::V () const;
3258 template OCTAVE_API ColumnVector
3259 sparse_qr<SparseComplexMatrix>::P () const;
3260 template OCTAVE_API ColumnVector
3261 sparse_qr<SparseComplexMatrix>::E () const;
3262 template OCTAVE_API SparseComplexMatrix
3263 sparse_qr<SparseComplexMatrix>::R (bool econ) const;
3264 template OCTAVE_API ComplexMatrix
3265 sparse_qr<SparseComplexMatrix>::C (const ComplexMatrix& b, bool econ) const;
3266 template OCTAVE_API ComplexMatrix
3267 sparse_qr<SparseComplexMatrix>::Q (bool econ) const;
3268 
3269 Matrix
3270 qrsolve (const SparseMatrix& a, const MArray<double>& b,
3271  octave_idx_type& info)
3272 {
3273  return sparse_qr<SparseMatrix>::solve<MArray<double>, Matrix> (a, b,
3274  info);
3275 }
3276 
3277 SparseMatrix
3278 qrsolve (const SparseMatrix& a, const SparseMatrix& b,
3279  octave_idx_type& info)
3280 {
3281  return sparse_qr<SparseMatrix>::solve<SparseMatrix, SparseMatrix> (a, b,
3282  info);
3283 }
3284 
3285 ComplexMatrix
3286 qrsolve (const SparseMatrix& a, const MArray<Complex>& b,
3287  octave_idx_type& info)
3288 {
3289  return sparse_qr<SparseMatrix>::solve<MArray<Complex>,
3290  ComplexMatrix> (a, b, info);
3291 }
3292 
3293 SparseComplexMatrix
3294 qrsolve (const SparseMatrix& a, const SparseComplexMatrix& b,
3295  octave_idx_type& info)
3296 {
3297  return sparse_qr<SparseMatrix>::solve<SparseComplexMatrix,
3298  SparseComplexMatrix> (a, b, info);
3299 }
3300 
3301 ComplexMatrix
3302 qrsolve (const SparseComplexMatrix& a, const MArray<Complex>& b,
3303  octave_idx_type& info)
3304 {
3305  return sparse_qr<SparseComplexMatrix>::solve<MArray<Complex>,
3306  ComplexMatrix> (a, b, info);
3307 }
3308 
3309 SparseComplexMatrix
3310 qrsolve (const SparseComplexMatrix& a, const SparseComplexMatrix& b,
3311  octave_idx_type& info)
3312 {
3313  return sparse_qr<SparseComplexMatrix>::solve<SparseComplexMatrix,
3314  SparseComplexMatrix>
3315  (a, b, info);
3316 }
3317 
3318 ComplexMatrix
3319 qrsolve (const SparseComplexMatrix& a, const MArray<double>& b,
3320  octave_idx_type& info)
3321 {
3322  return sparse_qr<SparseComplexMatrix>::solve<MArray<double>,
3323  ComplexMatrix> (a, b, info);
3324 }
3325 
3326 SparseComplexMatrix
3327 qrsolve (const SparseComplexMatrix& a, const SparseMatrix& b,
3328  octave_idx_type& info)
3329 {
3330  return sparse_qr<SparseComplexMatrix>::solve<SparseMatrix,
3331  SparseComplexMatrix>
3332  (a, b, info);
3333 }
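// A minimal usage sketch for the qrsolve wrappers above (the data is
// hypothetical; any of the real/complex, dense/sparse combinations works the
// same way):
//
//   #include "dSparse.h"
//   #include "dMatrix.h"
//   #include "sparse-qr.h"
//
//   SparseMatrix a = /* m-by-n coefficient matrix */;
//   Matrix b = /* m-by-k right-hand side */;
//   octave_idx_type info;
//   Matrix x = octave::math::qrsolve (a, b, info);
//   // info == 0 on success; x is the least-squares solution when m >= n
//   // and the minimum-norm solution when m < n.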
3334 
3335 OCTAVE_END_NAMESPACE(math)
3336 OCTAVE_END_NAMESPACE(octave)