sparse-qr.cc
1 ////////////////////////////////////////////////////////////////////////
2 //
3 // Copyright (C) 2005-2023 The Octave Project Developers
4 //
5 // See the file COPYRIGHT.md in the top-level directory of this
6 // distribution or <https://octave.org/copyright/>.
7 //
8 // This file is part of Octave.
9 //
10 // Octave is free software: you can redistribute it and/or modify it
11 // under the terms of the GNU General Public License as published by
12 // the Free Software Foundation, either version 3 of the License, or
13 // (at your option) any later version.
14 //
15 // Octave is distributed in the hope that it will be useful, but
16 // WITHOUT ANY WARRANTY; without even the implied warranty of
17 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 // GNU General Public License for more details.
19 //
20 // You should have received a copy of the GNU General Public License
21 // along with Octave; see the file COPYING. If not, see
22 // <https://www.gnu.org/licenses/>.
23 //
24 ////////////////////////////////////////////////////////////////////////
25 
26 #if defined (HAVE_CONFIG_H)
27 # include "config.h"
28 #endif
29 
30 #include "CMatrix.h"
31 #include "CSparse.h"
32 #include "MArray.h"
33 #include "dColVector.h"
34 #include "dMatrix.h"
35 #include "dSparse.h"
36 #include "lo-error.h"
37 #include "oct-locbuf.h"
38 #include "oct-sparse.h"
39 #include "quit.h"
40 #include "sparse-qr.h"
41 
43 
45 
46 #if defined (HAVE_CXSPARSE)
47 template <typename SPARSE_T>
48 class
50 { };
51 
52 template <>
53 class
55 {
56 public:
59 };
60 
61 template <>
62 class
64 {
65 public:
68 };
69 #endif
70 
71 template <typename SPARSE_T>
72 class sparse_qr<SPARSE_T>::sparse_qr_rep
73 {
74 public:
75 
76  sparse_qr_rep (const SPARSE_T& a, int order);
77 
78  // No copying!
79 
80  sparse_qr_rep (const sparse_qr_rep&) = delete;
81 
82  sparse_qr_rep& operator = (const sparse_qr_rep&) = delete;
83 
84  ~sparse_qr_rep (void);
85 
86  bool ok (void) const
87  {
88 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
89  return (m_H && m_Htau && m_HPinv && m_R && m_E);
90 #elif defined (HAVE_CXSPARSE)
91  return (N && S);
92 #else
93  return false;
94 #endif
95  }
96 
97  SPARSE_T V (void) const;
98 
99  ColumnVector Pinv (void) const;
100 
101  ColumnVector P (void) const;
102 
103  ColumnVector E (void) const;
104 
105  SPARSE_T R (bool econ) const;
106 
107  typename SPARSE_T::dense_matrix_type
108  C (const typename SPARSE_T::dense_matrix_type& b, bool econ = false);
109 
110  typename SPARSE_T::dense_matrix_type Q (bool econ = false);
111 
112  octave_idx_type nrows;
113  octave_idx_type ncols;
114 
115 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
116 
117  template <typename RHS_T, typename RET_T>
118  RET_T solve (const RHS_T& b, octave_idx_type& info) const;
119 
120 #endif
121 
122 #if defined (HAVE_CXSPARSE)
123 
126 
127 #endif
128 
129  template <typename RHS_T, typename RET_T>
130  RET_T tall_solve (const RHS_T& b, octave_idx_type& info);
131 
132  template <typename RHS_T, typename RET_T>
133  RET_T wide_solve (const RHS_T& b, octave_idx_type& info) const;
134 
135 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
136 
137 private:
138 
139  cholmod_common m_cc;
140  cholmod_sparse *m_R; // R factor
141  // Column permutation for A. Fill-reducing ordering.
142  SuiteSparse_long *m_E;
143  cholmod_sparse *m_H; // Householder vectors
144  cholmod_dense *m_Htau; // beta scalars
145  SuiteSparse_long *m_HPinv;
146 
147 #endif
148 };
149 
150 template <typename SPARSE_T>
151 ColumnVector
152 sparse_qr<SPARSE_T>::sparse_qr_rep::Pinv (void) const
153 {
154 #if defined (HAVE_CXSPARSE)
155 
156  ColumnVector ret (N->L->m);
157 
158  for (octave_idx_type i = 0; i < N->L->m; i++)
159  ret.xelem (i) = S->pinv[i];
160 
161  return ret;
162 
163 #else
164 
165  return ColumnVector ();
166 
167 #endif
168 }
169 
170 template <typename SPARSE_T>
171 ColumnVector
172 sparse_qr<SPARSE_T>::sparse_qr_rep::P (void) const
173 {
174 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
175 
176  ColumnVector ret (nrows);
177 
178  // FIXME: Is ret.xelem (m_HPinv[i]) = i + 1 correct?
179  for (octave_idx_type i = 0; i < nrows; i++)
180  ret.xelem (from_suitesparse_long (m_HPinv[i])) = i + 1;
181 
182  return ret;
183 
184 #elif defined (HAVE_CXSPARSE)
185 
186  ColumnVector ret (N->L->m);
187 
188  for (octave_idx_type i = 0; i < N->L->m; i++)
189  ret.xelem (S->pinv[i]) = i;
190 
191  return ret;
192 
193 #else
194 
195  return ColumnVector ();
196 
197 #endif
198 }
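// Illustrative note (not from the original file): in the CXSparse case P ()
// and Pinv () above return mutually inverse row permutations.  For example,
// if S->pinv = [2 0 1], then Pinv () = [2 0 1] and P () = [1 2 0], so that
// Pinv ()(P ()(i)) == i for every row index i.  (In the SPQR branch, P ()
// is returned 1-based.)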
199 
200 template <typename SPARSE_T>
201 ColumnVector
202 sparse_qr<SPARSE_T>::sparse_qr_rep::E (void) const
203 {
204 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
205 
206  ColumnVector ret (ncols);
207 
208  if (m_E)
209  for (octave_idx_type i = 0; i < ncols; i++)
210  ret(i) = from_suitesparse_long (m_E[i]) + 1;
211  else
212  for (octave_idx_type i = 0; i < ncols; i++)
213  ret(i) = i + 1;
214 
215  return ret;
216 
217 #else
218 
219  return ColumnVector ();
220 
221 #endif
222 }
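// Illustrative note (not from the original file): E () is the 1-based
// fill-reducing column permutation chosen by SPQR, so in Octave index
// notation the factorization is expected to satisfy
// a(:, E ()) == Q () * R (false) up to rounding; the least-squares solvers
// below undo exactly this permutation after back-substitution.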
223 
224 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
225 
226 // Convert real sparse octave matrix to real sparse cholmod matrix.
227 // Returns a "shallow" copy of a.
228 static cholmod_sparse
229 ros2rcs (const SparseMatrix& a)
230 {
231  cholmod_sparse A;
232 
233  octave_idx_type ncols = a.cols ();
234  octave_idx_type nnz = a.nnz ();
235 
236  A.ncol = ncols;
237  A.nrow = a.rows ();
238  A.itype = CHOLMOD_LONG;
239  A.nzmax = nnz;
240  A.sorted = 0;
241  A.packed = 1;
242  A.stype = 0;
243  A.xtype = CHOLMOD_REAL;
244  A.dtype = CHOLMOD_DOUBLE;
245  A.nz = nullptr;
246  A.z = nullptr;
247  if (sizeof (octave_idx_type) == sizeof (SuiteSparse_long))
248  {
249  A.p = reinterpret_cast<SuiteSparse_long *> (a.cidx ());
250  A.i = reinterpret_cast<SuiteSparse_long *> (a.ridx ());
251  }
252  else
253  {
254  SuiteSparse_long *A_p;
255  A_p = new SuiteSparse_long[ncols+1];
256  for (octave_idx_type i = 0; i < ncols+1; i++)
257  A_p[i] = a.cidx (i);
258  A.p = A_p;
259  SuiteSparse_long *A_i;
260  A_i = new SuiteSparse_long[nnz];
261  for (octave_idx_type i = 0; i < nnz; i++)
262  A_i[i] = a.ridx (i);
263  A.i = A_i;
264  }
265  A.x = const_cast<double *> (a.data ());
266 
267  return A;
268 }
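// Illustrative sketch (not from the original file): ros2rcs only copies the
// index arrays when octave_idx_type and SuiteSparse_long differ, so a caller
// must release exactly those arrays afterwards, mirroring the cleanup in the
// constructors further down:
//
//   cholmod_sparse A = ros2rcs (a);
//   // ... hand &A to SuiteSparseQR ...
//   if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
//     {
//       delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
//       delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
//     }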
269 
270 // Convert complex sparse octave matrix to complex sparse cholmod matrix.
271 // Returns a "shallow" copy of a.
272 static cholmod_sparse
273 cos2ccs (const SparseComplexMatrix& a)
274 {
275  cholmod_sparse A;
276 
277  octave_idx_type ncols = a.cols ();
278  octave_idx_type nnz = a.nnz ();
279 
280  A.ncol = ncols;
281  A.nrow = a.rows ();
282  A.itype = CHOLMOD_LONG;
283  A.nzmax = nnz;
284  A.sorted = 0;
285  A.packed = 1;
286  A.stype = 0;
287  A.xtype = CHOLMOD_COMPLEX;
288  A.dtype = CHOLMOD_DOUBLE;
289  A.nz = nullptr;
290  A.z = nullptr;
291  if (sizeof (octave_idx_type) == sizeof (SuiteSparse_long))
292  {
293  A.p = reinterpret_cast<SuiteSparse_long *> (a.cidx ());
294  A.i = reinterpret_cast<SuiteSparse_long *> (a.ridx ());
295  }
296  else
297  {
298  SuiteSparse_long *A_p;
299  A_p = new SuiteSparse_long[ncols+1];
300  for (octave_idx_type i = 0; i < ncols+1; i++)
301  A_p[i] = a.cidx (i);
302  A.p = A_p;
303  SuiteSparse_long *A_i;
304  A_i = new SuiteSparse_long[nnz];
305  for (octave_idx_type i = 0; i < nnz; i++)
306  A_i[i] = a.ridx (i);
307  A.i = A_i;
308  }
309  A.x = const_cast<Complex *>
310  (reinterpret_cast<const Complex *> (a.data ()));
311 
312  return A;
313 }
314 
315 // Convert real dense octave matrix to complex dense cholmod matrix.
316 // Returns a "deep" copy of a.
317 static cholmod_dense *
318 rod2ccd (const MArray<double>& a, cholmod_common *cc1)
319 {
320  cholmod_dense *A
321  = cholmod_l_allocate_dense (a.rows (), a.cols (), a.rows(),
322  CHOLMOD_COMPLEX, cc1);
323 
324  const double *a_x = a.data ();
325 
326  Complex *A_x = reinterpret_cast<Complex *> (A->x);
327  for (octave_idx_type j = 0; j < a.cols() * a.rows() ; j++)
328  A_x[j] = Complex (a_x[j], 0.0);
329 
330  return A;
331 }
332 
333 // Convert real dense octave matrix to real dense cholmod matrix.
334 // Returns a "shallow" copy of a.
335 static cholmod_dense
336 rod2rcd (const MArray<double>& a)
337 {
338  cholmod_dense A;
339 
340  A.ncol = a.cols ();
341  A.nrow = a.rows ();
342  A.nzmax = a.cols () * a.rows ();
343  A.xtype = CHOLMOD_REAL;
344  A.dtype = CHOLMOD_DOUBLE;
345  A.z = nullptr;
346  A.d = a.rows ();
347  A.x = const_cast<double *> (a.data ());
348 
349  return A;
350 }
351 
352 // Convert complex dense octave matrix to complex dense cholmod matrix.
353 // Returns a "shallow" copy of a.
354 static cholmod_dense
355 cod2ccd (const ComplexMatrix& a)
356 {
357  cholmod_dense A;
358 
359  A.ncol = a.cols ();
360  A.nrow = a.rows ();
361  A.nzmax = a.cols () * a.rows ();
362  A.xtype = CHOLMOD_COMPLEX;
363  A.dtype = CHOLMOD_DOUBLE;
364  A.z = nullptr;
365  A.d = a.rows ();
366  A.x = const_cast<Complex *> (reinterpret_cast<const Complex *> (a.data ()));
367 
368  return A;
369 }
370 
371 // Convert real sparse cholmod matrix to real sparse octave matrix.
372 // Returns a "deep" copy of y.
373 static SparseMatrix
374 rcs2ros (const cholmod_sparse *y)
375 {
376  octave_idx_type nrow = from_size_t (y->nrow);
377  octave_idx_type ncol = from_size_t (y->ncol);
378  octave_idx_type nz = from_size_t (y->nzmax);
379  SparseMatrix ret (nrow, ncol, nz);
380 
381  SuiteSparse_long *y_p = reinterpret_cast<SuiteSparse_long *> (y->p);
382  for (octave_idx_type j = 0; j < ncol + 1; j++)
383  ret.xcidx (j) = from_suitesparse_long (y_p[j]);
384 
385  SuiteSparse_long *y_i = reinterpret_cast<SuiteSparse_long *> (y->i);
386  double *y_x = reinterpret_cast<double *> (y->x);
387  for (octave_idx_type j = 0; j < nz; j++)
388  {
389  ret.xridx (j) = from_suitesparse_long (y_i[j]);
390  ret.xdata (j) = y_x[j];
391  }
392 
393  return ret;
394 }
395 
396 // Convert complex sparse cholmod matrix to complex sparse octave matrix.
397 // Returns a "deep" copy of a.
398 static SparseComplexMatrix
399 ccs2cos (const cholmod_sparse *a)
400 {
401  octave_idx_type nrow = from_size_t (a->nrow);
402  octave_idx_type ncol = from_size_t (a->ncol);
403  octave_idx_type nz = from_size_t (a->nzmax);
404  SparseComplexMatrix ret (nrow, ncol, nz);
405 
406  SuiteSparse_long *a_p = reinterpret_cast<SuiteSparse_long *> (a->p);
407  for (octave_idx_type j = 0; j < ncol + 1; j++)
408  ret.xcidx(j) = from_suitesparse_long (a_p[j]);
409 
410  SuiteSparse_long *a_i = reinterpret_cast<SuiteSparse_long *> (a->i);
411  Complex *a_x = reinterpret_cast<Complex *> (a->x);
412  for (octave_idx_type j = 0; j < nz; j++)
413  {
414  ret.xridx(j) = from_suitesparse_long (a_i[j]);
415  ret.xdata(j) = a_x[j];
416  }
417 
418  return ret;
419 }
420 
421 // Convert real sparse octave matrix to complex sparse cholmod matrix.
422 // Returns a "deep" copy of a.
423 static cholmod_sparse *
424 ros2ccs (const SparseMatrix& a, cholmod_common *cc)
425 {
426  cholmod_sparse *A
427  = cholmod_l_allocate_sparse (a.rows (), a.cols (), a.nnz (), 0, 1, 0,
428  CHOLMOD_COMPLEX, cc);
429 
430  octave_idx_type ncol = a.cols ();
431  SuiteSparse_long *A_p = reinterpret_cast<SuiteSparse_long *> (A->p);
432  for (octave_idx_type j = 0; j < ncol + 1; j++)
433  A_p[j] = a.cidx(j);
434 
435  const double *a_x = a.data ();
436  Complex *A_x = reinterpret_cast<Complex *> (A->x);
437  SuiteSparse_long *A_i = reinterpret_cast<SuiteSparse_long *> (A->i);
438  for (octave_idx_type j = 0; j < a.nnz (); j++)
439  {
440  A_x[j] = Complex (a_x[j], 0.0);
441  A_i[j] = a.ridx(j);
442  }
443  return A;
444 }
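// Illustrative sketch (not from the original file): ros2ccs allocates its
// result with cholmod_l_allocate_sparse, so the matching cleanup on the
// caller's side is cholmod_l_free_sparse:
//
//   cholmod_sparse *A = ros2ccs (a, &cc);
//   // ... use A ...
//   cholmod_l_free_sparse (&A, &cc);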
445 
446 static suitesparse_integer
447 suitesparse_long_to_suitesparse_integer (SuiteSparse_long x)
448 {
449  if (x < std::numeric_limits<suitesparse_integer>::min ()
450      || x > std::numeric_limits<suitesparse_integer>::max ())
451  (*current_liboctave_error_handler)
452  ("integer dimension or index out of range for SuiteSparse's indexing type");
453 
454  return static_cast<suitesparse_integer> (x);
455 }
456 
457 static void
458 spqr_error_handler (const cholmod_common *cc)
459 {
460  if (cc->status >= 0)
461  return;
462 
463  switch (cc->status)
464  {
465  case CHOLMOD_OUT_OF_MEMORY:
466  (*current_liboctave_error_handler)
467  ("sparse_qr: sparse matrix QR factorization failed"
468  " - out of memory");
469  case CHOLMOD_TOO_LARGE:
470  (*current_liboctave_error_handler)
471  ("sparse_qr: sparse matrix QR factorization failed"
472  " - integer overflow occurred");
473  default:
474  (*current_liboctave_error_handler)
475  ("sparse_qr: sparse matrix QR factorization failed"
476  " - error %d", cc->status);
477  }
478 
479  // FIXME: Free memory?
480  // FIXME: Can cc-status > 0 (CHOLMOD_NOT_POSDEF, CHOLMOD_DSMALL) occur?
481 }
482 #endif
483 
484 // Specializations.
485 
486 // Real-valued matrices.
487 
488 // Arguments for parameter order (taken from SuiteSparseQR documentation).
489 // 0: fixed ordering 0 (no permutation of columns)
490 // 1: natural ordering 1 (only singleton columns are permuted to the left of
491 // the matrix)
492 // 2: colamd
493 // 3:
494 // 4: CHOLMOD best-effort (COLAMD, METIS,...)
495 // 5: AMD(a'*a)
496 // 6: metis(a'*a)
497 // 7: SuiteSparseQR default ordering
498 // 8: try COLAMD, AMD, and METIS; pick best
499 // 9: try COLAMD and AMD; pick best
500 //FIXME: What is order = 3?
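// Illustrative sketch (not from the original file): how the order argument is
// typically chosen.  The public sparse_qr<SparseMatrix> wrapper is assumed to
// forward its (matrix, order) constructor arguments to the sparse_qr_rep
// constructor below.
//
//   SparseMatrix a;                               // some sparse input
//   sparse_qr<SparseMatrix> fact_colamd (a, 2);   // 2: COLAMD
//   sparse_qr<SparseMatrix> fact_default (a, 7);  // 7: SuiteSparseQR default
//   if (fact_default.ok ())
//     SparseMatrix r = fact_default.R (true);     // economy-size R factor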
501 template <>
502 sparse_qr<SparseMatrix>::sparse_qr_rep::sparse_qr_rep
503 (const SparseMatrix& a, int order)
504  : nrows (a.rows ()), ncols (a.columns ())
505 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
506  , m_cc (), m_R (nullptr), m_E (nullptr), m_H (nullptr), m_Htau (nullptr),
507  m_HPinv (nullptr)
508 {
509  octave_idx_type nr = a.rows ();
510  octave_idx_type nc = a.cols ();
511 
512  if (nr < 0 || nc < 0)
513  (*current_liboctave_error_handler)
514  ("matrix dimension with negative size");
515 
516  if (order < 0 || order > 9)
517  (*current_liboctave_error_handler)
518  ("ordering %d is not supported by SPQR", order);
519 
520  cholmod_l_start (&m_cc);
521  cholmod_sparse A = ros2rcs (a);
522 
523  SuiteSparseQR<double> (order, static_cast<double> (SPQR_DEFAULT_TOL),
524  static_cast<SuiteSparse_long> (A.nrow),
525  &A, &m_R, &m_E, &m_H, &m_HPinv, &m_Htau, &m_cc);
526  spqr_error_handler (&m_cc);
527 
528  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
529  {
530  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
531  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
532  }
533 }
534 
535 #elif defined (HAVE_CXSPARSE)
536  , S (nullptr), N (nullptr)
537 {
538  CXSPARSE_DNAME () A;
539 
540  A.nzmax = a.nnz ();
541  A.m = nrows;
542  A.n = ncols;
543  // Cast away const on A, with full knowledge that CSparse won't touch it
544  // Prevents the methods below making a copy of the data.
545  A.p = const_cast<suitesparse_integer *>
546  (to_suitesparse_intptr (a.cidx ()));
547  A.i = const_cast<suitesparse_integer *>
548  (to_suitesparse_intptr (a.ridx ()));
549  A.x = const_cast<double *> (a.data ());
550  A.nz = -1;
551 
552  S = CXSPARSE_DNAME (_sqr) (order, &A, 1);
553  N = CXSPARSE_DNAME (_qr) (&A, S);
554 
555  if (! N)
556  (*current_liboctave_error_handler)
557  ("sparse_qr: sparse matrix QR factorization failed");
558 
559 }
560 
561 #else
562 
563 {
564  octave_unused_parameter (order);
565 
566  (*current_liboctave_error_handler)
567  ("sparse_qr: support for SPQR or CXSparse was unavailable or disabled when liboctave was built");
568 }
569 
570 #endif
571 
572 template <>
573 sparse_qr<SparseMatrix>::sparse_qr_rep::~sparse_qr_rep (void)
574 {
575 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
576 
577  cholmod_l_free_sparse (&m_R, &m_cc);
578  cholmod_l_free_sparse (&m_H, &m_cc);
579  cholmod_l_free_dense (&m_Htau, &m_cc);
580  free (m_E); // FIXME: use cholmod_l_free
581  free (m_HPinv);
582  cholmod_l_finish (&m_cc);
583 
584 #elif defined (HAVE_CXSPARSE)
585 
586  CXSPARSE_DNAME (_sfree) (S);
587  CXSPARSE_DNAME (_nfree) (N);
588 
589 #endif
590 }
591 
592 template <>
593 SparseMatrix
594 sparse_qr<SparseMatrix>::sparse_qr_rep::V (void) const
595 {
596 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
597 
598  return rcs2ros (m_H);
599 
600 #elif defined (HAVE_CXSPARSE)
601 
602  // Drop zeros from V and sort
603  // FIXME: Is the double transpose to sort necessary?
604 
605  CXSPARSE_DNAME (_dropzeros) (N->L);
606  CXSPARSE_DNAME () *D = CXSPARSE_DNAME (_transpose) (N->L, 1);
607  CXSPARSE_DNAME (_spfree) (N->L);
608  N->L = CXSPARSE_DNAME (_transpose) (D, 1);
609  CXSPARSE_DNAME (_spfree) (D);
610 
611  octave_idx_type nc = N->L->n;
612  octave_idx_type nz = N->L->nzmax;
613  SparseMatrix ret (N->L->m, nc, nz);
614 
615  for (octave_idx_type j = 0; j < nc+1; j++)
616  ret.xcidx (j) = N->L->p[j];
617 
618  for (octave_idx_type j = 0; j < nz; j++)
619  {
620  ret.xridx (j) = N->L->i[j];
621  ret.xdata (j) = N->L->x[j];
622  }
623 
624  return ret;
625 
626 #else
627 
628  return SparseMatrix ();
629 
630 #endif
631 }
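// Illustrative note (not from the original file): V () returns the Householder
// vectors v_k (stored in N->L here, or m_H in the SPQR branch), and N->B /
// m_Htau hold the matching beta_k scalars.  Q is never formed explicitly in
// the solvers; it is applied one reflector at a time as
// H_k = I - beta_k * v_k * v_k', which is what the *_happly calls in C (),
// Q () and the solve routines below do.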
632 
633 template <>
634 SparseMatrix
635 sparse_qr<SparseMatrix>::sparse_qr_rep::R (bool econ) const
636 {
637 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
638 
639  octave_idx_type nr = static_cast<octave_idx_type> (m_R->nrow);
640  octave_idx_type nc = static_cast<octave_idx_type> (m_R->ncol);
641  octave_idx_type nz = static_cast<octave_idx_type> (m_R->nzmax);
642 
643  // FIXME: Does this work if econ = true?
644  SparseMatrix ret ((econ ? (nc > nr ? nr : nc) : nr), nc, nz);
645  SuiteSparse_long *Rp = reinterpret_cast<SuiteSparse_long *> (m_R->p);
646  SuiteSparse_long *Ri = reinterpret_cast<SuiteSparse_long *> (m_R->i);
647 
648  for (octave_idx_type j = 0; j < nc + 1; j++)
649  ret.xcidx (j) = from_suitesparse_long (Rp[j]);
650 
651  for (octave_idx_type j = 0; j < nz; j++)
652  {
653  ret.xridx (j) = from_suitesparse_long (Ri[j]);
654  ret.xdata (j) = (reinterpret_cast<double *> (m_R->x))[j];
655  }
656 
657  return ret;
658 
659 #elif defined (HAVE_CXSPARSE)
660 
661  // Drop zeros from R and sort
662  // FIXME: Is the double transpose to sort necessary?
663 
664  CXSPARSE_DNAME (_dropzeros) (N->U);
665  CXSPARSE_DNAME () *D = CXSPARSE_DNAME (_transpose) (N->U, 1);
666  CXSPARSE_DNAME (_spfree) (N->U);
667  N->U = CXSPARSE_DNAME (_transpose) (D, 1);
668  CXSPARSE_DNAME (_spfree) (D);
669 
670  octave_idx_type nc = N->U->n;
671  octave_idx_type nz = N->U->nzmax;
672 
673  SparseMatrix ret ((econ ? (nc > nrows ? nrows : nc) : nrows), nc, nz);
674 
675  for (octave_idx_type j = 0; j < nc+1; j++)
676  ret.xcidx (j) = N->U->p[j];
677 
678  for (octave_idx_type j = 0; j < nz; j++)
679  {
680  ret.xridx (j) = N->U->i[j];
681  ret.xdata (j) = N->U->x[j];
682  }
683 
684  return ret;
685 
686 #else
687 
688  octave_unused_parameter (econ);
689 
690  return SparseMatrix ();
691 
692 #endif
693 }
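// Illustrative example (not from the original file): for a 5-by-3 input,
// R (false) returns the full 5-by-3 factor (whose last rows are zero), while
// R (true) trims it to the economy size min (5, 3) = 3 rows, matching the
// (econ ? (nc > nrows ? nrows : nc) : nrows) row count used above.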
694 
695 template <>
696 Matrix
697 sparse_qr<SparseMatrix>::sparse_qr_rep::C (const Matrix& b, bool econ)
698 {
699 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
700  octave_idx_type nr = (econ
701  ? (ncols > nrows ? nrows : ncols)
702  : nrows);
703  octave_idx_type b_nr = b.rows ();
704  octave_idx_type b_nc = b.cols ();
705  Matrix ret (nr, b_nc);
706 
707  if (nrows != b_nr)
708  (*current_liboctave_error_handler)
709  ("sparse_qr: matrix dimension mismatch");
710  else if (b_nc < 0 || b_nr < 0)
711  (*current_liboctave_error_handler)
712  ("sparse_qr: matrix dimension with negative size");
713 
714  cholmod_dense *QTB; // Q' * B
715  cholmod_dense B = rod2rcd (b);
716 
717  QTB = SuiteSparseQR_qmult<double> (SPQR_QTX, m_H, m_Htau, m_HPinv, &B,
718  &m_cc);
719  spqr_error_handler (&m_cc);
720 
721  // copy QTB into ret
722  double *QTB_x = reinterpret_cast<double *> (QTB->x);
723  double *ret_vec = reinterpret_cast<double *> (ret.fortran_vec ());
724  for (octave_idx_type j = 0; j < b_nc; j++)
725  for (octave_idx_type i = 0; i < nr; i++)
726  ret_vec[j * nr + i] = QTB_x[j * b_nr + i];
727 
728  cholmod_l_free_dense (&QTB, &m_cc);
729 
730  return ret;
731 
732 #elif defined (HAVE_CXSPARSE)
733 
734  if (econ)
735  (*current_liboctave_error_handler)
736  ("sparse-qr: economy mode with CXSparse not supported");
737 
738  octave_idx_type b_nr = b.rows ();
739  octave_idx_type b_nc = b.cols ();
740 
741  octave_idx_type nc = N->L->n;
742  octave_idx_type nr = nrows;
743 
744  const double *bvec = b.data ();
745 
746  Matrix ret (b_nr, b_nc);
747  double *vec = ret.fortran_vec ();
748 
749  if (nr < 0 || nc < 0 || nr != b_nr)
750  (*current_liboctave_error_handler) ("matrix dimension mismatch");
751 
752  if (nr == 0 || nc == 0 || b_nc == 0)
753  ret = Matrix (nc, b_nc, 0.0);
754  else
755  {
756  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
757 
758  for (volatile octave_idx_type j = 0, idx = 0;
759  j < b_nc;
760  j++, idx += b_nr)
761  {
762  octave_quit ();
763 
764  for (octave_idx_type i = nr; i < S->m2; i++)
765  buf[i] = 0.;
766 
767  volatile octave_idx_type nm = (nr < nc ? nr : nc);
768 
769  CXSPARSE_DNAME (_ipvec) (S->pinv, bvec + idx, buf, b_nr);
770 
771  for (volatile octave_idx_type i = 0; i < nm; i++)
772  {
773  octave_quit ();
774 
775  CXSPARSE_DNAME (_happly) (N->L, i, N->B[i], buf);
776  }
777 
778  for (octave_idx_type i = 0; i < b_nr; i++)
779  vec[i+idx] = buf[i];
780  }
781  }
782 
783  return ret;
784 
785 #else
786 
787  octave_unused_parameter (b);
788  octave_unused_parameter (econ);
789 
790  return Matrix ();
791 
792 #endif
793 }
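// Illustrative sketch (not from the original file): C (), R () and E () can be
// combined into a dense least-squares solve, x = P * (R \ (Q'*b)), which is
// the same chain of steps tall_solve below performs with sparse
// back-substitution.  The public sparse_qr<SparseMatrix> accessors used here
// are assumed to forward to the rep methods in this file, and a is assumed to
// have full column rank n.
//
//   Matrix qtb = fact.C (b);            // Q'*b, one right-hand side
//   SparseMatrix r = fact.R (true);     // economy-size n-by-n R
//   ColumnVector e = fact.E ();         // 1-based column permutation
//   ColumnVector y (n), x (n);
//   for (octave_idx_type i = n - 1; i >= 0; i--)
//     {
//       double s = qtb(i, 0);
//       for (octave_idx_type j = i + 1; j < n; j++)
//         s -= r(i, j) * y(j);          // back-substitute R*y = Q'*b
//       y(i) = s / r(i, i);
//     }
//   for (octave_idx_type i = 0; i < n; i++)
//     x(static_cast<octave_idx_type> (e(i)) - 1) = y(i);  // undo permutation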
794 
795 template <>
796 Matrix
797 sparse_qr<SparseMatrix>::sparse_qr_rep::Q (bool econ)
798 {
799 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
800 
801  octave_idx_type nc = (econ
802  ? (ncols > nrows ? nrows : ncols)
803  : nrows);
804  Matrix ret (nrows, nc);
805  cholmod_dense *q;
806 
807  // I is nrows x nrows identity matrix
808  cholmod_dense *I
809  = cholmod_l_allocate_dense (nrows, nrows, nrows, CHOLMOD_REAL, &m_cc);
810 
811  for (octave_idx_type i = 0; i < nrows * nrows; i++)
812  (reinterpret_cast<double *> (I->x))[i] = 0.0;
813 
814  for (octave_idx_type i = 0; i < nrows; i++)
815  (reinterpret_cast<double *> (I->x))[i * nrows + i] = 1.0;
816 
817  q = SuiteSparseQR_qmult<double> (SPQR_QX, m_H, m_Htau, m_HPinv, I, &m_cc);
818  spqr_error_handler (&m_cc);
819 
820  double *q_x = reinterpret_cast<double *> (q->x);
821  double *ret_vec = const_cast<double *> (ret.fortran_vec ());
822  for (octave_idx_type j = 0; j < nc; j++)
823  for (octave_idx_type i = 0; i < nrows; i++)
824  ret_vec[j * nrows + i] = q_x[j * nrows + i];
825 
826  cholmod_l_free_dense (&q, &m_cc);
827  cholmod_l_free_dense (&I, &m_cc);
828 
829  return ret;
830 
831 #elif defined (HAVE_CXSPARSE)
832 
833  if (econ)
834  (*current_liboctave_error_handler)
835  ("sparse-qr: economy mode with CXSparse not supported");
836 
837  octave_idx_type nc = N->L->n;
838  octave_idx_type nr = nrows;
839  Matrix ret (nr, nr);
840  double *ret_vec = ret.fortran_vec ();
841 
842  if (nr < 0 || nc < 0)
843  (*current_liboctave_error_handler) ("matrix dimension mismatch");
844 
845  if (nr == 0 || nc == 0)
846  ret = Matrix (nc, nr, 0.0);
847  else
848  {
849  OCTAVE_LOCAL_BUFFER (double, bvec, nr + 1);
850 
851  for (octave_idx_type i = 0; i < nr; i++)
852  bvec[i] = 0.;
853 
854  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
855 
856  for (volatile octave_idx_type j = 0, idx = 0; j < nr; j++, idx += nr)
857  {
858  octave_quit ();
859 
860  bvec[j] = 1.0;
861  for (octave_idx_type i = nr; i < S->m2; i++)
862  buf[i] = 0.;
863 
864  volatile octave_idx_type nm = (nr < nc ? nr : nc);
865 
866  CXSPARSE_DNAME (_ipvec) (S->pinv, bvec, buf, nr);
867 
868  for (volatile octave_idx_type i = 0; i < nm; i++)
869  {
870  octave_quit ();
871 
872  CXSPARSE_DNAME (_happly) (N->L, i, N->B[i], buf);
873  }
874 
875  for (octave_idx_type i = 0; i < nr; i++)
876  ret_vec[i+idx] = buf[i];
877 
878  bvec[j] = 0.0;
879  }
880  }
881 
882  return ret.transpose ();
883 
884 #else
885 
886  octave_unused_parameter (econ);
887 
888  return Matrix ();
889 
890 #endif
891 }
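// Illustrative note (not from the original file): Q () forms a dense
// nrows-by-nrows matrix, which can be prohibitively large for big sparse
// problems.  When only Q'*b is needed, C () above applies the Householder
// reflectors directly to b and avoids materializing Q, which is the approach
// the solve routines below take as well.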
892 
893 template <>
894 template <>
895 Matrix
896 sparse_qr<SparseMatrix>::sparse_qr_rep::tall_solve<MArray<double>, Matrix>
897 (const MArray<double>& b, octave_idx_type& info)
898 {
899  info = -1;
900 
901 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD) && defined (HAVE_CXSPARSE))
902 
903  octave_idx_type b_nr = b.rows ();
904  octave_idx_type b_nc = b.cols ();
905  Matrix x (ncols, b_nc); // X = m_E'*(m_R\‍(Q'*B))
906 
907  if (nrows < 0 || ncols < 0 || b_nc < 0 || b_nr < 0)
908  (*current_liboctave_error_handler)
909  ("matrix dimension with negative size");
910 
911  if (nrows < 0 || ncols < 0 || nrows != b_nr)
912  (*current_liboctave_error_handler) ("matrix dimension mismatch");
913 
914  cholmod_dense *QTB; // Q' * B
915  cholmod_dense B = rod2rcd (b);
916 
917  // FIXME: Process b column by column as in the CXSPARSE version below.
918  // This avoids a large dense matrix Q' * B in memory.
919  QTB = SuiteSparseQR_qmult<double> (SPQR_QTX, m_H, m_Htau, m_HPinv, &B,
920  &m_cc);
921 
922  spqr_error_handler (&m_cc);
923 
924  // convert m_R into CXSPARSE matrix R2
925  CXSPARSE_DNAME (_sparse) R2;
926  R2.n = ncols;
927  R2.m = ncols;
928  R2.nzmax = m_R->nzmax;
929  R2.x = reinterpret_cast<double *> (m_R->x);
930  suitesparse_integer *R2_p;
931  suitesparse_integer *R2_i;
932  if (sizeof (suitesparse_integer) == sizeof (SuiteSparse_long))
933  {
934  R2.p = reinterpret_cast<suitesparse_integer *> (m_R->p);
935  R2.i = reinterpret_cast<suitesparse_integer *> (m_R->i);
936  }
937  else
938  {
939  R2_p = new suitesparse_integer[ncols+1];
940  SuiteSparse_long *R_p = reinterpret_cast<SuiteSparse_long *> (m_R->p);
941  for (octave_idx_type i = 0; i < ncols+1; i++)
942  R2_p[i] = suitesparse_long_to_suitesparse_integer (R_p[i]);
943  R2.p = R2_p;
944  octave_idx_type nnz = m_R->nzmax;
945  R2_i = new suitesparse_integer[nnz];
946  SuiteSparse_long *R_i = reinterpret_cast<SuiteSparse_long *> (m_R->i);
947  for (octave_idx_type i = 0; i < nnz; i++)
948  R2_i[i] = suitesparse_long_to_suitesparse_integer (R_i[i]);
949  R2.i = R2_i;
950  }
951  R2.nz = -1;
952  double *x_vec = const_cast<double *> (x.fortran_vec ());
953  suitesparse_integer *E;
954  if (sizeof (suitesparse_integer) != sizeof (SuiteSparse_long))
955  {
956  E = new suitesparse_integer [ncols];
957  for (octave_idx_type i = 0; i < ncols; i++)
958  E[i] = suitesparse_long_to_suitesparse_integer (m_E[i]);
959  }
960  else
961  E = reinterpret_cast<suitesparse_integer *> (m_E);
962  for (volatile octave_idx_type j = 0; j < b_nc; j++)
963  {
964  // fill x(:,j)
965  // solve (m_R\‍(Q'*B(:,j)) and store result in QTB(:,j)
966  CXSPARSE_DNAME (_usolve)
967  (&R2, &(reinterpret_cast<double *> (QTB->x)[j * b_nr]));
968  // x(:,j) = m_E' * (m_R\‍(Q'*B(:,j))
969  CXSPARSE_DNAME (_ipvec)
970  (E, &(reinterpret_cast<double *> (QTB->x)[j * b_nr]),
971  &x_vec[j * ncols], ncols);
972  }
973 
974  if (sizeof (suitesparse_integer) != sizeof (SuiteSparse_long))
975  {
976  delete [] R2_p;
977  delete [] R2_i;
978  delete [] E;
979  }
980  cholmod_l_free_dense (&QTB, &m_cc);
981 
982  info = 0;
983 
984  return x;
985 
986 #elif defined (HAVE_CXSPARSE)
987 
988  octave_idx_type nr = nrows;
989  octave_idx_type nc = ncols;
990 
991  octave_idx_type b_nc = b.cols ();
992  octave_idx_type b_nr = b.rows ();
993 
994  const double *bvec = b.data ();
995 
996  Matrix x (nc, b_nc);
997  double *vec = x.fortran_vec ();
998 
999  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
1000 
1001  for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc;
1002  i++, idx+=nc, bidx+=b_nr)
1003  {
1004  octave_quit ();
1005 
1006  for (octave_idx_type j = nr; j < S->m2; j++)
1007  buf[j] = 0.;
1008 
1009  CXSPARSE_DNAME (_ipvec) (S->pinv, bvec + bidx, buf, nr);
1010 
1011  for (volatile octave_idx_type j = 0; j < nc; j++)
1012  {
1013  octave_quit ();
1014 
1015  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1016  }
1017 
1018  CXSPARSE_DNAME (_usolve) (N->U, buf);
1019  CXSPARSE_DNAME (_ipvec) (S->q, buf, vec + idx, nc);
1020  }
1021 
1022  info = 0;
1023 
1024  return x;
1025 
1026 #else
1027 
1028  octave_unused_parameter (b);
1029 
1030  return Matrix ();
1031 
1032 #endif
1033 }
1034 
1035 template <>
1036 template <>
1037 Matrix
1038 sparse_qr<SparseMatrix>::sparse_qr_rep::wide_solve<MArray<double>, Matrix>
1039 (const MArray<double>& b, octave_idx_type& info) const
1040 {
1041  info = -1;
1042 #if defined (HAVE_CXSPARSE)
1043 
1044  // These are swapped because the original matrix was transposed in
1045  // sparse_qr<SparseMatrix>::solve.
1046 
1047  octave_idx_type nr = ncols;
1048  octave_idx_type nc = nrows;
1049 
1050  octave_idx_type b_nc = b.cols ();
1051  octave_idx_type b_nr = b.rows ();
1052 
1053  const double *bvec = b.data ();
1054 
1055  Matrix x (nc, b_nc);
1056  double *vec = x.fortran_vec ();
1057 
1058  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
1059 
1060  OCTAVE_LOCAL_BUFFER (double, buf, nbuf);
1061 
1062  for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc;
1063  i++, idx+=nc, bidx+=b_nr)
1064  {
1065  octave_quit ();
1066 
1067  for (octave_idx_type j = nr; j < nbuf; j++)
1068  buf[j] = 0.;
1069 
1070  CXSPARSE_DNAME (_pvec) (S->q, bvec + bidx, buf, nr);
1071  CXSPARSE_DNAME (_utsolve) (N->U, buf);
1072 
1073  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
1074  {
1075  octave_quit ();
1076 
1077  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1078  }
1079 
1080  CXSPARSE_DNAME (_pvec) (S->pinv, buf, vec + idx, nc);
1081  }
1082 
1083  info = 0;
1084 
1085  return x;
1086 
1087 #else
1088  octave_unused_parameter (b);
1089 
1090  return Matrix ();
1091 
1092 #endif
1093 }
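// Illustrative sketch (not from the original file, names hypothetical): the
// tall_solve/wide_solve split the comment above refers to.  The caller,
// sparse_qr<SparseMatrix>::solve, is assumed to factor A directly for
// overdetermined systems and to factor A' for underdetermined ones, which is
// why nrows and ncols appear swapped inside wide_solve:
//
//   if (a.rows () >= a.columns ())
//     x = qr_of_a.tall_solve<MArray<double>, Matrix> (b, info);
//   else
//     x = qr_of_a_transpose.wide_solve<MArray<double>, Matrix> (b, info);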
1094 
1095 template <>
1096 template <>
1097 SparseMatrix
1098 sparse_qr<SparseMatrix>::sparse_qr_rep::tall_solve<SparseMatrix, SparseMatrix>
1099 (const SparseMatrix& b, octave_idx_type& info)
1100 {
1101  info = -1;
1102 
1103 #if defined (HAVE_CXSPARSE)
1104 
1105  octave_idx_type nr = nrows;
1106  octave_idx_type nc = ncols;
1107 
1108  octave_idx_type b_nr = b.rows ();
1109  octave_idx_type b_nc = b.cols ();
1110 
1111  SparseMatrix x (nc, b_nc, b.nnz ());
1112  x.xcidx (0) = 0;
1113 
1114  volatile octave_idx_type x_nz = b.nnz ();
1115  volatile octave_idx_type ii = 0;
1116 
1117  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1118  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
1119 
1120  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1121  {
1122  octave_quit ();
1123 
1124  for (octave_idx_type j = 0; j < b_nr; j++)
1125  Xx[j] = b.xelem (j, i);
1126 
1127  for (octave_idx_type j = nr; j < S->m2; j++)
1128  buf[j] = 0.;
1129 
1130  CXSPARSE_DNAME (_ipvec) (S->pinv, Xx, buf, nr);
1131 
1132  for (volatile octave_idx_type j = 0; j < nc; j++)
1133  {
1134  octave_quit ();
1135 
1136  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1137  }
1138 
1139  CXSPARSE_DNAME (_usolve) (N->U, buf);
1140  CXSPARSE_DNAME (_ipvec) (S->q, buf, Xx, nc);
1141 
1142  for (octave_idx_type j = 0; j < nc; j++)
1143  {
1144  double tmp = Xx[j];
1145 
1146  if (tmp != 0.0)
1147  {
1148  if (ii == x_nz)
1149  {
1150  // Resize the sparse matrix
1151  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
1152  sz = (sz > 10 ? sz : 10) + x_nz;
1153  x.change_capacity (sz);
1154  x_nz = sz;
1155  }
1156 
1157  x.xdata (ii) = tmp;
1158  x.xridx (ii++) = j;
1159  }
1160  }
1161 
1162  x.xcidx (i+1) = ii;
1163  }
1164 
1165  info = 0;
1166 
1167  return x;
1168 
1169 #else
1170 
1171  octave_unused_parameter (b);
1172 
1173  return SparseMatrix ();
1174 
1175 #endif
1176 }
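// Illustrative arithmetic (not from the original file): the capacity growth
// used when the sparse result runs out of room scales the current nonzero
// count by the fraction of right-hand-side columns still to be processed.
// E.g. with x_nz = 100, b_nc = 10 and i = 4 columns already done:
// sz = 100 * (10 - 4) / 10 = 60, then sz = max (60, 10) + 100 = 160 entries.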
1177 
1178 template <>
1179 template <>
1180 SparseMatrix
1181 sparse_qr<SparseMatrix>::sparse_qr_rep::wide_solve<SparseMatrix, SparseMatrix>
1182 (const SparseMatrix& b, octave_idx_type& info) const
1183 {
1184  info = -1;
1185 
1186 #if defined (HAVE_CXSPARSE)
1187 
1188  // These are swapped because the original matrix was transposed in
1189  // sparse_qr<SparseMatrix>::solve.
1190 
1191  octave_idx_type nr = ncols;
1192  octave_idx_type nc = nrows;
1193 
1194  octave_idx_type b_nr = b.rows ();
1195  octave_idx_type b_nc = b.cols ();
1196 
1197  SparseMatrix x (nc, b_nc, b.nnz ());
1198  x.xcidx (0) = 0;
1199 
1200  volatile octave_idx_type x_nz = b.nnz ();
1201  volatile octave_idx_type ii = 0;
1202  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
1203 
1204  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1205  OCTAVE_LOCAL_BUFFER (double, buf, nbuf);
1206 
1207  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1208  {
1209  octave_quit ();
1210 
1211  for (octave_idx_type j = 0; j < b_nr; j++)
1212  Xx[j] = b.xelem (j, i);
1213 
1214  for (octave_idx_type j = nr; j < nbuf; j++)
1215  buf[j] = 0.;
1216 
1217  CXSPARSE_DNAME (_pvec) (S->q, Xx, buf, nr);
1218  CXSPARSE_DNAME (_utsolve) (N->U, buf);
1219 
1220  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
1221  {
1222  octave_quit ();
1223 
1224  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1225  }
1226 
1227  CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xx, nc);
1228 
1229  for (octave_idx_type j = 0; j < nc; j++)
1230  {
1231  double tmp = Xx[j];
1232 
1233  if (tmp != 0.0)
1234  {
1235  if (ii == x_nz)
1236  {
1237  // Resize the sparse matrix
1238  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
1239  sz = (sz > 10 ? sz : 10) + x_nz;
1240  x.change_capacity (sz);
1241  x_nz = sz;
1242  }
1243 
1244  x.xdata (ii) = tmp;
1245  x.xridx (ii++) = j;
1246  }
1247  }
1248 
1249  x.xcidx (i+1) = ii;
1250  }
1251 
1252  info = 0;
1253 
1254  x.maybe_compress ();
1255 
1256  return x;
1257 
1258 #else
1259 
1260  octave_unused_parameter (b);
1261 
1262  return SparseMatrix ();
1263 
1264 #endif
1265 }
1266 
1267 template <>
1268 template <>
1269 ComplexMatrix
1270 sparse_qr<SparseMatrix>::sparse_qr_rep::tall_solve<MArray<Complex>, ComplexMatrix>
1271 (const MArray<Complex>& b, octave_idx_type& info)
1272 {
1273  info = -1;
1274 
1275 #if defined (HAVE_CXSPARSE)
1276 
1277  octave_idx_type nr = nrows;
1278  octave_idx_type nc = ncols;
1279 
1280  octave_idx_type b_nc = b.cols ();
1281  octave_idx_type b_nr = b.rows ();
1282 
1283  ComplexMatrix x (nc, b_nc);
1284  Complex *vec = x.fortran_vec ();
1285 
1286  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1287  OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? b_nr : nc));
1288  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
1289 
1290  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1291  {
1292  octave_quit ();
1293 
1294  for (octave_idx_type j = 0; j < b_nr; j++)
1295  {
1296  Complex c = b.xelem (j, i);
1297  Xx[j] = c.real ();
1298  Xz[j] = c.imag ();
1299  }
1300 
1301  for (octave_idx_type j = nr; j < S->m2; j++)
1302  buf[j] = 0.;
1303 
1304  CXSPARSE_DNAME (_ipvec) (S->pinv, Xx, buf, nr);
1305 
1306  for (volatile octave_idx_type j = 0; j < nc; j++)
1307  {
1308  octave_quit ();
1309 
1310  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1311  }
1312 
1313  CXSPARSE_DNAME (_usolve) (N->U, buf);
1314  CXSPARSE_DNAME (_ipvec) (S->q, buf, Xx, nc);
1315 
1316  for (octave_idx_type j = nr; j < S->m2; j++)
1317  buf[j] = 0.;
1318 
1319  CXSPARSE_DNAME (_ipvec) (S->pinv, Xz, buf, nr);
1320 
1321  for (volatile octave_idx_type j = 0; j < nc; j++)
1322  {
1323  octave_quit ();
1324 
1325  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1326  }
1327 
1328  CXSPARSE_DNAME (_usolve) (N->U, buf);
1329  CXSPARSE_DNAME (_ipvec) (S->q, buf, Xz, nc);
1330 
1331  for (octave_idx_type j = 0; j < nc; j++)
1332  vec[j+idx] = Complex (Xx[j], Xz[j]);
1333  }
1334 
1335  info = 0;
1336 
1337  return x;
1338 
1339 #else
1340 
1341  octave_unused_parameter (b);
1342 
1343  return ComplexMatrix ();
1344 
1345 #endif
1346 }
1347 
1348 template <>
1349 template <>
1350 ComplexMatrix
1351 sparse_qr<SparseMatrix>::sparse_qr_rep::wide_solve<MArray<Complex>, ComplexMatrix>
1352 (const MArray<Complex>& b, octave_idx_type& info) const
1353 {
1354  info = -1;
1355 
1356 #if defined (HAVE_CXSPARSE)
1357 
1358  // These are swapped because the original matrix was transposed in
1359  // sparse_qr<SparseMatrix>::solve.
1360 
1361  octave_idx_type nr = ncols;
1362  octave_idx_type nc = nrows;
1363 
1364  octave_idx_type b_nc = b.cols ();
1365  octave_idx_type b_nr = b.rows ();
1366 
1367  ComplexMatrix x (nc, b_nc);
1368  Complex *vec = x.fortran_vec ();
1369 
1370  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
1371 
1372  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1373  OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? b_nr : nc));
1374  OCTAVE_LOCAL_BUFFER (double, buf, nbuf);
1375 
1376  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1377  {
1378  octave_quit ();
1379 
1380  for (octave_idx_type j = 0; j < b_nr; j++)
1381  {
1382  Complex c = b.xelem (j, i);
1383  Xx[j] = c.real ();
1384  Xz[j] = c.imag ();
1385  }
1386 
1387  for (octave_idx_type j = nr; j < nbuf; j++)
1388  buf[j] = 0.;
1389 
1390  CXSPARSE_DNAME (_pvec) (S->q, Xx, buf, nr);
1391  CXSPARSE_DNAME (_utsolve) (N->U, buf);
1392 
1393  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
1394  {
1395  octave_quit ();
1396 
1397  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1398  }
1399 
1400  CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xx, nc);
1401 
1402  for (octave_idx_type j = nr; j < nbuf; j++)
1403  buf[j] = 0.;
1404 
1405  CXSPARSE_DNAME (_pvec) (S->q, Xz, buf, nr);
1406  CXSPARSE_DNAME (_utsolve) (N->U, buf);
1407 
1408  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
1409  {
1410  octave_quit ();
1411 
1412  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1413  }
1414 
1415  CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xz, nc);
1416 
1417  for (octave_idx_type j = 0; j < nc; j++)
1418  vec[j+idx] = Complex (Xx[j], Xz[j]);
1419  }
1420 
1421  info = 0;
1422 
1423  return x;
1424 
1425 #else
1426 
1427  octave_unused_parameter (b);
1428 
1429  return ComplexMatrix ();
1430 
1431 #endif
1432 }
1433 
1434 // Complex-valued matrices.
1435 
1436 template <>
1437 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::sparse_qr_rep
1438 (const SparseComplexMatrix& a, int order)
1439  : nrows (a.rows ()), ncols (a.columns ())
1440 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
1441  , m_cc (), m_R (nullptr), m_E (nullptr), m_H (nullptr),
1442  m_Htau (nullptr), m_HPinv (nullptr)
1443 {
1444  octave_idx_type nr = a.rows ();
1445  octave_idx_type nc = a.cols ();
1446 
1447  if (nr < 0 || nc < 0)
1448  (*current_liboctave_error_handler)
1449  ("matrix dimension with negative size");
1450 
1451  if (order < 0 || order > 9)
1452  (*current_liboctave_error_handler)
1453  ("ordering %d is not supported by SPQR", order);
1454 
1455  cholmod_l_start (&m_cc);
1456  cholmod_sparse A = cos2ccs (a);
1457 
1458  SuiteSparseQR<Complex> (order, static_cast<double> (SPQR_DEFAULT_TOL),
1459  static_cast<SuiteSparse_long> (A.nrow),
1460  &A, &m_R, &m_E, &m_H,
1461  &m_HPinv, &m_Htau, &m_cc);
1462  spqr_error_handler (&m_cc);
1463 
1464  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
1465  {
1466  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
1467  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
1468  }
1469 }
1470 
1471 #elif defined (HAVE_CXSPARSE)
1472  , S (nullptr), N (nullptr)
1473 {
1474  CXSPARSE_ZNAME () A;
1475 
1476  A.nzmax = a.nnz ();
1477  A.m = nrows;
1478  A.n = ncols;
1479  // Cast away const on A, with full knowledge that CSparse won't touch it
1480  // Prevents the methods below making a copy of the data.
1481  A.p = const_cast<suitesparse_integer *>
1482  (to_suitesparse_intptr (a.cidx ()));
1483  A.i = const_cast<suitesparse_integer *>
1484  (to_suitesparse_intptr (a.ridx ()));
1485  A.x = const_cast<cs_complex_t *>
1486  (reinterpret_cast<const cs_complex_t *> (a.data ()));
1487  A.nz = -1;
1488 
1489  S = CXSPARSE_ZNAME (_sqr) (order, &A, 1);
1490  N = CXSPARSE_ZNAME (_qr) (&A, S);
1491 
1492  if (! N)
1493  (*current_liboctave_error_handler)
1494  ("sparse_qr: sparse matrix QR factorization failed");
1495 
1496 }
1497 
1498 #else
1499 
1500 {
1501  octave_unused_parameter (order);
1502 
1503  (*current_liboctave_error_handler)
1504  ("sparse_qr: support for CXSparse was unavailable or disabled when liboctave was built");
1505 }
1506 
1507 #endif
1508 
1509 template <>
1510 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::~sparse_qr_rep (void)
1511 {
1512 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
1513 
1514  cholmod_l_free_sparse (&m_R, &m_cc);
1515  cholmod_l_free_sparse (&m_H, &m_cc);
1516  cholmod_l_free_dense (&m_Htau, &m_cc);
1517  free (m_E); // FIXME: use cholmod_l_free
1518  free (m_HPinv);
1519  cholmod_l_finish (&m_cc);
1520 
1521 #elif defined (HAVE_CXSPARSE)
1522 
1523  CXSPARSE_ZNAME (_sfree) (S);
1524  CXSPARSE_ZNAME (_nfree) (N);
1525 
1526 #endif
1527 }
1528 
1529 template <>
1530 SparseComplexMatrix
1531 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::V (void) const
1532 {
1533 #if defined (HAVE_CXSPARSE)
1534  // Drop zeros from V and sort
1535  // FIXME: Is the double transpose to sort necessary?
1536 
1537  CXSPARSE_ZNAME (_dropzeros) (N->L);
1538  CXSPARSE_ZNAME () *D = CXSPARSE_ZNAME (_transpose) (N->L, 1);
1539  CXSPARSE_ZNAME (_spfree) (N->L);
1540  N->L = CXSPARSE_ZNAME (_transpose) (D, 1);
1541  CXSPARSE_ZNAME (_spfree) (D);
1542 
1543  octave_idx_type nc = N->L->n;
1544  octave_idx_type nz = N->L->nzmax;
1545  SparseComplexMatrix ret (N->L->m, nc, nz);
1546 
1547  for (octave_idx_type j = 0; j < nc+1; j++)
1548  ret.xcidx (j) = N->L->p[j];
1549 
1550  for (octave_idx_type j = 0; j < nz; j++)
1551  {
1552  ret.xridx (j) = N->L->i[j];
1553  ret.xdata (j) = reinterpret_cast<Complex *> (N->L->x)[j];
1554  }
1555 
1556  return ret;
1557 
1558 #else
1559 
1560  return SparseComplexMatrix ();
1561 
1562 #endif
1563 }
1564 
1565 template <>
1566 SparseComplexMatrix
1567 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::R (bool econ) const
1568 {
1569 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
1570 
1571  octave_idx_type nr = from_size_t (m_R->nrow);
1572  octave_idx_type nc = from_size_t (m_R->ncol);
1573  octave_idx_type nz = from_size_t (m_R->nzmax);
1574 
1575  // FIXME: Does this work if econ = true?
1576  SparseComplexMatrix ret ((econ ? (nc > nr ? nr : nc) : nr), nc, nz);
1577  SuiteSparse_long *Rp = reinterpret_cast<SuiteSparse_long *> (m_R->p);
1578  SuiteSparse_long *Ri = reinterpret_cast<SuiteSparse_long *> (m_R->i);
1579 
1580  for (octave_idx_type j = 0; j < nc + 1; j++)
1581  ret.xcidx (j) = from_suitesparse_long (Rp[j]);
1582 
1583  for (octave_idx_type j = 0; j < nz; j++)
1584  {
1585  ret.xridx (j) = from_suitesparse_long (Ri[j]);
1586  ret.xdata (j) = (reinterpret_cast<Complex *> (m_R->x))[j];
1587  }
1588 
1589  return ret;
1590 
1591 #elif defined (HAVE_CXSPARSE)
1592 
1593  // Drop zeros from R and sort
1594  // FIXME: Is the double transpose to sort necessary?
1595 
1596  CXSPARSE_ZNAME (_dropzeros) (N->U);
1597  CXSPARSE_ZNAME () *D = CXSPARSE_ZNAME (_transpose) (N->U, 1);
1598  CXSPARSE_ZNAME (_spfree) (N->U);
1599  N->U = CXSPARSE_ZNAME (_transpose) (D, 1);
1600  CXSPARSE_ZNAME (_spfree) (D);
1601 
1602  octave_idx_type nc = N->U->n;
1603  octave_idx_type nz = N->U->nzmax;
1604 
1605  SparseComplexMatrix ret ((econ ? (nc > nrows ? nrows : nc) : nrows),
1606  nc, nz);
1607 
1608  for (octave_idx_type j = 0; j < nc+1; j++)
1609  ret.xcidx (j) = N->U->p[j];
1610 
1611  for (octave_idx_type j = 0; j < nz; j++)
1612  {
1613  ret.xridx (j) = N->U->i[j];
1614  ret.xdata (j) = reinterpret_cast<Complex *>(N->U->x)[j];
1615  }
1616 
1617  return ret;
1618 
1619 #else
1620 
1621  octave_unused_parameter (econ);
1622 
1623  return SparseComplexMatrix ();
1624 
1625 #endif
1626 }
1627 
1628 template <>
1629 ComplexMatrix
1630 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::C
1631 (const ComplexMatrix& b, bool econ)
1632 {
1633 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
1634 
1635  // FIXME: not tested
1636  octave_idx_type nr = (econ
1637  ? (ncols > nrows ? nrows : ncols)
1638  : nrows);
1639  octave_idx_type b_nr = b.rows ();
1640  octave_idx_type b_nc = b.cols ();
1641  ComplexMatrix ret (nr, b_nc);
1642 
1643  if (nrows != b_nr)
1644  (*current_liboctave_error_handler) ("matrix dimension mismatch");
1645 
1646  if (b_nc < 0 || b_nr < 0)
1647  (*current_liboctave_error_handler)
1648  ("matrix dimension with negative size");
1649 
1650  cholmod_dense *QTB; // Q' * B
1651  cholmod_dense B = cod2ccd (b);
1652 
1653  QTB = SuiteSparseQR_qmult<Complex> (SPQR_QTX, m_H, m_Htau, m_HPinv, &B,
1654  &m_cc);
1655  spqr_error_handler (&m_cc);
1656 
1657  // copy QTB into ret
1658  Complex *QTB_x = reinterpret_cast<Complex *> (QTB->x);
1659  Complex *ret_vec = reinterpret_cast<Complex *> (ret.fortran_vec ());
1660  for (octave_idx_type j = 0; j < b_nc; j++)
1661  for (octave_idx_type i = 0; i < nr; i++)
1662  ret_vec[j * nr + i] = QTB_x[j * b_nr + i];
1663 
1664  cholmod_l_free_dense (&QTB, &m_cc);
1665 
1666  return ret;
1667 
1668 #elif defined (HAVE_CXSPARSE)
1669 
1670  if (econ)
1671  (*current_liboctave_error_handler)
1672  ("sparse-qr: economy mode with CXSparse not supported");
1673 
1674  octave_idx_type b_nr = b.rows ();
1675  octave_idx_type b_nc = b.cols ();
1676  octave_idx_type nc = N->L->n;
1677  octave_idx_type nr = nrows;
1678  const cs_complex_t *bvec
1679  = reinterpret_cast<const cs_complex_t *> (b.data ());
1680  ComplexMatrix ret (b_nr, b_nc);
1681  Complex *vec = ret.fortran_vec ();
1682 
1683  if (nr < 0 || nc < 0 || nr != b_nr)
1684  (*current_liboctave_error_handler) ("matrix dimension mismatch");
1685 
1686  if (nr == 0 || nc == 0 || b_nc == 0)
1687  ret = ComplexMatrix (nc, b_nc, Complex (0.0, 0.0));
1688  else
1689  {
1690  OCTAVE_LOCAL_BUFFER (Complex, buf, S->m2);
1691 
1692  for (volatile octave_idx_type j = 0, idx = 0;
1693  j < b_nc;
1694  j++, idx += b_nr)
1695  {
1696  octave_quit ();
1697 
1698  volatile octave_idx_type nm = (nr < nc ? nr : nc);
1699 
1700  CXSPARSE_ZNAME (_ipvec) (S->pinv, bvec + idx,
1701  reinterpret_cast<cs_complex_t *> (buf),
1702  b_nr);
1703 
1704  for (volatile octave_idx_type i = 0; i < nm; i++)
1705  {
1706  octave_quit ();
1707 
1708  CXSPARSE_ZNAME (_happly) (N->L, i, N->B[i],
1709  reinterpret_cast<cs_complex_t *> (buf));
1710  }
1711 
1712  for (octave_idx_type i = 0; i < b_nr; i++)
1713  vec[i+idx] = buf[i];
1714  }
1715  }
1716 
1717  return ret;
1718 
1719 #else
1720 
1721  octave_unused_parameter (b);
1722  octave_unused_parameter (econ);
1723 
1724  return ComplexMatrix ();
1725 
1726 #endif
1727 }
1728 
1729 template <>
1730 ComplexMatrix
1731 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::Q (bool econ)
1732 {
1733 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
1734 
1735  octave_idx_type nc = (econ
1736  ? (ncols > nrows ? nrows : ncols)
1737  : nrows);
1738  ComplexMatrix ret (nrows, nc);
1739  cholmod_dense *q;
1740 
1741  // I is nrows x nrows identity matrix
1742  cholmod_dense *I
1743  = reinterpret_cast<cholmod_dense *>
1744  (cholmod_l_allocate_dense (nrows, nrows, nrows, CHOLMOD_COMPLEX, &m_cc));
1745 
1746  for (octave_idx_type i = 0; i < nrows * nrows; i++)
1747  (reinterpret_cast<Complex *> (I->x))[i] = 0.0;
1748 
1749  for (octave_idx_type i = 0; i < nrows; i++)
1750  (reinterpret_cast<Complex *> (I->x))[i * nrows + i] = 1.0;
1751 
1752  q = SuiteSparseQR_qmult<Complex> (SPQR_QX, m_H, m_Htau, m_HPinv, I,
1753  &m_cc);
1754  spqr_error_handler (&m_cc);
1755 
1756  Complex *q_x = reinterpret_cast<Complex *> (q->x);
1757  Complex *ret_vec = const_cast<Complex *> (ret.fortran_vec ());
1758 
1759  for (octave_idx_type j = 0; j < nc; j++)
1760  for (octave_idx_type i = 0; i < nrows; i++)
1761  ret_vec[j * nrows + i] = q_x[j * nrows + i];
1762 
1763  cholmod_l_free_dense (&q, &m_cc);
1764  cholmod_l_free_dense (&I, &m_cc);
1765 
1766  return ret;
1767 
1768 #elif defined (HAVE_CXSPARSE)
1769 
1770  if (econ)
1771  (*current_liboctave_error_handler)
1772  ("sparse-qr: economy mode with CXSparse not supported");
1773 
1774  octave_idx_type nc = N->L->n;
1775  octave_idx_type nr = nrows;
1776  ComplexMatrix ret (nr, nr);
1777  Complex *vec = ret.fortran_vec ();
1778 
1779  if (nr < 0 || nc < 0)
1780  (*current_liboctave_error_handler) ("matrix dimension mismatch");
1781 
1782  if (nr == 0 || nc == 0)
1783  ret = ComplexMatrix (nc, nr, Complex (0.0, 0.0));
1784  else
1785  {
1786  OCTAVE_LOCAL_BUFFER (cs_complex_t, bvec, nr);
1787 
1788  for (octave_idx_type i = 0; i < nr; i++)
1789  bvec[i] = cs_complex_t (0.0, 0.0);
1790 
1791  OCTAVE_LOCAL_BUFFER (Complex, buf, S->m2);
1792 
1793  for (volatile octave_idx_type j = 0, idx = 0; j < nr; j++, idx+=nr)
1794  {
1795  octave_quit ();
1796 
1797  bvec[j] = cs_complex_t (1.0, 0.0);
1798 
1799  volatile octave_idx_type nm = (nr < nc ? nr : nc);
1800 
1801  CXSPARSE_ZNAME (_ipvec) (S->pinv, bvec,
1802  reinterpret_cast<cs_complex_t *> (buf),
1803  nr);
1804 
1805  for (volatile octave_idx_type i = 0; i < nm; i++)
1806  {
1807  octave_quit ();
1808 
1809  CXSPARSE_ZNAME (_happly) (N->L, i, N->B[i],
1810  reinterpret_cast<cs_complex_t *> (buf));
1811  }
1812 
1813  for (octave_idx_type i = 0; i < nr; i++)
1814  vec[i+idx] = buf[i];
1815 
1816  bvec[j] = cs_complex_t (0.0, 0.0);
1817  }
1818  }
1819 
1820  return ret.hermitian ();
1821 
1822 #else
1823 
1824  octave_unused_parameter (econ);
1825 
1826  return ComplexMatrix ();
1827 
1828 #endif
1829 }
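// Illustrative note (not from the original file): the CXSparse branch of Q ()
// builds the result row by row from unit vectors, so the real-valued version
// finishes with ret.transpose () while this complex version must finish with
// ret.hermitian (), the conjugate transpose.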
1830 
1831 template <>
1832 template <>
1833 SparseComplexMatrix
1834 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<SparseComplexMatrix,
1835  SparseComplexMatrix>
1836  (const SparseComplexMatrix& b, octave_idx_type& info)
1837 {
1838  info = -1;
1839 
1840 #if defined (HAVE_CXSPARSE)
1841 
1842  octave_idx_type nr = nrows;
1843  octave_idx_type nc = ncols;
1844 
1845  octave_idx_type b_nr = b.rows ();
1846  octave_idx_type b_nc = b.cols ();
1847 
1848  SparseComplexMatrix x (nc, b_nc, b.nnz ());
1849  x.xcidx (0) = 0;
1850 
1851  volatile octave_idx_type x_nz = b.nnz ();
1852  volatile octave_idx_type ii = 0;
1853 
1854  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1855  OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? b_nr : nc));
1856  OCTAVE_LOCAL_BUFFER (double, buf, S->m2);
1857 
1858  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1859  {
1860  octave_quit ();
1861 
1862  for (octave_idx_type j = 0; j < b_nr; j++)
1863  {
1864  Complex c = b.xelem (j, i);
1865  Xx[j] = c.real ();
1866  Xz[j] = c.imag ();
1867  }
1868 
1869  for (octave_idx_type j = nr; j < S->m2; j++)
1870  buf[j] = 0.;
1871 
1872  CXSPARSE_DNAME (_ipvec) (S->pinv, Xx, buf, nr);
1873 
1874  for (volatile octave_idx_type j = 0; j < nc; j++)
1875  {
1876  octave_quit ();
1877 
1878  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1879  }
1880 
1881  CXSPARSE_DNAME (_usolve) (N->U, buf);
1882  CXSPARSE_DNAME (_ipvec) (S->q, buf, Xx, nc);
1883 
1884  for (octave_idx_type j = nr; j < S->m2; j++)
1885  buf[j] = 0.;
1886 
1887  CXSPARSE_DNAME (_ipvec) (S->pinv, Xz, buf, nr);
1888 
1889  for (volatile octave_idx_type j = 0; j < nc; j++)
1890  {
1891  octave_quit ();
1892 
1893  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1894  }
1895 
1896  CXSPARSE_DNAME (_usolve) (N->U, buf);
1897  CXSPARSE_DNAME (_ipvec) (S->q, buf, Xz, nc);
1898 
1899  for (octave_idx_type j = 0; j < nc; j++)
1900  {
1901  Complex tmp = Complex (Xx[j], Xz[j]);
1902 
1903  if (tmp != 0.0)
1904  {
1905  if (ii == x_nz)
1906  {
1907  // Resize the sparse matrix
1908  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
1909  sz = (sz > 10 ? sz : 10) + x_nz;
1910  x.change_capacity (sz);
1911  x_nz = sz;
1912  }
1913 
1914  x.xdata (ii) = tmp;
1915  x.xridx (ii++) = j;
1916  }
1917  }
1918 
1919  x.xcidx (i+1) = ii;
1920  }
1921 
1922  info = 0;
1923 
1924  return x;
1925 
1926 #else
1927 
1928  octave_unused_parameter (b);
1929 
1930  return SparseComplexMatrix ();
1931 
1932 #endif
1933 }
1934 
1935 template <>
1936 template <>
1937 SparseComplexMatrix
1938 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<SparseComplexMatrix,
1939  SparseComplexMatrix>
1940  (const SparseComplexMatrix& b, octave_idx_type& info) const
1941 {
1942  info = -1;
1943 
1944 #if defined (HAVE_CXSPARSE)
1945 
1946  // These are swapped because the original matrix was transposed in
1947  // sparse_qr<SparseMatrix>::solve.
1948 
1949  octave_idx_type nr = ncols;
1950  octave_idx_type nc = nrows;
1951 
1952  octave_idx_type b_nr = b.rows ();
1953  octave_idx_type b_nc = b.cols ();
1954 
1955  SparseComplexMatrix x (nc, b_nc, b.nnz ());
1956  x.xcidx (0) = 0;
1957 
1958  volatile octave_idx_type x_nz = b.nnz ();
1959  volatile octave_idx_type ii = 0;
1960  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
1961 
1962  OCTAVE_LOCAL_BUFFER (double, Xx, (b_nr > nc ? b_nr : nc));
1963  OCTAVE_LOCAL_BUFFER (double, Xz, (b_nr > nc ? b_nr : nc));
1964  OCTAVE_LOCAL_BUFFER (double, buf, nbuf);
1965 
1966  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
1967  {
1968  octave_quit ();
1969 
1970  for (octave_idx_type j = 0; j < b_nr; j++)
1971  {
1972  Complex c = b.xelem (j, i);
1973  Xx[j] = c.real ();
1974  Xz[j] = c.imag ();
1975  }
1976 
1977  for (octave_idx_type j = nr; j < nbuf; j++)
1978  buf[j] = 0.;
1979 
1980  CXSPARSE_DNAME (_pvec) (S->q, Xx, buf, nr);
1981  CXSPARSE_DNAME (_utsolve) (N->U, buf);
1982 
1983  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
1984  {
1985  octave_quit ();
1986 
1987  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
1988  }
1989 
1990  CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xx, nc);
1991 
1992  for (octave_idx_type j = nr; j < nbuf; j++)
1993  buf[j] = 0.;
1994 
1995  CXSPARSE_DNAME (_pvec) (S->q, Xz, buf, nr);
1996  CXSPARSE_DNAME (_utsolve) (N->U, buf);
1997 
1998  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
1999  {
2000  octave_quit ();
2001 
2002  CXSPARSE_DNAME (_happly) (N->L, j, N->B[j], buf);
2003  }
2004 
2005  CXSPARSE_DNAME (_pvec) (S->pinv, buf, Xz, nc);
2006 
2007  for (octave_idx_type j = 0; j < nc; j++)
2008  {
2009  Complex tmp = Complex (Xx[j], Xz[j]);
2010 
2011  if (tmp != 0.0)
2012  {
2013  if (ii == x_nz)
2014  {
2015  // Resize the sparse matrix
2016  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
2017  sz = (sz > 10 ? sz : 10) + x_nz;
2018  x.change_capacity (sz);
2019  x_nz = sz;
2020  }
2021 
2022  x.xdata (ii) = tmp;
2023  x.xridx (ii++) = j;
2024  }
2025  }
2026 
2027  x.xcidx (i+1) = ii;
2028  }
2029 
2030  info = 0;
2031 
2032  x.maybe_compress ();
2033 
2034  return x;
2035 
2036 #else
2037 
2038  octave_unused_parameter (b);
2039 
2040  return SparseComplexMatrix ();
2041 
2042 #endif
2043 }
2044 
2045 template <>
2046 template <>
2047 ComplexMatrix
2048 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<MArray<double>,
2049  ComplexMatrix>
2050  (const MArray<double>& b, octave_idx_type& info)
2051 {
2052  info = -1;
2053 
2054 #if defined (HAVE_CXSPARSE)
2055 
2056  octave_idx_type nr = nrows;
2057  octave_idx_type nc = ncols;
2058 
2059  octave_idx_type b_nc = b.cols ();
2060  octave_idx_type b_nr = b.rows ();
2061 
2062  ComplexMatrix x (nc, b_nc);
2063  cs_complex_t *vec = reinterpret_cast<cs_complex_t *> (x.fortran_vec ());
2064 
2065  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2);
2066  OCTAVE_LOCAL_BUFFER (Complex, Xx, b_nr);
2067 
2068  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2069  {
2070  octave_quit ();
2071 
2072  for (octave_idx_type j = 0; j < b_nr; j++)
2073  Xx[j] = b.xelem (j, i);
2074 
2075  for (octave_idx_type j = nr; j < S->m2; j++)
2076  buf[j] = cs_complex_t (0.0, 0.0);
2077 
2078  CXSPARSE_ZNAME (_ipvec) (S->pinv,
2079  reinterpret_cast<cs_complex_t *>(Xx),
2080  buf, nr);
2081 
2082  for (volatile octave_idx_type j = 0; j < nc; j++)
2083  {
2084  octave_quit ();
2085 
2086  CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf);
2087  }
2088 
2089  CXSPARSE_ZNAME (_usolve) (N->U, buf);
2090  CXSPARSE_ZNAME (_ipvec) (S->q, buf, vec + idx, nc);
2091  }
2092 
2093  info = 0;
2094 
2095  return x;
2096 
2097 #else
2098 
2099  octave_unused_parameter (b);
2100 
2101  return ComplexMatrix ();
2102 
2103 #endif
2104 }
2105 
2106 template <>
2107 template <>
2108 ComplexMatrix
2109 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<MArray<double>,
2110  ComplexMatrix>
2111  (const MArray<double>& b, octave_idx_type& info) const
2112 {
2113  info = -1;
2114 
2115 #if defined (HAVE_CXSPARSE)
2116 
2117  // These are swapped because the original matrix was transposed in
2118  // sparse_qr<SparseComplexMatrix>::solve.
2119 
2120  octave_idx_type nr = ncols;
2121  octave_idx_type nc = nrows;
2122 
2123  octave_idx_type b_nc = b.cols ();
2124  octave_idx_type b_nr = b.rows ();
2125 
2126  ComplexMatrix x (nc, b_nc);
2127  cs_complex_t *vec = reinterpret_cast<cs_complex_t *> (x.fortran_vec ());
2128 
2129  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
2130 
2131  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf);
2132  OCTAVE_LOCAL_BUFFER (Complex, Xx, b_nr);
2133  OCTAVE_LOCAL_BUFFER (double, B, nr);
2134 
2135  for (octave_idx_type i = 0; i < nr; i++)
2136  B[i] = N->B[i];
2137 
2138  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2139  {
2140  octave_quit ();
2141 
2142  for (octave_idx_type j = 0; j < b_nr; j++)
2143  Xx[j] = b.xelem (j, i);
2144 
2145  for (octave_idx_type j = nr; j < nbuf; j++)
2146  buf[j] = cs_complex_t (0.0, 0.0);
2147 
2148  CXSPARSE_ZNAME (_pvec) (S->q, reinterpret_cast<cs_complex_t *> (Xx),
2149  buf, nr);
2150  CXSPARSE_ZNAME (_utsolve) (N->U, buf);
2151 
2152  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
2153  {
2154  octave_quit ();
2155 
2156  CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf);
2157  }
2158 
2159  CXSPARSE_ZNAME (_pvec) (S->pinv, buf, vec + idx, nc);
2160  }
2161 
2162  info = 0;
2163 
2164  return x;
2165 
2166 #else
2167 
2168  octave_unused_parameter (b);
2169 
2170  return ComplexMatrix ();
2171 
2172 #endif
2173 }
2174 
2175 template <>
2176 template <>
2177 SparseComplexMatrix
2178 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<SparseMatrix,
2179  SparseComplexMatrix>
2180  (const SparseMatrix& b, octave_idx_type& info)
2181 {
2182  info = -1;
2183 
2184 #if defined (HAVE_CXSPARSE)
2185 
2186  octave_idx_type nr = nrows;
2187  octave_idx_type nc = ncols;
2188 
2189  octave_idx_type b_nc = b.cols ();
2190  octave_idx_type b_nr = b.rows ();
2191 
2192  SparseComplexMatrix x (nc, b_nc, b.nnz ());
2193  x.xcidx (0) = 0;
2194 
2195  volatile octave_idx_type x_nz = b.nnz ();
2196  volatile octave_idx_type ii = 0;
2197 
2198  OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? b_nr : nc));
2199  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2);
2200 
2201  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2202  {
2203  octave_quit ();
2204 
2205  for (octave_idx_type j = 0; j < b_nr; j++)
2206  Xx[j] = b.xelem (j, i);
2207 
2208  for (octave_idx_type j = nr; j < S->m2; j++)
2209  buf[j] = cs_complex_t (0.0, 0.0);
2210 
2211  CXSPARSE_ZNAME (_ipvec) (S->pinv,
2212  reinterpret_cast<cs_complex_t *> (Xx),
2213  buf, nr);
2214 
2215  for (volatile octave_idx_type j = 0; j < nc; j++)
2216  {
2217  octave_quit ();
2218 
2219  CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf);
2220  }
2221 
2222  CXSPARSE_ZNAME (_usolve) (N->U, buf);
2223  CXSPARSE_ZNAME (_ipvec) (S->q, buf,
2224  reinterpret_cast<cs_complex_t *> (Xx),
2225  nc);
2226 
2227  for (octave_idx_type j = 0; j < nc; j++)
2228  {
2229  Complex tmp = Xx[j];
2230 
2231  if (tmp != 0.0)
2232  {
2233  if (ii == x_nz)
2234  {
2235  // Resize the sparse matrix
2236  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
2237  sz = (sz > 10 ? sz : 10) + x_nz;
2238  x.change_capacity (sz);
2239  x_nz = sz;
2240  }
2241 
2242  x.xdata (ii) = tmp;
2243  x.xridx (ii++) = j;
2244  }
2245  }
2246 
2247  x.xcidx (i+1) = ii;
2248  }
2249 
2250  info = 0;
2251 
2252  x.maybe_compress ();
2253 
2254  return x;
2255 
2256 #else
2257 
2258  octave_unused_parameter (b);
2259 
2260  return SparseComplexMatrix ();
2261 
2262 #endif
2263 }
2264 
2265 template <>
2266 template <>
2267 SparseComplexMatrix
2268 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<SparseMatrix,
2269  SparseComplexMatrix>
2270  (const SparseMatrix& b, octave_idx_type& info) const
2271 {
2272  info = -1;
2273 
2274 #if defined (HAVE_CXSPARSE)
2275 
2276  // These are swapped because the original matrix was transposed in
2277  // sparse_qr<SparseComplexMatrix>::solve.
2278 
2279  octave_idx_type nr = ncols;
2280  octave_idx_type nc = nrows;
2281 
2282  octave_idx_type b_nc = b.cols ();
2283  octave_idx_type b_nr = b.rows ();
2284 
2285  SparseComplexMatrix x (nc, b_nc, b.nnz ());
2286  x.xcidx (0) = 0;
2287 
2288  volatile octave_idx_type x_nz = b.nnz ();
2289  volatile octave_idx_type ii = 0;
2290  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
2291 
2292  OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? b_nr : nc));
2293  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf);
2294  OCTAVE_LOCAL_BUFFER (double, B, nr);
2295 
2296  for (octave_idx_type i = 0; i < nr; i++)
2297  B[i] = N->B[i];
2298 
2299  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2300  {
2301  octave_quit ();
2302 
2303  for (octave_idx_type j = 0; j < b_nr; j++)
2304  Xx[j] = b.xelem (j, i);
2305 
2306  for (octave_idx_type j = nr; j < nbuf; j++)
2307  buf[j] = cs_complex_t (0.0, 0.0);
2308 
2309  CXSPARSE_ZNAME (_pvec) (S->q,
2310  reinterpret_cast<cs_complex_t *> (Xx),
2311  buf, nr);
2312  CXSPARSE_ZNAME (_utsolve) (N->U, buf);
2313 
2314  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
2315  {
2316  octave_quit ();
2317 
2318  CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf);
2319  }
2320 
2321  CXSPARSE_ZNAME (_pvec) (S->pinv, buf,
2322  reinterpret_cast<cs_complex_t *> (Xx),
2323  nc);
2324 
2325  for (octave_idx_type j = 0; j < nc; j++)
2326  {
2327  Complex tmp = Xx[j];
2328 
2329  if (tmp != 0.0)
2330  {
2331  if (ii == x_nz)
2332  {
2333  // Resize the sparse matrix
2334  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
2335  sz = (sz > 10 ? sz : 10) + x_nz;
2336  x.change_capacity (sz);
2337  x_nz = sz;
2338  }
2339 
2340  x.xdata (ii) = tmp;
2341  x.xridx (ii++) = j;
2342  }
2343  }
2344 
2345  x.xcidx (i+1) = ii;
2346  }
2347 
2348  info = 0;
2349 
2350  x.maybe_compress ();
2351 
2352  return x;
2353 
2354 #else
2355 
2356  octave_unused_parameter (b);
2357 
2358  return SparseComplexMatrix ();
2359 
2360 #endif
2361 }
2362 
2363 template <>
2364 template <>
2365 ComplexMatrix
2366 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<MArray<Complex>,
2367  ComplexMatrix>
2368  (const MArray<Complex>& b, octave_idx_type& info)
2369 {
2370  info = -1;
2371 
2372 #if defined (HAVE_CXSPARSE)
2373 
2374  octave_idx_type nr = nrows;
2375  octave_idx_type nc = ncols;
2376 
2377  octave_idx_type b_nc = b.cols ();
2378  octave_idx_type b_nr = b.rows ();
2379 
2380  const cs_complex_t *bvec = reinterpret_cast<const cs_complex_t *>
2381  (b.data ());
2382 
2383  ComplexMatrix x (nc, b_nc);
2384  cs_complex_t *vec = reinterpret_cast<cs_complex_t *>
2385  (x.fortran_vec ());
2386 
2387  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2);
2388 
2389  for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc;
2390  i++, idx+=nc, bidx+=b_nr)
2391  {
2392  octave_quit ();
2393 
2394  for (octave_idx_type j = nr; j < S->m2; j++)
2395  buf[j] = cs_complex_t (0.0, 0.0);
2396 
2397  CXSPARSE_ZNAME (_ipvec) (S->pinv, bvec + bidx, buf, nr);
2398 
2399  for (volatile octave_idx_type j = 0; j < nc; j++)
2400  {
2401  octave_quit ();
2402 
2403  CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf);
2404  }
2405 
2406  CXSPARSE_ZNAME (_usolve) (N->U, buf);
2407  CXSPARSE_ZNAME (_ipvec) (S->q, buf, vec + idx, nc);
2408  }
2409 
2410  info = 0;
2411 
2412  return x;
2413 
2414 #else
2415 
2416  octave_unused_parameter (b);
2417 
2418  return ComplexMatrix ();
2419 
2420 #endif
2421 }
2422 
2423 template <>
2424 template <>
2425 ComplexMatrix
2426 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<MArray<Complex>,
2427  ComplexMatrix>
2428  (const MArray<Complex>& b, octave_idx_type& info) const
2429 {
2430  info = -1;
2431 
2432 #if defined (HAVE_CXSPARSE)
2433 
2434  // These are swapped because the original matrix was transposed in
2435  // sparse_qr<SparseComplexMatrix>::solve.
2436 
2437  octave_idx_type nr = ncols;
2438  octave_idx_type nc = nrows;
2439 
2440  octave_idx_type b_nc = b.cols ();
2441  octave_idx_type b_nr = b.rows ();
2442 
2443  const cs_complex_t *bvec = reinterpret_cast<const cs_complex_t *>
2444  (b.data ());
2445 
2446  ComplexMatrix x (nc, b_nc);
2447  cs_complex_t *vec = reinterpret_cast<cs_complex_t *> (x.fortran_vec ());
2448 
2449  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
2450 
2451  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf);
2452  OCTAVE_LOCAL_BUFFER (double, B, nr);
2453 
2454  for (octave_idx_type i = 0; i < nr; i++)
2455  B[i] = N->B[i];
2456 
2457  for (volatile octave_idx_type i = 0, idx = 0, bidx = 0; i < b_nc;
2458  i++, idx+=nc, bidx+=b_nr)
2459  {
2460  octave_quit ();
2461 
2462  for (octave_idx_type j = nr; j < nbuf; j++)
2463  buf[j] = cs_complex_t (0.0, 0.0);
2464 
2465  CXSPARSE_ZNAME (_pvec) (S->q, bvec + bidx, buf, nr);
2466  CXSPARSE_ZNAME (_utsolve) (N->U, buf);
2467 
2468  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
2469  {
2470  octave_quit ();
2471 
2472  CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf);
2473  }
2474 
2475  CXSPARSE_ZNAME (_pvec) (S->pinv, buf, vec + idx, nc);
2476  }
2477 
2478  info = 0;
2479 
2480  return x;
2481 
2482 #else
2483 
2484  octave_unused_parameter (b);
2485 
2486  return ComplexMatrix ();
2487 
2488 #endif
2489 }
2490 
2491 template <>
2492 template <>
2493 SparseComplexMatrix
2494 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::tall_solve<SparseComplexMatrix,
2495  SparseComplexMatrix>
2496  (const SparseComplexMatrix& b, octave_idx_type& info)
2497 {
2498  info = -1;
2499 
2500 #if defined (HAVE_CXSPARSE)
2501 
2502  octave_idx_type nr = nrows;
2503  octave_idx_type nc = ncols;
2504 
2505  octave_idx_type b_nc = b.cols ();
2506  octave_idx_type b_nr = b.rows ();
2507 
2508  SparseComplexMatrix x (nc, b_nc, b.nnz ());
2509  x.xcidx (0) = 0;
2510 
2511  volatile octave_idx_type x_nz = b.nnz ();
2512  volatile octave_idx_type ii = 0;
2513 
2514  OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? b_nr : nc));
2515  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, S->m2);
2516 
2517  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2518  {
2519  octave_quit ();
2520 
2521  for (octave_idx_type j = 0; j < b_nr; j++)
2522  Xx[j] = b.xelem (j, i);
2523 
2524  for (octave_idx_type j = nr; j < S->m2; j++)
2525  buf[j] = cs_complex_t (0.0, 0.0);
2526 
2527  CXSPARSE_ZNAME (_ipvec) (S->pinv,
2528  reinterpret_cast<cs_complex_t *> (Xx),
2529  buf, nr);
2530 
2531  for (volatile octave_idx_type j = 0; j < nc; j++)
2532  {
2533  octave_quit ();
2534 
2535  CXSPARSE_ZNAME (_happly) (N->L, j, N->B[j], buf);
2536  }
2537 
2538  CXSPARSE_ZNAME (_usolve) (N->U, buf);
2539  CXSPARSE_ZNAME (_ipvec) (S->q, buf,
2540  reinterpret_cast<cs_complex_t *> (Xx),
2541  nc);
2542 
2543  for (octave_idx_type j = 0; j < nc; j++)
2544  {
2545  Complex tmp = Xx[j];
2546 
2547  if (tmp != 0.0)
2548  {
2549  if (ii == x_nz)
2550  {
2551  // Resize the sparse matrix
2552  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
2553  sz = (sz > 10 ? sz : 10) + x_nz;
2554  x.change_capacity (sz);
2555  x_nz = sz;
2556  }
2557 
2558  x.xdata (ii) = tmp;
2559  x.xridx (ii++) = j;
2560  }
2561  }
2562 
2563  x.xcidx (i+1) = ii;
2564  }
2565 
2566  info = 0;
2567 
2568  x.maybe_compress ();
2569 
2570  return x;
2571 
2572 #else
2573 
2574  octave_unused_parameter (b);
2575 
2576  return SparseComplexMatrix ();
2577 
2578 #endif
2579 }
2580 
2581 template <>
2582 template <>
2583 SparseComplexMatrix
2584 sparse_qr<SparseComplexMatrix>::sparse_qr_rep::wide_solve<SparseComplexMatrix,
2585  SparseComplexMatrix>
2586  (const SparseComplexMatrix& b, octave_idx_type& info) const
2587 {
2588  info = -1;
2589 
2590 #if defined (HAVE_CXSPARSE)
2591 
2592  // These are swapped because the original matrix was transposed in
2593  // sparse_qr<SparseComplexMatrix>::solve.
2594 
2595  octave_idx_type nr = ncols;
2596  octave_idx_type nc = nrows;
2597 
2598  octave_idx_type b_nc = b.cols ();
2599  octave_idx_type b_nr = b.rows ();
2600 
2601  SparseComplexMatrix x (nc, b_nc, b.nnz ());
2602  x.xcidx (0) = 0;
2603 
2604  volatile octave_idx_type x_nz = b.nnz ();
2605  volatile octave_idx_type ii = 0;
2606  volatile octave_idx_type nbuf = (nc > S->m2 ? nc : S->m2);
2607 
2608  OCTAVE_LOCAL_BUFFER (Complex, Xx, (b_nr > nc ? b_nr : nc));
2609  OCTAVE_LOCAL_BUFFER (cs_complex_t, buf, nbuf);
2610  OCTAVE_LOCAL_BUFFER (double, B, nr);
2611 
2612  for (octave_idx_type i = 0; i < nr; i++)
2613  B[i] = N->B[i];
2614 
2615  for (volatile octave_idx_type i = 0, idx = 0; i < b_nc; i++, idx+=nc)
2616  {
2617  octave_quit ();
2618 
2619  for (octave_idx_type j = 0; j < b_nr; j++)
2620  Xx[j] = b.xelem (j, i);
2621 
2622  for (octave_idx_type j = nr; j < nbuf; j++)
2623  buf[j] = cs_complex_t (0.0, 0.0);
2624 
2625  CXSPARSE_ZNAME (_pvec) (S->q, reinterpret_cast<cs_complex_t *>(Xx),
2626  buf, nr);
2627  CXSPARSE_ZNAME (_utsolve) (N->U, buf);
2628 
2629  for (volatile octave_idx_type j = nr-1; j >= 0; j--)
2630  {
2631  octave_quit ();
2632 
2633  CXSPARSE_ZNAME (_happly) (N->L, j, B[j], buf);
2634  }
2635 
2636  CXSPARSE_ZNAME (_pvec) (S->pinv, buf,
2637  reinterpret_cast<cs_complex_t *>(Xx), nc);
2638 
2639  for (octave_idx_type j = 0; j < nc; j++)
2640  {
2641  Complex tmp = Xx[j];
2642 
2643  if (tmp != 0.0)
2644  {
2645  if (ii == x_nz)
2646  {
2647  // Resize the sparse matrix
2648  octave_idx_type sz = x_nz * (b_nc - i) / b_nc;
2649  sz = (sz > 10 ? sz : 10) + x_nz;
2650  x.change_capacity (sz);
2651  x_nz = sz;
2652  }
2653 
2654  x.xdata (ii) = tmp;
2655  x.xridx (ii++) = j;
2656  }
2657  }
2658 
2659  x.xcidx (i+1) = ii;
2660  }
2661 
2662  info = 0;
2663 
2664  x.maybe_compress ();
2665 
2666  return x;
2667 
2668 #else
2669 
2670  octave_unused_parameter (b);
2671 
2672  return SparseComplexMatrix ();
2673 
2674 #endif
2675 }
2676 
2677 template <typename SPARSE_T>
2678 sparse_qr<SPARSE_T>::sparse_qr (void)
2679  : m_rep (new sparse_qr_rep (SPARSE_T (), 0))
2680 { }
2681 
2682 template <typename SPARSE_T>
2683 sparse_qr<SPARSE_T>::sparse_qr (const SPARSE_T& a, int order)
2684  : m_rep (new sparse_qr_rep (a, order))
2685 { }
2686 
2687 template <typename SPARSE_T>
2688 bool
2689 sparse_qr<SPARSE_T>::ok (void) const
2690 {
2691  return m_rep->ok ();
2692 }
2693 
2694 template <typename SPARSE_T>
2695 SPARSE_T
2696 sparse_qr<SPARSE_T>::V (void) const
2697 {
2698  return m_rep->V ();
2699 }
2700 
2701 template <typename SPARSE_T>
2702 ColumnVector
2703 sparse_qr<SPARSE_T>::Pinv (void) const
2704 {
2705  return m_rep->P ();
2706 }
2707 
2708 template <typename SPARSE_T>
2709 ColumnVector
2710 sparse_qr<SPARSE_T>::P (void) const
2711 {
2712  return m_rep->P ();
2713 }
2714 
2715 template <typename SPARSE_T>
2716 ColumnVector
2717 sparse_qr<SPARSE_T>::E (void) const
2718 {
2719  return m_rep->E();
2720 }
2721 
2722 
2723 template <typename SPARSE_T>
2724 SparseMatrix
2725 sparse_qr<SPARSE_T>::E_MAT (void) const
2726 {
2727  ColumnVector perm = m_rep->E ();
2728  octave_idx_type nrows = perm.rows ();
2729  SparseMatrix ret (nrows, nrows, nrows);
2730  for (octave_idx_type i = 0; i < nrows; i++)
2731  ret(perm(i) - 1, i) = 1.0;
2732  return ret;
2733 }
2734 
2735 
2736 template <typename SPARSE_T>
2737 SPARSE_T
2738 sparse_qr<SPARSE_T>::R (bool econ) const
2739 {
2740  return m_rep->R (econ);
2741 }
2742 
2743 template <typename SPARSE_T>
2744 typename SPARSE_T::dense_matrix_type
2745 sparse_qr<SPARSE_T>::C (const typename SPARSE_T::dense_matrix_type& b,
2746  bool econ) const
2747 {
2748  return m_rep->C (b, econ);
2749 }
2750 
2751 template <typename SPARSE_T>
2752 typename SPARSE_T::dense_matrix_type
2753 sparse_qr<SPARSE_T>::Q (bool econ) const
2754 {
2755  return m_rep->Q (econ);
2756 }
2757 
2758 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
2759 //specializations of function min2norm_solve
2760 template <>
2761 template <>
2762 Matrix
2763 sparse_qr<SparseMatrix>::min2norm_solve<MArray<double>, Matrix>
2764 (const SparseMatrix& a, const MArray<double>& b,
2765  octave_idx_type& info, int order)
2766 {
2767  info = -1;
2768  octave_idx_type b_nc = b.cols ();
2769  octave_idx_type nc = a.cols ();
2770  Matrix x (nc, b_nc);
2771  cholmod_common cc;
2772 
2773  cholmod_l_start (&cc);
2774  cholmod_sparse A = ros2rcs (a);
2775  cholmod_dense B = rod2rcd (b);
2776  cholmod_dense *X;
2777 
2778  X = SuiteSparseQR_min2norm<double> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);
2779  spqr_error_handler (&cc);
2780 
2781  double *vec = x.fortran_vec ();
2782  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
2783  vec[i] = reinterpret_cast<double *> (X->x)[i];
2784 
2785  info = 0;
2786  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
2787  {
2788  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
2789  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
2790  }
2791  cholmod_l_finish (&cc);
2792 
2793  return x;
2794 
2795 }
2796 
2797 template <>
2798 template <>
2799 SparseMatrix
2800 sparse_qr<SparseMatrix>::min2norm_solve<SparseMatrix, SparseMatrix>
2801 (const SparseMatrix& a, const SparseMatrix& b, octave_idx_type& info,
2802  int order)
2803 {
2804  info = -1;
2805  SparseMatrix x;
2806  cholmod_common cc;
2807 
2808  cholmod_l_start (&cc);
2809  cholmod_sparse A = ros2rcs (a);
2810  cholmod_sparse B = ros2rcs (b);
2811  cholmod_sparse *X;
2812 
2813  X = SuiteSparseQR_min2norm<double> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);
2814  spqr_error_handler (&cc);
2815 
2816  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
2817  {
2818  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
2819  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
2820  delete [] reinterpret_cast<SuiteSparse_long *> (B.p);
2821  delete [] reinterpret_cast<SuiteSparse_long *> (B.i);
2822  }
2823 
2824  x = rcs2ros (X);
2825  cholmod_l_finish (&cc);
2826  info = 0;
2827 
2828  return x;
2829 
2830 }
2831 
2832 template <>
2833 template <>
2834 ComplexMatrix
2835 sparse_qr<SparseMatrix>::min2norm_solve<MArray<Complex>, ComplexMatrix>
2836 (const SparseMatrix& a, const MArray<Complex>& b,
2837  octave_idx_type& info, int order)
2838 {
2839  info = -1;
2840 
2841  octave_idx_type b_nc = b.cols ();
2842  octave_idx_type nc = a.cols ();
2843 
2844  ComplexMatrix x (nc, b_nc);
2845 
2846  cholmod_common cc;
2847 
2848  cholmod_l_start (&cc);
2849 
2850  cholmod_sparse *A = ros2ccs (a, &cc);
2851  cholmod_dense B = cod2ccd (b);
2852  cholmod_dense *X;
2853 
2854  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, A, &B, &cc);
2855  spqr_error_handler (&cc);
2856 
2857  Complex *vec = x.fortran_vec ();
2858  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
2859  vec[i] = reinterpret_cast<Complex *> (X->x)[i];
2860 
2861  cholmod_l_free_sparse (&A, &cc);
2862  cholmod_l_finish (&cc);
2863 
2864  info = 0;
2865 
2866  return x;
2867 
2868 }
2869 
2870 template <>
2871 template <>
2872 SparseComplexMatrix
2873 sparse_qr<SparseMatrix>::min2norm_solve<SparseComplexMatrix,
2874  SparseComplexMatrix>
2875  (const SparseMatrix& a, const SparseComplexMatrix& b,
2876  octave_idx_type& info, int order)
2877 {
2878  info = -1;
2879 
2880  cholmod_common cc;
2881 
2882  cholmod_l_start (&cc);
2883 
2884  cholmod_sparse *A = ros2ccs (a, &cc);
2885  cholmod_sparse B = cos2ccs (b);
2886  cholmod_sparse *X;
2887 
2888  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, A, &B, &cc);
2889  spqr_error_handler (&cc);
2890 
2891  cholmod_l_free_sparse (&A, &cc);
2892  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
2893  {
2894  delete [] reinterpret_cast<SuiteSparse_long *> (B.p);
2895  delete [] reinterpret_cast<SuiteSparse_long *> (B.i);
2896  }
2897  cholmod_l_finish (&cc);
2898 
2899  SparseComplexMatrix ret = ccs2cos(X);
2900 
2901  info = 0;
2902 
2903  return ret;
2904 
2905 }
2906 
2907 template <>
2908 template <>
2909 ComplexMatrix
2910 sparse_qr<SparseComplexMatrix>::min2norm_solve<MArray<Complex>,
2911  ComplexMatrix>
2912  (const SparseComplexMatrix& a, const MArray<Complex>& b,
2913  octave_idx_type& info, int order)
2914 {
2915  info = -1;
2916  octave_idx_type b_nc = b.cols ();
2917  octave_idx_type nc = a.cols ();
2918  ComplexMatrix x (nc, b_nc);
2919 
2920  cholmod_common cc;
2921 
2922  cholmod_l_start (&cc);
2923 
2924  cholmod_sparse A = cos2ccs (a);
2925  cholmod_dense B = cod2ccd (b);
2926  cholmod_dense *X;
2927 
2928  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);
2929  spqr_error_handler (&cc);
2930 
2931  Complex *vec = x.fortran_vec ();
2932  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
2933  vec[i] = reinterpret_cast<Complex *> (X->x)[i];
2934 
2935  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
2936  {
2937  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
2938  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
2939  }
2940  cholmod_l_finish (&cc);
2941 
2942  info = 0;
2943 
2944  return x;
2945 
2946 }
2947 
2948 template <>
2949 template <>
2950 ComplexMatrix
2951 sparse_qr<SparseComplexMatrix>::min2norm_solve<MArray<double>,
2952  ComplexMatrix>
2953  (const SparseComplexMatrix& a, const MArray<double>& b,
2954  octave_idx_type& info, int order)
2955 {
2956  info = -1;
2957 
2958  octave_idx_type b_nc = b.cols ();
2959  octave_idx_type nc = a.cols ();
2960  ComplexMatrix x (nc, b_nc);
2961 
2962  cholmod_common cc;
2963 
2964  cholmod_l_start (&cc);
2965 
2966  cholmod_sparse A = cos2ccs (a);
2967  cholmod_dense *B = rod2ccd (b, &cc);
2968  cholmod_dense *X;
2969 
2970  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, B, &cc);
2971  spqr_error_handler (&cc);
2972 
2973  Complex *vec = x.fortran_vec ();
2974 
2975  for (volatile octave_idx_type i = 0; i < nc * b_nc; i++)
2976  vec[i] = reinterpret_cast<Complex *> (X->x)[i];
2977 
2978  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
2979  {
2980  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
2981  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
2982  }
2983  cholmod_l_free_dense (&B, &cc);
2984  cholmod_l_finish (&cc);
2985 
2986  info = 0;
2987 
2988  return x;
2989 
2990 }
2991 
2992 template <>
2993 template <>
2994 SparseComplexMatrix
2995 sparse_qr<SparseComplexMatrix>::min2norm_solve<SparseComplexMatrix,
2996  SparseComplexMatrix>
2997  (const SparseComplexMatrix& a, const SparseComplexMatrix& b,
2998  octave_idx_type& info, int order)
2999 {
3000  info = -1;
3001 
3002  cholmod_common cc;
3003 
3004  cholmod_l_start (&cc);
3005 
3006  cholmod_sparse A = cos2ccs (a);
3007  cholmod_sparse B = cos2ccs (b);
3008  cholmod_sparse *X;
3009 
3010  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, &B, &cc);
3011  spqr_error_handler (&cc);
3012 
3013  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
3014  {
3015  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
3016  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
3017  delete [] reinterpret_cast<SuiteSparse_long *> (B.p);
3018  delete [] reinterpret_cast<SuiteSparse_long *> (B.i);
3019  }
3020  cholmod_l_finish (&cc);
3021 
3022  info = 0;
3023 
3024  return ccs2cos (X);
3025 
3026 }
3027 
3028 template <>
3029 template <>
3030 SparseComplexMatrix
3031 sparse_qr<SparseComplexMatrix>::min2norm_solve<SparseMatrix,
3032  SparseComplexMatrix>
3033  (const SparseComplexMatrix& a, const SparseMatrix& b,
3034  octave_idx_type& info, int order)
3035 {
3036  info = -1;
3037 
3038  cholmod_common cc;
3039 
3040  cholmod_l_start (&cc);
3041 
3042  cholmod_sparse A = cos2ccs (a);
3043  cholmod_sparse *B = ros2ccs (b, &cc);
3044  cholmod_sparse *X;
3045 
3046  X = SuiteSparseQR_min2norm<Complex> (order, SPQR_DEFAULT_TOL, &A, B, &cc);
3047  spqr_error_handler (&cc);
3048 
3049  SparseComplexMatrix ret = ccs2cos(X);
3050 
3051  if (sizeof (octave_idx_type) != sizeof (SuiteSparse_long))
3052  {
3053  delete [] reinterpret_cast<SuiteSparse_long *> (A.p);
3054  delete [] reinterpret_cast<SuiteSparse_long *> (A.i);
3055  }
3056  cholmod_l_free_sparse (&B, &cc);
3057  cholmod_l_finish (&cc);
3058 
3059  info = 0;
3060 
3061  return ret;
3062 
3063 }
3064 #endif
3065 
3066 // FIXME: Why is the "order" of the QR calculation as used in the
3067 // CXSparse function sqr 3 for real matrices and 2 for complex? These
3068 // values seem to be required but there was no explanation in David
3069 // Bateman's original code.
3070 
3071 template <typename SPARSE_T>
3072 class
3074 {
3075 public:
3076  enum { order = -1 };
3077 };
3078 
3079 template <>
3080 class
3082 {
3083 public:
3084 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
3085  enum { order = SPQR_ORDERING_DEFAULT };
3086 #elif defined (HAVE_CXSPARSE)
3087  enum { order = 3 };
3088 #endif
3089 };
3090 
3091 template <>
3092 class
3094 {
3095 public:
3096 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
3097  enum { order = SPQR_ORDERING_DEFAULT };
3098 #elif defined (HAVE_CXSPARSE)
3099  enum { order = 2 };
3100 #endif
3101 };
3102 
3103 template <typename SPARSE_T>
3104 template <typename RHS_T, typename RET_T>
3105 RET_T
3106 sparse_qr<SPARSE_T>::solve (const SPARSE_T& a, const RHS_T& b,
3107  octave_idx_type& info)
3108 {
3109 #if (defined (HAVE_SPQR) && defined (HAVE_CHOLMOD))
3110 
3111  info = -1;
3112 
3113  octave_idx_type nr = a.rows ();
3114  octave_idx_type nc = a.cols ();
3115 
3116  octave_idx_type b_nc = b.cols ();
3117  octave_idx_type b_nr = b.rows ();
3118 
3120 
3121  if (nr < 0 || nc < 0 || b_nc < 0 || b_nr < 0)
3122  (*current_liboctave_error_handler)
3123  ("matrix dimension with negative size");
3124 
3125  if ( nr != b_nr)
3126  (*current_liboctave_error_handler)
3127  ("matrix dimension mismatch in solution of minimum norm problem");
3128 
3129  info = 0;
3130 
3131  return min2norm_solve<RHS_T, RET_T> (a, b, info, order);
3132 
3133 #elif defined (HAVE_CXSPARSE)
3134 
3135  info = -1;
3136 
3137  octave_idx_type nr = a.rows ();
3138  octave_idx_type nc = a.cols ();
3139 
3140  octave_idx_type b_nc = b.cols ();
3141  octave_idx_type b_nr = b.rows ();
3142 
3144 
3145  if (nr < 0 || nc < 0 || nr != b_nr)
3146  (*current_liboctave_error_handler)
3147  ("matrix dimension mismatch in solution of minimum norm problem");
3148 
3149  if (nr == 0 || nc == 0 || b_nc == 0)
3150  {
3151  info = 0;
3152 
3153  return RET_T (nc, b_nc, 0.0);
3154  }
3155  else if (nr >= nc)
3156  {
3157  sparse_qr<SPARSE_T> q (a, order);
3158 
3159  return q.ok () ? q.tall_solve<RHS_T, RET_T> (b, info) : RET_T ();
3160  }
3161  else
3162  {
3163  sparse_qr<SPARSE_T> q (a.hermitian (), order);
3164 
3165  return q.ok () ? q.wide_solve<RHS_T, RET_T> (b, info) : RET_T ();
3166  }
3167 
3168 #else
3169 
3170  octave_unused_parameter (a);
3171  octave_unused_parameter (b);
3172  octave_unused_parameter (info);
3173 
3174  return RET_T ();
3175 
3176 #endif
3177 }
3178 
3179 //explicit instantiations of static member function solve
3180 template
3181 OCTAVE_API Matrix
3182 sparse_qr<SparseMatrix>::solve<MArray<double>, Matrix>
3183 (const SparseMatrix& a, const MArray<double>& b, octave_idx_type& info);
3184 
3185 template
3186 OCTAVE_API SparseMatrix
3187 sparse_qr<SparseMatrix>::solve<SparseMatrix, SparseMatrix>
3188 (const SparseMatrix& a, const SparseMatrix& b, octave_idx_type& info);
3189 
3190 template
3191 OCTAVE_API ComplexMatrix
3192 sparse_qr<SparseMatrix>::solve<MArray<Complex>, ComplexMatrix>
3193 (const SparseMatrix& a, const MArray<Complex>& b, octave_idx_type& info);
3194 
3195 template
3196 OCTAVE_API SparseComplexMatrix
3197 sparse_qr<SparseMatrix>::solve<SparseComplexMatrix, SparseComplexMatrix>
3198 (const SparseMatrix& a, const SparseComplexMatrix& b,
3199  octave_idx_type& info);
3200 
3201 template
3202 OCTAVE_API ComplexMatrix
3203 sparse_qr<SparseComplexMatrix>::solve<MArray<Complex>, ComplexMatrix>
3204 (const SparseComplexMatrix& a, const MArray<Complex>& b,
3205  octave_idx_type& info);
3206 
3207 template
3208 OCTAVE_API SparseComplexMatrix
3209 sparse_qr<SparseComplexMatrix>::solve<SparseComplexMatrix,
3210  SparseComplexMatrix>
3211 (const SparseComplexMatrix& a, const SparseComplexMatrix& b,
3212  octave_idx_type& info);
3213 
3214 template
3215 OCTAVE_API ComplexMatrix
3216 sparse_qr<SparseComplexMatrix>::solve<MArray<double>, ComplexMatrix>
3217 (const SparseComplexMatrix& a, const MArray<double>& b,
3218  octave_idx_type& info);
3219 
3220 template
3221 OCTAVE_API SparseComplexMatrix
3222 sparse_qr<SparseComplexMatrix>::solve<SparseMatrix, SparseComplexMatrix>
3223 (const SparseComplexMatrix& a, const SparseMatrix& b,
3224  octave_idx_type& info);
3225 
3226 //explicit instantiations of member function E_MAT
3227 template
3228 OCTAVE_API SparseMatrix
3229 sparse_qr<SparseMatrix>::E_MAT (void) const;
3230 
3231 template
3232 OCTAVE_API SparseMatrix
3233 sparse_qr<SparseComplexMatrix>::E_MAT (void) const;
3234 
3235 template <typename SPARSE_T>
3236 template <typename RHS_T, typename RET_T>
3237 RET_T
3238 sparse_qr<SPARSE_T>::tall_solve (const RHS_T& b, octave_idx_type& info) const
3239 {
3240  return m_rep->template tall_solve<RHS_T, RET_T> (b, info);
3241 }
3242 
3243 template <typename SPARSE_T>
3244 template <typename RHS_T, typename RET_T>
3245 RET_T
3246 sparse_qr<SPARSE_T>::wide_solve (const RHS_T& b, octave_idx_type& info) const
3247 {
3248  return m_rep->template wide_solve<RHS_T, RET_T> (b, info);
3249 }
3250 
3251 // Explicitly instantiate all member functions
3252 
3254 template OCTAVE_API
3255 sparse_qr<SparseMatrix>::sparse_qr (const SparseMatrix& a, int order);
3256 template OCTAVE_API bool sparse_qr<SparseMatrix>::ok (void) const;
3257 template OCTAVE_API ColumnVector sparse_qr<SparseMatrix>::E (void) const;
3258 template OCTAVE_API SparseMatrix sparse_qr<SparseMatrix>::V (void) const;
3259 template OCTAVE_API ColumnVector sparse_qr<SparseMatrix>::Pinv (void) const;
3260 template OCTAVE_API ColumnVector sparse_qr<SparseMatrix>::P (void) const;
3261 template OCTAVE_API SparseMatrix
3262 sparse_qr<SparseMatrix>::R (bool econ) const;
3263 template OCTAVE_API Matrix
3264 sparse_qr<SparseMatrix>::C (const Matrix& b, bool econ) const;
3265 template OCTAVE_API Matrix sparse_qr<SparseMatrix>::Q (bool econ) const;
3266 
3268 template OCTAVE_API
3269 sparse_qr<SparseComplexMatrix>::sparse_qr
3270 (const SparseComplexMatrix& a, int order);
3271 template OCTAVE_API bool sparse_qr<SparseComplexMatrix>::ok (void) const;
3272 template OCTAVE_API ColumnVector
3273 sparse_qr<SparseComplexMatrix>::E (void) const;
3274 template OCTAVE_API SparseComplexMatrix
3275 sparse_qr<SparseComplexMatrix>::V (void) const;
3276 template OCTAVE_API ColumnVector
3277 sparse_qr<SparseComplexMatrix>::Pinv (void) const;
3278 template OCTAVE_API ColumnVector
3279 sparse_qr<SparseComplexMatrix>::P (void) const;
3280 template OCTAVE_API SparseComplexMatrix
3281 sparse_qr<SparseComplexMatrix>::R (bool econ) const;
3282 template OCTAVE_API ComplexMatrix
3283 sparse_qr<SparseComplexMatrix>::C (const ComplexMatrix& b, bool econ) const;
3284 template OCTAVE_API ComplexMatrix
3285 sparse_qr<SparseComplexMatrix>::Q (bool econ) const;
3286 
3287 Matrix
3288 qrsolve (const SparseMatrix& a, const MArray<double>& b,
3289  octave_idx_type& info)
3290 {
3291  return sparse_qr<SparseMatrix>::solve<MArray<double>, Matrix> (a, b,
3292  info);
3293 }
3294 
3295 SparseMatrix
3296 qrsolve (const SparseMatrix& a, const SparseMatrix& b,
3297  octave_idx_type& info)
3298 {
3299  return sparse_qr<SparseMatrix>::solve<SparseMatrix, SparseMatrix> (a, b,
3300  info);
3301 }
3302 
3303 ComplexMatrix
3304 qrsolve (const SparseMatrix& a, const MArray<Complex>& b,
3305  octave_idx_type& info)
3306 {
3307  return sparse_qr<SparseMatrix>::solve<MArray<Complex>,
3308  ComplexMatrix> (a, b, info);
3309 }
3310 
3311 SparseComplexMatrix
3312 qrsolve (const SparseMatrix& a, const SparseComplexMatrix& b,
3313  octave_idx_type& info)
3314 {
3315  return sparse_qr<SparseMatrix>::solve<SparseComplexMatrix,
3316  SparseComplexMatrix> (a, b, info);
3317 }
3318 
3319 ComplexMatrix
3320 qrsolve (const SparseComplexMatrix& a, const MArray<Complex>& b,
3321  octave_idx_type& info)
3322 {
3323  return sparse_qr<SparseComplexMatrix>::solve<MArray<Complex>,
3324  ComplexMatrix> (a, b, info);
3325 }
3326 
3327 SparseComplexMatrix
3328 qrsolve (const SparseComplexMatrix& a, const SparseComplexMatrix& b,
3329  octave_idx_type& info)
3330 {
3331  return sparse_qr<SparseComplexMatrix>::solve<SparseComplexMatrix,
3332  SparseComplexMatrix>
3333  (a, b, info);
3334 }
3335 
3336 ComplexMatrix
3337 qrsolve (const SparseComplexMatrix& a, const MArray<double>& b,
3338  octave_idx_type& info)
3339 {
3340  return sparse_qr<SparseComplexMatrix>::solve<MArray<double>,
3341  ComplexMatrix> (a, b, info);
3342 }
3343 
3344 SparseComplexMatrix
3345 qrsolve (const SparseComplexMatrix& a, const SparseMatrix& b,
3346  octave_idx_type& info)
3347 {
3348  return sparse_qr<SparseComplexMatrix>::solve<SparseMatrix,
3349  SparseComplexMatrix>
3350  (a, b, info);
3351 }
3352 
OCTAVE_END_NAMESPACE(octave)
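
The listing ends here. As a usage illustration only (not part of sparse-qr.cc), the sketch below shows how the qrsolve overloads defined above might be called from C++ code that links against liboctave. It assumes the liboctave development headers are on the include path, that qrsolve is reachable as octave::math::qrsolve, and that the small test matrices and the main() harness are made up for the example.

// Illustrative sketch; the matrices, the main() harness and the
// octave::math qualification are assumptions for this example, not
// part of sparse-qr.cc.

#include <iostream>

#include "dMatrix.h"
#include "dSparse.h"
#include "sparse-qr.h"

int
main (void)
{
  // An over-determined 3x2 sparse system a*x = b.
  SparseMatrix a (3, 2);
  a(0,0) = 1.0;  a(1,0) = 1.0;  a(1,1) = 1.0;  a(2,1) = 2.0;

  Matrix b (3, 1);
  b(0,0) = 1.0;  b(1,0) = 2.0;  b(2,0) = 3.0;

  // Dispatches to sparse_qr<SparseMatrix>::solve<MArray<double>, Matrix>;
  // info is set to 0 on success.
  octave_idx_type info = -1;
  Matrix x = octave::math::qrsolve (a, b, info);

  if (info == 0)
    std::cout << x;

  return 0;
}

Depending on the build flags tested above, such a call goes through either the SuiteSparseQR min2norm_solve path (HAVE_SPQR and HAVE_CHOLMOD) or the CXSparse tall_solve/wide_solve path (HAVE_CXSPARSE).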