GNU Octave  6.2.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
Sparse.cc
1 ////////////////////////////////////////////////////////////////////////
2 //
3 // Copyright (C) 1998-2021 The Octave Project Developers
4 //
5 // See the file COPYRIGHT.md in the top-level directory of this
6 // distribution or <https://octave.org/copyright/>.
7 //
8 // This file is part of Octave.
9 //
10 // Octave is free software: you can redistribute it and/or modify it
11 // under the terms of the GNU General Public License as published by
12 // the Free Software Foundation, either version 3 of the License, or
13 // (at your option) any later version.
14 //
15 // Octave is distributed in the hope that it will be useful, but
16 // WITHOUT ANY WARRANTY; without even the implied warranty of
17 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 // GNU General Public License for more details.
19 //
20 // You should have received a copy of the GNU General Public License
21 // along with Octave; see the file COPYING. If not, see
22 // <https://www.gnu.org/licenses/>.
23 //
24 ////////////////////////////////////////////////////////////////////////
25 
26 // This file should not include config.h. It is only included in other
27 // C++ source files that should have included config.h before including
28 // this file.
29 
30 #include <cassert>
31 #include <cinttypes>
32 
33 #include <algorithm>
34 #include <istream>
35 #include <ostream>
36 #include <sstream>
37 #include <vector>
38 
39 #include "Array.h"
40 #include "MArray.h"
41 #include "Array-util.h"
42 #include "Range.h"
43 #include "idx-vector.h"
44 #include "lo-error.h"
45 #include "quit.h"
46 #include "oct-locbuf.h"
47 
48 #include "Sparse.h"
49 #include "sparse-sort.h"
50 #include "sparse-util.h"
51 #include "oct-spparms.h"
52 #include "mx-inlines.cc"
53 
54 #include "PermMatrix.h"
55 
56 template <typename T>
57 typename Sparse<T>::SparseRep *
58 Sparse<T>::nil_rep (void)
59 {
60  static typename Sparse<T>::SparseRep nr;
61  return &nr;
62 }
63 
64 template <typename T>
65 T&
66 Sparse<T>::SparseRep::elem (octave_idx_type _r, octave_idx_type _c)
67 {
68  octave_idx_type i;
69 
70  if (nzmx <= 0)
71  (*current_liboctave_error_handler)
72  ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
73 
74  for (i = c[_c]; i < c[_c + 1]; i++)
75  if (r[i] == _r)
76  return d[i];
77  else if (r[i] > _r)
78  break;
79 
80  // OK, if we've gotten here, we're in trouble: we have to create a
81  // new element in the sparse array, and that is going to be slow.
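  // Each such insertion shifts every trailing nonzero down one slot and
  // bumps every later column pointer, so filling a matrix element by
  // element costs O(nnz) work per insertion; the triplet constructor
  // Sparse (a, r, c, ...) further down assembles all entries at once instead.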
82  if (c[ncols] == nzmx)
83  (*current_liboctave_error_handler)
84  ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
85 
86  octave_idx_type to_move = c[ncols] - i;
87  if (to_move != 0)
88  {
89  for (octave_idx_type j = c[ncols]; j > i; j--)
90  {
91  d[j] = d[j-1];
92  r[j] = r[j-1];
93  }
94  }
95 
96  for (octave_idx_type j = _c + 1; j < ncols + 1; j++)
97  c[j] = c[j] + 1;
98 
99  d[i] = 0.;
100  r[i] = _r;
101 
102  return d[i];
103 }
104 
105 template <typename T>
106 T
107 Sparse<T>::SparseRep::celem (octave_idx_type _r, octave_idx_type _c) const
108 {
109  if (nzmx > 0)
110  for (octave_idx_type i = c[_c]; i < c[_c + 1]; i++)
111  if (r[i] == _r)
112  return d[i];
113  return T ();
114 }
115 
116 template <typename T>
117 void
118 Sparse<T>::SparseRep::maybe_compress (bool remove_zeros)
119 {
120  if (remove_zeros)
121  {
122  octave_idx_type i = 0;
123  octave_idx_type k = 0;
124  for (octave_idx_type j = 1; j <= ncols; j++)
125  {
126  octave_idx_type u = c[j];
127  for (; i < u; i++)
128  if (d[i] != T ())
129  {
130  d[k] = d[i];
131  r[k++] = r[i];
132  }
133  c[j] = k;
134  }
135  }
136 
137  change_length (c[ncols]);
138 }
139 
140 template <typename T>
141 void
142 Sparse<T>::SparseRep::change_length (octave_idx_type nz)
143 {
144  for (octave_idx_type j = ncols; j > 0 && c[j] > nz; j--)
145  c[j] = nz;
146 
147  // Always preserve space for 1 element.
148  nz = (nz > 0 ? nz : 1);
149 
150  // Skip reallocation if we have less than 1/frac extra elements to discard.
151  static const int frac = 5;
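  // That is: reallocate whenever the requested size exceeds the current
  // capacity, but when shrinking only if more than a fifth of the
  // currently allocated elements would otherwise sit unused.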
152  if (nz > nzmx || nz < nzmx - nzmx/frac)
153  {
154  // Reallocate.
155  octave_idx_type min_nzmx = std::min (nz, nzmx);
156 
157  octave_idx_type *new_ridx = new octave_idx_type [nz];
158  std::copy_n (r, min_nzmx, new_ridx);
159 
160  delete [] r;
161  r = new_ridx;
162 
163  T *new_data = new T [nz];
164  std::copy_n (d, min_nzmx, new_data);
165 
166  delete [] d;
167  d = new_data;
168 
169  nzmx = nz;
170  }
171 }
172 
173 template <typename T>
174 bool
175 Sparse<T>::SparseRep::indices_ok (void) const
176 {
177  return sparse_indices_ok (r, c, nrows, ncols, nnz ());
178 }
179 
180 template <typename T>
181 bool
182 Sparse<T>::SparseRep::any_element_is_nan (void) const
183 {
184  octave_idx_type nz = nnz ();
185 
186  for (octave_idx_type i = 0; i < nz; i++)
187  if (octave::math::isnan (d[i]))
188  return true;
189 
190  return false;
191 }
192 
193 template <typename T>
194 Sparse<T>::Sparse (octave_idx_type nr, octave_idx_type nc, T val)
195  : rep (nullptr), dimensions (dim_vector (nr, nc))
196 {
197  if (val != T ())
198  {
199  rep = new typename Sparse<T>::SparseRep (nr, nc, dimensions.safe_numel ());
200 
201  octave_idx_type ii = 0;
202  xcidx (0) = 0;
203  for (octave_idx_type j = 0; j < nc; j++)
204  {
205  for (octave_idx_type i = 0; i < nr; i++)
206  {
207  xdata (ii) = val;
208  xridx (ii++) = i;
209  }
210  xcidx (j+1) = ii;
211  }
212  }
213  else
214  {
215  rep = new typename Sparse<T>::SparseRep (nr, nc, 0);
216  for (octave_idx_type j = 0; j < nc+1; j++)
217  xcidx (j) = 0;
218  }
219 }
220 
221 template <typename T>
222 Sparse<T>::Sparse (const PermMatrix& a)
223  : rep (new typename Sparse<T>::SparseRep (a.rows (), a.cols (), a.rows ())),
224  dimensions (dim_vector (a.rows (), a.cols ()))
225 {
226  octave_idx_type n = a.rows ();
227  for (octave_idx_type i = 0; i <= n; i++)
228  cidx (i) = i;
229 
230  const Array<octave_idx_type> pv = a.col_perm_vec ();
231 
232  for (octave_idx_type i = 0; i < n; i++)
233  ridx (i) = pv(i);
234 
235  for (octave_idx_type i = 0; i < n; i++)
236  data (i) = 1.0;
237 }
238 
239 template <typename T>
240 Sparse<T>::Sparse (const dim_vector& dv)
241  : rep (nullptr), dimensions (dv)
242 {
243  if (dv.ndims () != 2)
244  (*current_liboctave_error_handler)
245  ("Sparse::Sparse (const dim_vector&): dimension mismatch");
246 
247  rep = new typename Sparse<T>::SparseRep (dv(0), dv(1), 0);
248 }
249 
250 template <typename T>
251 Sparse<T>::Sparse (const Sparse<T>& a, const dim_vector& dv)
252  : rep (nullptr), dimensions (dv)
253 {
254 
255  // Work in unsigned long long to avoid overflow issues with numel
256  unsigned long long a_nel = static_cast<unsigned long long>(a.rows ()) *
257  static_cast<unsigned long long>(a.cols ());
258  unsigned long long dv_nel = static_cast<unsigned long long>(dv(0)) *
259  static_cast<unsigned long long>(dv(1));
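  // With a 32-bit octave_idx_type, e.g. a 65536-by-65536 matrix already has
  // 2^32 elements, which would overflow a signed 32-bit product, so the two
  // element counts are compared in 64-bit arithmetic instead.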
260 
261  if (a_nel != dv_nel)
262  (*current_liboctave_error_handler)
263  ("Sparse::Sparse (const Sparse&, const dim_vector&): dimension mismatch");
264 
265  dim_vector old_dims = a.dims ();
266  octave_idx_type new_nzmx = a.nnz ();
267  octave_idx_type new_nr = dv(0);
268  octave_idx_type new_nc = dv(1);
269  octave_idx_type old_nr = old_dims(0);
270  octave_idx_type old_nc = old_dims(1);
271 
272  rep = new typename Sparse<T>::SparseRep (new_nr, new_nc, new_nzmx);
273 
274  octave_idx_type kk = 0;
275  xcidx (0) = 0;
276  for (octave_idx_type i = 0; i < old_nc; i++)
277  for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++)
278  {
279  octave_idx_type tmp = i * old_nr + a.ridx (j);
280  octave_idx_type ii = tmp % new_nr;
281  octave_idx_type jj = (tmp - ii) / new_nr;
282  for (octave_idx_type k = kk; k < jj; k++)
283  xcidx (k+1) = j;
284  kk = jj;
285  xdata (j) = a.data (j);
286  xridx (j) = ii;
287  }
288  for (octave_idx_type k = kk; k < new_nc; k++)
289  xcidx (k+1) = new_nzmx;
290 }
291 
292 template <typename T>
293 Sparse<T>::Sparse (const Array<T>& a, const idx_vector& r,
294  const idx_vector& c, octave_idx_type nr,
295  octave_idx_type nc, bool sum_terms,
296  octave_idx_type nzm)
297  : rep (nullptr), dimensions ()
298 {
299  if (nr < 0)
300  nr = r.extent (0);
301  else if (r.extent (nr) > nr)
302  (*current_liboctave_error_handler)
303  ("sparse: row index %" OCTAVE_IDX_TYPE_FORMAT " out of bound "
304  "%" OCTAVE_IDX_TYPE_FORMAT, r.extent (nr), nr);
305 
306  if (nc < 0)
307  nc = c.extent (0);
308  else if (c.extent (nc) > nc)
309  (*current_liboctave_error_handler)
310  ("sparse: column index %" OCTAVE_IDX_TYPE_FORMAT " out of bound "
311  "%" OCTAVE_IDX_TYPE_FORMAT, c.extent (nc), nc);
312 
313  dimensions = dim_vector (nr, nc);
314 
315  octave_idx_type n = a.numel ();
316  octave_idx_type rl = r.length (nr);
317  octave_idx_type cl = c.length (nc);
318  bool a_scalar = n == 1;
319  if (a_scalar)
320  {
321  if (rl != 1)
322  n = rl;
323  else if (cl != 1)
324  n = cl;
325  }
326 
327  if ((rl != 1 && rl != n) || (cl != 1 && cl != n))
328  (*current_liboctave_error_handler) ("sparse: dimension mismatch");
329 
330  // Only create rep after input validation to avoid memory leak.
331  rep = new typename Sparse<T>::SparseRep (nr, nc, (nzm > 0 ? nzm : 0));
332 
333  if (rl <= 1 && cl <= 1)
334  {
335  if (n == 1 && a(0) != T ())
336  {
337  change_capacity (nzm > 1 ? nzm : 1);
338  xridx (0) = r(0);
339  xdata (0) = a(0);
340  std::fill_n (xcidx () + c(0) + 1, nc - c(0), 1);
341  }
342  }
343  else if (a_scalar)
344  {
345  // This is completely specialized, because the sorts can be simplified.
346  T a0 = a(0);
347  if (a0 == T ())
348  {
349  // Do nothing, it's an empty matrix.
350  }
351  else if (cl == 1)
352  {
353  // Sparse column vector. Sort row indices.
354  idx_vector rs = r.sorted ();
355 
356  octave_quit ();
357 
358  const octave_idx_type *rd = rs.raw ();
359  // Count unique indices.
360  octave_idx_type new_nz = 1;
361  for (octave_idx_type i = 1; i < n; i++)
362  new_nz += rd[i-1] != rd[i];
363 
364  // Allocate result.
365  change_capacity (nzm > new_nz ? nzm : new_nz);
366  std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
367 
368  octave_idx_type *rri = ridx ();
369  T *rrd = data ();
370 
371  octave_quit ();
372 
373  octave_idx_type k = -1;
374  octave_idx_type l = -1;
375 
376  if (sum_terms)
377  {
378  // Sum repeated indices.
379  for (octave_idx_type i = 0; i < n; i++)
380  {
381  if (rd[i] != l)
382  {
383  l = rd[i];
384  rri[++k] = rd[i];
385  rrd[k] = a0;
386  }
387  else
388  rrd[k] += a0;
389  }
390  }
391  else
392  {
393  // Pick the last one.
394  for (octave_idx_type i = 0; i < n; i++)
395  {
396  if (rd[i] != l)
397  {
398  l = rd[i];
399  rri[++k] = rd[i];
400  rrd[k] = a0;
401  }
402  }
403  }
404 
405  }
406  else
407  {
408  idx_vector rr = r;
409  idx_vector cc = c;
410  const octave_idx_type *rd = rr.raw ();
411  const octave_idx_type *cd = cc.raw ();
412  OCTAVE_LOCAL_BUFFER_INIT (octave_idx_type, ci, nc+1, 0);
413  ci[0] = 0;
414  // Bin counts of column indices.
415  for (octave_idx_type i = 0; i < n; i++)
416  ci[cd[i]+1]++;
417  // Make them cumulative, shifted one to right.
418  for (octave_idx_type i = 1, s = 0; i <= nc; i++)
419  {
420  octave_idx_type s1 = s + ci[i];
421  ci[i] = s;
422  s = s1;
423  }
424 
425  octave_quit ();
426 
427  // Bucket sort.
428  OCTAVE_LOCAL_BUFFER (octave_idx_type, sidx, n);
429  for (octave_idx_type i = 0; i < n; i++)
430  if (rl == 1)
431  sidx[ci[cd[i]+1]++] = rd[0];
432  else
433  sidx[ci[cd[i]+1]++] = rd[i];
434 
435  // Subsorts. We don't need a stable sort, all values are equal.
436  xcidx (0) = 0;
437  for (octave_idx_type j = 0; j < nc; j++)
438  {
439  std::sort (sidx + ci[j], sidx + ci[j+1]);
440  octave_idx_type l = -1;
441  octave_idx_type nzj = 0;
442  // Count.
443  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
444  {
445  octave_idx_type k = sidx[i];
446  if (k != l)
447  {
448  l = k;
449  nzj++;
450  }
451  }
452  // Set column pointer.
453  xcidx (j+1) = xcidx (j) + nzj;
454  }
455 
456  change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
457  octave_idx_type *rri = ridx ();
458  T *rrd = data ();
459 
460  // Fill-in data.
461  for (octave_idx_type j = 0, jj = -1; j < nc; j++)
462  {
463  octave_quit ();
464  octave_idx_type l = -1;
465  if (sum_terms)
466  {
467  // Sum adjacent terms.
468  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
469  {
470  octave_idx_type k = sidx[i];
471  if (k != l)
472  {
473  l = k;
474  rrd[++jj] = a0;
475  rri[jj] = k;
476  }
477  else
478  rrd[jj] += a0;
479  }
480  }
481  else
482  {
483  // Use the last one.
484  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
485  {
486  octave_idx_type k = sidx[i];
487  if (k != l)
488  {
489  l = k;
490  rrd[++jj] = a0;
491  rri[jj] = k;
492  }
493  }
494  }
495  }
496  }
497  }
498  else if (cl == 1)
499  {
500  // Sparse column vector. Sort row indices.
501  Array<octave_idx_type> rsi;
502  idx_vector rs = r.sorted (rsi);
503 
504  octave_quit ();
505 
506  const octave_idx_type *rd = rs.raw ();
507  const octave_idx_type *rdi = rsi.data ();
508  // Count unique indices.
509  octave_idx_type new_nz = 1;
510  for (octave_idx_type i = 1; i < n; i++)
511  new_nz += rd[i-1] != rd[i];
512 
513  // Allocate result.
514  change_capacity (nzm > new_nz ? nzm : new_nz);
515  std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
516 
517  octave_idx_type *rri = ridx ();
518  T *rrd = data ();
519 
520  octave_quit ();
521 
522  octave_idx_type k = 0;
523  rri[k] = rd[0];
524  rrd[k] = a(rdi[0]);
525 
526  if (sum_terms)
527  {
528  // Sum repeated indices.
529  for (octave_idx_type i = 1; i < n; i++)
530  {
531  if (rd[i] != rd[i-1])
532  {
533  rri[++k] = rd[i];
534  rrd[k] = a(rdi[i]);
535  }
536  else
537  rrd[k] += a(rdi[i]);
538  }
539  }
540  else
541  {
542  // Pick the last one.
543  for (octave_idx_type i = 1; i < n; i++)
544  {
545  if (rd[i] != rd[i-1])
546  rri[++k] = rd[i];
547  rrd[k] = a(rdi[i]);
548  }
549  }
550 
551  maybe_compress (true);
552  }
553  else
554  {
555  idx_vector rr = r;
556  idx_vector cc = c;
557  const octave_idx_type *rd = rr.raw ();
558  const octave_idx_type *cd = cc.raw ();
559  OCTAVE_LOCAL_BUFFER_INIT (octave_idx_type, ci, nc+1, 0);
560  ci[0] = 0;
561  // Bin counts of column indices.
562  for (octave_idx_type i = 0; i < n; i++)
563  ci[cd[i]+1]++;
564  // Make them cumulative, shifted one to right.
565  for (octave_idx_type i = 1, s = 0; i <= nc; i++)
566  {
567  octave_idx_type s1 = s + ci[i];
568  ci[i] = s;
569  s = s1;
570  }
571 
572  octave_quit ();
573 
574  typedef std::pair<octave_idx_type, octave_idx_type> idx_pair;
575  // Bucket sort.
576  OCTAVE_LOCAL_BUFFER (idx_pair, spairs, n);
577  for (octave_idx_type i = 0; i < n; i++)
578  {
579  idx_pair& p = spairs[ci[cd[i]+1]++];
580  if (rl == 1)
581  p.first = rd[0];
582  else
583  p.first = rd[i];
584  p.second = i;
585  }
586 
587  // Subsorts. We don't need a stable sort, the second index stabilizes it.
588  xcidx (0) = 0;
589  for (octave_idx_type j = 0; j < nc; j++)
590  {
591  std::sort (spairs + ci[j], spairs + ci[j+1]);
592  octave_idx_type l = -1;
593  octave_idx_type nzj = 0;
594  // Count.
595  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
596  {
597  octave_idx_type k = spairs[i].first;
598  if (k != l)
599  {
600  l = k;
601  nzj++;
602  }
603  }
604  // Set column pointer.
605  xcidx (j+1) = xcidx (j) + nzj;
606  }
607 
608  change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
609  octave_idx_type *rri = ridx ();
610  T *rrd = data ();
611 
612  // Fill-in data.
613  for (octave_idx_type j = 0, jj = -1; j < nc; j++)
614  {
615  octave_quit ();
616  octave_idx_type l = -1;
617  if (sum_terms)
618  {
619  // Sum adjacent terms.
620  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
621  {
622  octave_idx_type k = spairs[i].first;
623  if (k != l)
624  {
625  l = k;
626  rrd[++jj] = a(spairs[i].second);
627  rri[jj] = k;
628  }
629  else
630  rrd[jj] += a(spairs[i].second);
631  }
632  }
633  else
634  {
635  // Use the last one.
636  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
637  {
638  octave_idx_type k = spairs[i].first;
639  if (k != l)
640  {
641  l = k;
642  rri[++jj] = k;
643  }
644  rrd[jj] = a(spairs[i].second);
645  }
646  }
647  }
648 
649  maybe_compress (true);
650  }
651 }
652 
653 /*
654 %!assert <51880> (sparse (1:2, 2, 1:2, 2, 2), sparse ([0, 1; 0, 2]))
655 %!assert <51880> (sparse (1:2, 1, 1:2, 2, 2), sparse ([1, 0; 2, 0]))
656 %!assert <51880> (sparse (1:2, 2, 1:2, 2, 3), sparse ([0, 1, 0; 0, 2, 0]))
657 */
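// The embedded tests above cover bug #51880: a scalar column index passed
// to sparse (i, j, sv, m, n) must broadcast against vector row indices and
// values, including when it addresses a column other than the last one.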
658 
659 template <typename T>
660 Sparse<T>::Sparse (const Array<T>& a)
661  : rep (nullptr), dimensions (a.dims ())
662 {
663  if (dimensions.ndims () > 2)
664  (*current_liboctave_error_handler)
665  ("Sparse::Sparse (const Array<T>&): dimension mismatch");
666 
667  octave_idx_type nr = rows ();
668  octave_idx_type nc = cols ();
669  octave_idx_type len = a.numel ();
670  octave_idx_type new_nzmx = 0;
671 
672  // First count the number of nonzero terms
673  for (octave_idx_type i = 0; i < len; i++)
674  if (a(i) != T ())
675  new_nzmx++;
676 
677  rep = new typename Sparse<T>::SparseRep (nr, nc, new_nzmx);
678 
679  octave_idx_type ii = 0;
680  xcidx (0) = 0;
681  for (octave_idx_type j = 0; j < nc; j++)
682  {
683  for (octave_idx_type i = 0; i < nr; i++)
684  if (a.elem (i,j) != T ())
685  {
686  xdata (ii) = a.elem (i,j);
687  xridx (ii++) = i;
688  }
689  xcidx (j+1) = ii;
690  }
691 }
692 
693 template <typename T>
694 Sparse<T>::~Sparse (void)
695 {
696  if (--rep->count == 0)
697  delete rep;
698 }
699 
700 template <typename T>
701 Sparse<T>&
702 Sparse<T>::operator = (const Sparse<T>& a)
703 {
704  if (this != &a)
705  {
706  if (--rep->count == 0)
707  delete rep;
708 
709  rep = a.rep;
710  rep->count++;
711 
712  dimensions = a.dimensions;
713  }
714 
715  return *this;
716 }
717 
718 template <typename T>
719 octave_idx_type
720 Sparse<T>::compute_index (const Array<octave_idx_type>& ra_idx) const
721 {
722  octave_idx_type n = dimensions.ndims ();
723 
724  if (n <= 0 || n != ra_idx.numel ())
725  (*current_liboctave_error_handler)
726  ("Sparse<T>::compute_index: invalid ra_idxing operation");
727 
728  octave_idx_type retval = -1;
729 
730  retval = ra_idx(--n);
731 
732  while (--n >= 0)
733  {
734  retval *= dimensions(n);
735  retval += ra_idx(n);
736  }
737 
738  return retval;
739 }
740 
741 template <typename T>
742 T
743 Sparse<T>::range_error (const char *fcn, octave_idx_type n) const
744 {
745  (*current_liboctave_error_handler) ("%s (%" OCTAVE_IDX_TYPE_FORMAT "): "
746  "range error", fcn, n);
747 }
748 
749 template <typename T>
750 T&
751 Sparse<T>::range_error (const char *fcn, octave_idx_type n)
752 {
753  (*current_liboctave_error_handler) ("%s (%" OCTAVE_IDX_TYPE_FORMAT "): "
754  "range error", fcn, n);
755 }
756 
757 template <typename T>
758 T
759 Sparse<T>::range_error (const char *fcn, octave_idx_type i,
760  octave_idx_type j) const
761 {
762  (*current_liboctave_error_handler)
763  ("%s (%" OCTAVE_IDX_TYPE_FORMAT ", %" OCTAVE_IDX_TYPE_FORMAT "): "
764  "range error", fcn, i, j);
765 }
766 
767 template <typename T>
768 T&
769 Sparse<T>::range_error (const char *fcn, octave_idx_type i, octave_idx_type j)
770 {
771  (*current_liboctave_error_handler)
772  ("%s (%" OCTAVE_IDX_TYPE_FORMAT ", %" OCTAVE_IDX_TYPE_FORMAT "): "
773  "range error", fcn, i, j);
774 }
775 
776 template <typename T>
777 T
778 Sparse<T>::range_error (const char *fcn,
779  const Array<octave_idx_type>& ra_idx) const
780 {
781  std::ostringstream buf;
782 
783  buf << fcn << " (";
784 
785  octave_idx_type n = ra_idx.numel ();
786 
787  if (n > 0)
788  buf << ra_idx(0);
789 
790  for (octave_idx_type i = 1; i < n; i++)
791  buf << ", " << ra_idx(i);
792 
793  buf << "): range error";
794 
795  std::string buf_str = buf.str ();
796 
797  (*current_liboctave_error_handler) ("%s", buf_str.c_str ());
798 }
799 
800 template <typename T>
801 T&
802 Sparse<T>::range_error (const char *fcn, const Array<octave_idx_type>& ra_idx)
803 {
804  std::ostringstream buf;
805 
806  buf << fcn << " (";
807 
808  octave_idx_type n = ra_idx.numel ();
809 
810  if (n > 0)
811  buf << ra_idx(0);
812 
813  for (octave_idx_type i = 1; i < n; i++)
814  buf << ", " << ra_idx(i);
815 
816  buf << "): range error";
817 
818  std::string buf_str = buf.str ();
819 
820  (*current_liboctave_error_handler) ("%s", buf_str.c_str ());
821 }
822 
823 template <typename T>
824 Sparse<T>
825 Sparse<T>::reshape (const dim_vector& new_dims) const
826 {
827  Sparse<T> retval;
828  dim_vector dims2 = new_dims;
829 
830  if (dims2.ndims () > 2)
831  {
832  (*current_liboctave_warning_with_id_handler)
833  ("Octave:reshape-smashes-dims",
834  "reshape: sparse reshape to N-D array smashes dims");
835 
836  for (octave_idx_type i = 2; i < dims2.ndims (); i++)
837  dims2(1) *= dims2(i);
838 
839  dims2.resize (2);
840  }
841 
842  if (dimensions != dims2)
843  {
844  if (dimensions.numel () == dims2.numel ())
845  {
846  octave_idx_type new_nnz = nnz ();
847  octave_idx_type new_nr = dims2 (0);
848  octave_idx_type new_nc = dims2 (1);
849  octave_idx_type old_nr = rows ();
850  octave_idx_type old_nc = cols ();
851  retval = Sparse<T> (new_nr, new_nc, new_nnz);
852 
853  octave_idx_type kk = 0;
854  retval.xcidx (0) = 0;
855  // Quotient and remainder of i * old_nr divided by new_nr.
856  // Track them individually to avoid overflow (bug #42850).
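  // Invariant after the adjustment at the top of each iteration:
  // i_old_qu * new_nr + i_old_rm == i * old_nr with 0 <= i_old_rm < new_nr,
  // so ii and jj below match the original expressions without ever forming
  // the product i * old_nr.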
857  octave_idx_type i_old_qu = 0;
858  octave_idx_type i_old_rm = static_cast<octave_idx_type> (-old_nr);
859  for (octave_idx_type i = 0; i < old_nc; i++)
860  {
861  i_old_rm += old_nr;
862  if (i_old_rm >= new_nr)
863  {
864  i_old_qu += i_old_rm / new_nr;
865  i_old_rm = i_old_rm % new_nr;
866  }
867  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
868  {
869  octave_idx_type ii, jj;
870  ii = (i_old_rm + ridx (j)) % new_nr;
871  jj = i_old_qu + (i_old_rm + ridx (j)) / new_nr;
872 
873  // Original calculation subject to overflow
874  // ii = (i*old_nr + ridx (j)) % new_nr
875  // jj = (i*old_nr + ridx (j)) / new_nr
876  for (octave_idx_type k = kk; k < jj; k++)
877  retval.xcidx (k+1) = j;
878  kk = jj;
879  retval.xdata (j) = data (j);
880  retval.xridx (j) = ii;
881  }
882  }
883  for (octave_idx_type k = kk; k < new_nc; k++)
884  retval.xcidx (k+1) = new_nnz;
885  }
886  else
887  {
888  std::string dimensions_str = dimensions.str ();
889  std::string new_dims_str = new_dims.str ();
890 
891  (*current_liboctave_error_handler)
892  ("reshape: can't reshape %s array to %s array",
893  dimensions_str.c_str (), new_dims_str.c_str ());
894  }
895  }
896  else
897  retval = *this;
898 
899  return retval;
900 }
901 
902 template <typename T>
903 Sparse<T>
904 Sparse<T>::permute (const Array<octave_idx_type>& perm_vec, bool) const
905 {
906  // The only valid permutations of a sparse array are [1, 2] and [2, 1].
907 
908  bool fail = false;
909  bool trans = false;
910 
911  if (perm_vec.numel () == 2)
912  {
913  if (perm_vec(0) == 0 && perm_vec(1) == 1)
914  /* do nothing */;
915  else if (perm_vec(0) == 1 && perm_vec(1) == 0)
916  trans = true;
917  else
918  fail = true;
919  }
920  else
921  fail = true;
922 
923  if (fail)
924  (*current_liboctave_error_handler)
925  ("permutation vector contains an invalid element");
926 
927  return trans ? this->transpose () : *this;
928 }
929 
930 template <typename T>
931 void
932 Sparse<T>::resize1 (octave_idx_type n)
933 {
934  octave_idx_type nr = rows ();
935  octave_idx_type nc = cols ();
936 
937  if (nr == 0)
938  resize (1, std::max (nc, n));
939  else if (nc == 0)
940  resize (nr, (n + nr - 1) / nr); // Ain't it wicked?
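  // (n + nr - 1) / nr is integer ceil (n / nr): just enough columns of
  // height nr to hold at least n elements.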
941  else if (nr == 1)
942  resize (1, n);
943  else if (nc == 1)
944  resize (n, 1);
945  else
946  octave::err_invalid_resize ();
947 }
948 
949 template <typename T>
950 void
951 Sparse<T>::resize (const dim_vector& dv)
952 {
953  octave_idx_type n = dv.ndims ();
954 
955  if (n != 2)
956  (*current_liboctave_error_handler) ("sparse array must be 2-D");
957 
958  resize (dv(0), dv(1));
959 }
960 
961 template <typename T>
962 void
963 Sparse<T>::resize (octave_idx_type r, octave_idx_type c)
964 {
965  if (r < 0 || c < 0)
966  (*current_liboctave_error_handler) ("can't resize to negative dimension");
967 
968  if (r == dim1 () && c == dim2 ())
969  return;
970 
971  // This wouldn't be necessary for r >= rows () if nrows wasn't part of the
972  // Sparse rep. It is not good for anything in there.
973  make_unique ();
974 
975  if (r < rows ())
976  {
977  octave_idx_type i = 0;
978  octave_idx_type k = 0;
979  for (octave_idx_type j = 1; j <= rep->ncols; j++)
980  {
981  octave_idx_type u = xcidx (j);
982  for (; i < u; i++)
983  if (xridx (i) < r)
984  {
985  xdata (k) = xdata (i);
986  xridx (k++) = xridx (i);
987  }
988  xcidx (j) = k;
989  }
990  }
991 
992  rep->nrows = dimensions(0) = r;
993 
994  if (c != rep->ncols)
995  {
996  octave_idx_type *new_cidx = new octave_idx_type [c+1];
997  std::copy_n (rep->c, std::min (c, rep->ncols) + 1, new_cidx);
998  delete [] rep->c;
999  rep->c = new_cidx;
1000 
1001  if (c > rep->ncols)
1002  std::fill_n (rep->c + rep->ncols + 1, c - rep->ncols,
1003  rep->c[rep->ncols]);
1004  }
1005 
1006  rep->ncols = dimensions(1) = c;
1007 
1008  rep->change_length (rep->nnz ());
1009 }
1010 
1011 template <typename T>
1012 Sparse<T>&
1013 Sparse<T>::insert (const Sparse<T>& a, octave_idx_type r, octave_idx_type c)
1014 {
1015  octave_idx_type a_rows = a.rows ();
1016  octave_idx_type a_cols = a.cols ();
1017  octave_idx_type nr = rows ();
1018  octave_idx_type nc = cols ();
1019 
1020  if (r < 0 || r + a_rows > rows () || c < 0 || c + a_cols > cols ())
1021  (*current_liboctave_error_handler) ("range error for insert");
1022 
1023  // First count the number of elements in the final array
1024  octave_idx_type nel = cidx (c) + a.nnz ();
1025 
1026  if (c + a_cols < nc)
1027  nel += cidx (nc) - cidx (c + a_cols);
1028 
1029  for (octave_idx_type i = c; i < c + a_cols; i++)
1030  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1031  if (ridx (j) < r || ridx (j) >= r + a_rows)
1032  nel++;
1033 
1034  Sparse<T> tmp (*this);
1035  --rep->count;
1036  rep = new typename Sparse<T>::SparseRep (nr, nc, nel);
1037 
1038  for (octave_idx_type i = 0; i < tmp.cidx (c); i++)
1039  {
1040  data (i) = tmp.data (i);
1041  ridx (i) = tmp.ridx (i);
1042  }
1043  for (octave_idx_type i = 0; i < c + 1; i++)
1044  cidx (i) = tmp.cidx (i);
1045 
1046  octave_idx_type ii = cidx (c);
1047 
1048  for (octave_idx_type i = c; i < c + a_cols; i++)
1049  {
1050  octave_quit ();
1051 
1052  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1053  if (tmp.ridx (j) < r)
1054  {
1055  data (ii) = tmp.data (j);
1056  ridx (ii++) = tmp.ridx (j);
1057  }
1058 
1059  octave_quit ();
1060 
1061  for (octave_idx_type j = a.cidx (i-c); j < a.cidx (i-c+1); j++)
1062  {
1063  data (ii) = a.data (j);
1064  ridx (ii++) = r + a.ridx (j);
1065  }
1066 
1067  octave_quit ();
1068 
1069  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1070  if (tmp.ridx (j) >= r + a_rows)
1071  {
1072  data (ii) = tmp.data (j);
1073  ridx (ii++) = tmp.ridx (j);
1074  }
1075 
1076  cidx (i+1) = ii;
1077  }
1078 
1079  for (octave_idx_type i = c + a_cols; i < nc; i++)
1080  {
1081  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1082  {
1083  data (ii) = tmp.data (j);
1084  ridx (ii++) = tmp.ridx (j);
1085  }
1086  cidx (i+1) = ii;
1087  }
1088 
1089  return *this;
1090 }
1091 
1092 template <typename T>
1093 Sparse<T>&
1094 Sparse<T>::insert (const Sparse<T>& a, const Array<octave_idx_type>& ra_idx)
1095 {
1096 
1097  if (ra_idx.numel () != 2)
1098  (*current_liboctave_error_handler) ("range error for insert");
1099 
1100  return insert (a, ra_idx(0), ra_idx(1));
1101 }
1102 
1103 template <typename T>
1104 Sparse<T>
1105 Sparse<T>::transpose (void) const
1106 {
1107  assert (ndims () == 2);
1108 
1109  octave_idx_type nr = rows ();
1110  octave_idx_type nc = cols ();
1111  octave_idx_type nz = nnz ();
1112  Sparse<T> retval (nc, nr, nz);
1113 
1114  for (octave_idx_type i = 0; i < nz; i++)
1115  retval.xcidx (ridx (i) + 1)++;
1116  // retval.xcidx[1:nr] holds the row degrees for rows 0:(nr-1)
1117  nz = 0;
1118  for (octave_idx_type i = 1; i <= nr; i++)
1119  {
1120  const octave_idx_type tmp = retval.xcidx (i);
1121  retval.xcidx (i) = nz;
1122  nz += tmp;
1123  }
1124  // retval.xcidx[1:nr] holds row entry *start* offsets for rows 0:(nr-1)
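  // Worked example: for A = [1 0; 2 3] (cidx = {0,2,3}, ridx = {0,1,1}),
  // the degree count above gives xcidx = {0,1,2}, the shift pass turns it
  // into start offsets {0,0,1}, and the scatter loop below advances those
  // to the final column pointers {0,1,3} of the transposed matrix.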
1125 
1126  for (octave_idx_type j = 0; j < nc; j++)
1127  for (octave_idx_type k = cidx (j); k < cidx (j+1); k++)
1128  {
1129  octave_idx_type q = retval.xcidx (ridx (k) + 1)++;
1130  retval.xridx (q) = j;
1131  retval.xdata (q) = data (k);
1132  }
1133  assert (nnz () == retval.xcidx (nr));
1134  // retval.xcidx[1:nr] holds row entry *end* offsets for rows 0:(nr-1)
1135  // and retval.xcidx[0:(nr-1)] holds their row entry *start* offsets
1136 
1137  return retval;
1138 }
1139 
1140 // Lower bound lookup. Could also use octave_sort, but that has upper bound
1141 // semantics, so requires some manipulation to set right. Uses a plain loop
1142 // for small columns.
1143 static octave_idx_type
1144 lblookup (const octave_idx_type *ridx, octave_idx_type nr,
1145  octave_idx_type ri)
1146 {
1147  if (nr <= 8)
1148  {
1149  octave_idx_type l;
1150  for (l = 0; l < nr; l++)
1151  if (ridx[l] >= ri)
1152  break;
1153  return l;
1154  }
1155  else
1156  return std::lower_bound (ridx, ridx + nr, ri) - ridx;
1157 }
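// For example, with ridx = {0, 2, 5} and nr = 3, lblookup returns 1 for
// ri = 2 (the first entry >= 2), 2 for ri = 3, and 3 (== nr, meaning
// "not present, past the end") for ri = 6.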
1158 
1159 template <typename T>
1160 void
1161 Sparse<T>::delete_elements (const idx_vector& idx)
1162 {
1163  Sparse<T> retval;
1164 
1165  assert (ndims () == 2);
1166 
1167  octave_idx_type nr = dim1 ();
1168  octave_idx_type nc = dim2 ();
1169  octave_idx_type nz = nnz ();
1170 
1171  octave_idx_type nel = numel (); // Can throw.
1172 
1173  const dim_vector idx_dims = idx.orig_dimensions ();
1174 
1175  if (idx.extent (nel) > nel)
1176  octave::err_del_index_out_of_range (true, idx.extent (nel), nel);
1177 
1178  if (nc == 1)
1179  {
1180  // Sparse column vector.
1181  const Sparse<T> tmp = *this; // constant copy to prevent COW.
1182 
1183  octave_idx_type lb, ub;
1184 
1185  if (idx.is_cont_range (nel, lb, ub))
1186  {
1187  // Special-case a contiguous range.
1188  // Look-up indices first.
1189  octave_idx_type li = lblookup (tmp.ridx (), nz, lb);
1190  octave_idx_type ui = lblookup (tmp.ridx (), nz, ub);
1191  // Copy data and adjust indices.
1192  octave_idx_type nz_new = nz - (ui - li);
1193  *this = Sparse<T> (nr - (ub - lb), 1, nz_new);
1194  std::copy_n (tmp.data (), li, data ());
1195  std::copy_n (tmp.ridx (), li, xridx ());
1196  std::copy (tmp.data () + ui, tmp.data () + nz, xdata () + li);
1197  mx_inline_sub (nz - ui, xridx () + li, tmp.ridx () + ui, ub - lb);
1198  xcidx (1) = nz_new;
1199  }
1200  else
1201  {
1202  OCTAVE_LOCAL_BUFFER (octave_idx_type, ridx_new, nz);
1203  OCTAVE_LOCAL_BUFFER (T, data_new, nz);
1204  idx_vector sidx = idx.sorted (true);
1205  const octave_idx_type *sj = sidx.raw ();
1206  octave_idx_type sl = sidx.length (nel);
1207  octave_idx_type nz_new = 0;
1208  octave_idx_type j = 0;
1209  for (octave_idx_type i = 0; i < nz; i++)
1210  {
1211  octave_idx_type r = tmp.ridx (i);
1212  for (; j < sl && sj[j] < r; j++) ;
1213  if (j == sl || sj[j] > r)
1214  {
1215  data_new[nz_new] = tmp.data (i);
1216  ridx_new[nz_new++] = r - j;
1217  }
1218  }
1219 
1220  *this = Sparse<T> (nr - sl, 1, nz_new);
1221  std::copy_n (ridx_new, nz_new, ridx ());
1222  std::copy_n (data_new, nz_new, xdata ());
1223  xcidx (1) = nz_new;
1224  }
1225  }
1226  else if (nr == 1)
1227  {
1228  // Sparse row vector.
1229  octave_idx_type lb, ub;
1230  if (idx.is_cont_range (nc, lb, ub))
1231  {
1232  const Sparse<T> tmp = *this;
1233  octave_idx_type lbi = tmp.cidx (lb);
1234  octave_idx_type ubi = tmp.cidx (ub);
1235  octave_idx_type new_nz = nz - (ubi - lbi);
1236  *this = Sparse<T> (1, nc - (ub - lb), new_nz);
1237  std::copy_n (tmp.data (), lbi, data ());
1238  std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1239  std::fill_n (ridx (), new_nz, static_cast<octave_idx_type> (0));
1240  std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
1241  mx_inline_sub (nc - ub, xcidx () + 1, tmp.cidx () + ub + 1,
1242  ubi - lbi);
1243  }
1244  else
1245  *this = index (idx.complement (nc));
1246  }
1247  else if (idx.length (nel) != 0)
1248  {
1249  if (idx.is_colon_equiv (nel))
1250  *this = Sparse<T> ();
1251  else
1252  {
1253  *this = index (idx_vector::colon);
1254  delete_elements (idx);
1255  *this = transpose (); // We want a row vector.
1256  }
1257  }
1258 }
1259 
1260 template <typename T>
1261 void
1262 Sparse<T>::delete_elements (const idx_vector& idx_i, const idx_vector& idx_j)
1263 {
1264  assert (ndims () == 2);
1265 
1266  octave_idx_type nr = dim1 ();
1267  octave_idx_type nc = dim2 ();
1268  octave_idx_type nz = nnz ();
1269 
1270  if (idx_i.is_colon ())
1271  {
1272  // Deleting columns.
1273  octave_idx_type lb, ub;
1274  if (idx_j.extent (nc) > nc)
1275  octave::err_del_index_out_of_range (false, idx_j.extent (nc), nc);
1276  else if (idx_j.is_cont_range (nc, lb, ub))
1277  {
1278  if (lb == 0 && ub == nc)
1279  {
1280  // Delete all rows and columns.
1281  *this = Sparse<T> (nr, 0);
1282  }
1283  else if (nz == 0)
1284  {
1285  // No elements to preserve; adjust dimensions.
1286  *this = Sparse<T> (nr, nc - (ub - lb));
1287  }
1288  else
1289  {
1290  const Sparse<T> tmp = *this;
1291  octave_idx_type lbi = tmp.cidx (lb);
1292  octave_idx_type ubi = tmp.cidx (ub);
1293  octave_idx_type new_nz = nz - (ubi - lbi);
1294 
1295  *this = Sparse<T> (nr, nc - (ub - lb), new_nz);
1296  std::copy_n (tmp.data (), lbi, data ());
1297  std::copy_n (tmp.ridx (), lbi, ridx ());
1298  std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1299  std::copy (tmp.ridx () + ubi, tmp.ridx () + nz, xridx () + lbi);
1300  std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
1301  mx_inline_sub (nc - ub, xcidx () + lb + 1,
1302  tmp.cidx () + ub + 1, ubi - lbi);
1303  }
1304  }
1305  else
1306  *this = index (idx_i, idx_j.complement (nc));
1307  }
1308  else if (idx_j.is_colon ())
1309  {
1310  // Deleting rows.
1311  octave_idx_type lb, ub;
1312  if (idx_i.extent (nr) > nr)
1313  octave::err_del_index_out_of_range (false, idx_i.extent (nr), nr);
1314  else if (idx_i.is_cont_range (nr, lb, ub))
1315  {
1316  if (lb == 0 && ub == nr)
1317  {
1318  // Delete all rows and columns.
1319  *this = Sparse<T> (0, nc);
1320  }
1321  else if (nz == 0)
1322  {
1323  // No elements to preserve; adjust dimensions.
1324  *this = Sparse<T> (nr - (ub - lb), nc);
1325  }
1326  else
1327  {
1328  // This is more memory-efficient than the approach below.
1329  const Sparse<T> tmpl = index (idx_vector (0, lb), idx_j);
1330  const Sparse<T> tmpu = index (idx_vector (ub, nr), idx_j);
1331  *this = Sparse<T> (nr - (ub - lb), nc,
1332  tmpl.nnz () + tmpu.nnz ());
1333  for (octave_idx_type j = 0, k = 0; j < nc; j++)
1334  {
1335  for (octave_idx_type i = tmpl.cidx (j); i < tmpl.cidx (j+1);
1336  i++)
1337  {
1338  xdata (k) = tmpl.data (i);
1339  xridx (k++) = tmpl.ridx (i);
1340  }
1341  for (octave_idx_type i = tmpu.cidx (j); i < tmpu.cidx (j+1);
1342  i++)
1343  {
1344  xdata (k) = tmpu.data (i);
1345  xridx (k++) = tmpu.ridx (i) + lb;
1346  }
1347 
1348  xcidx (j+1) = k;
1349  }
1350  }
1351  }
1352  else
1353  {
1354  // This is done by transposing, deleting columns, then transposing
1355  // again.
1356  Sparse<T> tmp = transpose ();
1357  tmp.delete_elements (idx_j, idx_i);
1358  *this = tmp.transpose ();
1359  }
1360  }
1361  else
1362  {
1363  // Empty assignment (no elements to delete) is OK if there is at
1364  // least one zero-length index and at most one other index that is
1365  // non-colon (or equivalent) index. Since we only have two
1366  // indices, we just need to check that we have at least one zero
1367  // length index. Matlab considers "[]" to be an empty index but
1368  // not "false". We accept both.
1369 
1370  bool empty_assignment
1371  = (idx_i.length (nr) == 0 || idx_j.length (nc) == 0);
1372 
1373  if (! empty_assignment)
1374  (*current_liboctave_error_handler)
1375  ("a null assignment can only have one non-colon index");
1376  }
1377 }
1378 
1379 template <typename T>
1380 void
1381 Sparse<T>::delete_elements (int dim, const idx_vector& idx)
1382 {
1383  if (dim == 0)
1384  delete_elements (idx, idx_vector::colon);
1385  else if (dim == 1)
1386  delete_elements (idx_vector::colon, idx);
1387  else
1388  (*current_liboctave_error_handler) ("invalid dimension in delete_elements");
1389 }
1390 
1391 template <typename T>
1392 Sparse<T>
1393 Sparse<T>::index (const idx_vector& idx, bool resize_ok) const
1394 {
1395  Sparse<T> retval;
1396 
1397  assert (ndims () == 2);
1398 
1399  octave_idx_type nr = dim1 ();
1400  octave_idx_type nc = dim2 ();
1401  octave_idx_type nz = nnz ();
1402 
1403  octave_idx_type nel = numel (); // Can throw.
1404 
1405  const dim_vector idx_dims = idx.orig_dimensions ().redim (2);
1406 
1407  if (idx.is_colon ())
1408  {
1409  if (nc == 1)
1410  retval = *this;
1411  else
1412  {
1413  // Fast magic colon processing.
1414  retval = Sparse<T> (nel, 1, nz);
1415 
1416  for (octave_idx_type i = 0; i < nc; i++)
1417  {
1418  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1419  {
1420  retval.xdata (j) = data (j);
1421  retval.xridx (j) = ridx (j) + i * nr;
1422  }
1423  }
1424 
1425  retval.xcidx (0) = 0;
1426  retval.xcidx (1) = nz;
1427  }
1428  }
1429  else if (idx.extent (nel) > nel)
1430  {
1431  if (! resize_ok)
1432  octave::err_index_out_of_range (1, 1, idx.extent (nel), nel, dims ());
1433 
1434  // resize_ok is completely handled here.
1435  octave_idx_type ext = idx.extent (nel);
1436  Sparse<T> tmp = *this;
1437  tmp.resize1 (ext);
1438  retval = tmp.index (idx);
1439  }
1440  else if (nr == 1 && nc == 1)
1441  {
1442  // You have to be pretty sick to get to this bit of code,
1443  // since you have a scalar stored as a sparse matrix, and
1444  // then want to make a dense matrix with sparse representation.
1445  // Ok, we'll do it, but you deserve what you get!!
1446  retval = (Sparse<T> (idx_dims(0), idx_dims(1), nz ? data (0) : T ()));
1447  }
1448  else if (nc == 1)
1449  {
1450  // Sparse column vector.
1451  octave_idx_type lb, ub;
1452 
1453  if (idx.is_scalar ())
1454  {
1455  // Scalar index - just a binary lookup.
1456  octave_idx_type i = lblookup (ridx (), nz, idx(0));
1457  if (i < nz && ridx (i) == idx(0))
1458  retval = Sparse (1, 1, data (i));
1459  else
1460  retval = Sparse (1, 1);
1461  }
1462  else if (idx.is_cont_range (nel, lb, ub))
1463  {
1464  // Special-case a contiguous range.
1465  // Look-up indices first.
1466  octave_idx_type li = lblookup (ridx (), nz, lb);
1467  octave_idx_type ui = lblookup (ridx (), nz, ub);
1468  // Copy data and adjust indices.
1469  octave_idx_type nz_new = ui - li;
1470  retval = Sparse<T> (ub - lb, 1, nz_new);
1471  std::copy_n (data () + li, nz_new, retval.data ());
1472  mx_inline_sub (nz_new, retval.xridx (), ridx () + li, lb);
1473  retval.xcidx (1) = nz_new;
1474  }
1475  else if (idx.is_permutation (nel) && idx.isvector ())
1476  {
1477  if (idx.is_range () && idx.increment () == -1)
1478  {
1479  retval = Sparse<T> (nr, 1, nz);
1480 
1481  for (octave_idx_type j = 0; j < nz; j++)
1482  retval.ridx (j) = nr - ridx (nz - j - 1) - 1;
1483 
1484  std::copy_n (cidx (), 2, retval.cidx ());
1485  std::reverse_copy (data (), data () + nz, retval.data ());
1486  }
1487  else
1488  {
1489  Array<T> tmp = array_value ();
1490  tmp = tmp.index (idx);
1491  retval = Sparse<T> (tmp);
1492  }
1493  }
1494  else
1495  {
1496  // If indexing a sparse column vector by a vector, the result is a
1497  // sparse column vector, otherwise it inherits the shape of index.
1498  // Vector transpose is cheap, so do it right here.
1499 
1500  Array<octave_idx_type> tmp_idx = idx.as_array ().as_matrix ();
1501 
1502  const Array<octave_idx_type> idxa = (idx_dims(0) == 1
1503  ? tmp_idx.transpose ()
1504  : tmp_idx);
1505 
1506  octave_idx_type new_nr = idxa.rows ();
1507  octave_idx_type new_nc = idxa.cols ();
1508 
1509  // Lookup.
1510  // FIXME: Could specialize for sorted idx?
1511  Array<octave_idx_type> lidx (dim_vector (new_nr, new_nc));
1512  for (octave_idx_type i = 0; i < new_nr*new_nc; i++)
1513  lidx.xelem (i) = lblookup (ridx (), nz, idxa(i));
1514 
1515  // Count matches.
1516  retval = Sparse<T> (idxa.rows (), idxa.cols ());
1517  for (octave_idx_type j = 0; j < new_nc; j++)
1518  {
1519  octave_idx_type nzj = 0;
1520  for (octave_idx_type i = 0; i < new_nr; i++)
1521  {
1522  octave_idx_type l = lidx.xelem (i, j);
1523  if (l < nz && ridx (l) == idxa(i, j))
1524  nzj++;
1525  else
1526  lidx.xelem (i, j) = nz;
1527  }
1528  retval.xcidx (j+1) = retval.xcidx (j) + nzj;
1529  }
1530 
1531  retval.change_capacity (retval.xcidx (new_nc));
1532 
1533  // Copy data and set row indices.
1534  octave_idx_type k = 0;
1535  for (octave_idx_type j = 0; j < new_nc; j++)
1536  for (octave_idx_type i = 0; i < new_nr; i++)
1537  {
1538  octave_idx_type l = lidx.xelem (i, j);
1539  if (l < nz)
1540  {
1541  retval.data (k) = data (l);
1542  retval.xridx (k++) = i;
1543  }
1544  }
1545  }
1546  }
1547  else if (nr == 1)
1548  {
1549  octave_idx_type lb, ub;
1550  if (idx.is_scalar ())
1551  retval = Sparse<T> (1, 1, elem (0, idx(0)));
1552  else if (idx.is_cont_range (nel, lb, ub))
1553  {
1554  // Special-case a contiguous range.
1555  octave_idx_type lbi = cidx (lb);
1556  octave_idx_type ubi = cidx (ub);
1557  octave_idx_type new_nz = ubi - lbi;
1558  retval = Sparse<T> (1, ub - lb, new_nz);
1559  std::copy_n (data () + lbi, new_nz, retval.data ());
1560  std::fill_n (retval.ridx (), new_nz, static_cast<octave_idx_type> (0));
1561  mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1562  }
1563  else
1564  {
1565  // Sparse row vectors occupy O(nr) storage anyway, so let's just
1566  // convert the matrix to full, index, and sparsify the result.
1567  retval = Sparse<T> (array_value ().index (idx));
1568  }
1569  }
1570  else
1571  {
1572  if (nr != 0 && idx.is_scalar ())
1573  retval = Sparse<T> (1, 1, elem (idx(0) % nr, idx(0) / nr));
1574  else
1575  {
1576  // Indexing a non-vector sparse matrix by linear indexing.
1577  // I suppose this is rare (and it may easily overflow), so let's take
1578  // the easy way, and reshape first to column vector, which is already
1579  // handled above.
1580  retval = index (idx_vector::colon).index (idx);
1581  // In this case we're supposed to always inherit the shape, but
1582  // column(row) doesn't do it, so we'll do it instead.
1583  if (idx_dims(0) == 1 && idx_dims(1) != 1)
1584  retval = retval.transpose ();
1585  }
1586  }
1587 
1588  return retval;
1589 }
1590 
1591 template <typename T>
1592 Sparse<T>
1593 Sparse<T>::index (const idx_vector& idx_i, const idx_vector& idx_j,
1594  bool resize_ok) const
1595 {
1596  Sparse<T> retval;
1597 
1598  assert (ndims () == 2);
1599 
1600  octave_idx_type nr = dim1 ();
1601  octave_idx_type nc = dim2 ();
1602 
1603  octave_idx_type n = idx_i.length (nr);
1604  octave_idx_type m = idx_j.length (nc);
1605 
1606  octave_idx_type lb, ub;
1607 
1608  if (idx_i.extent (nr) > nr || idx_j.extent (nc) > nc)
1609  {
1610  // resize_ok is completely handled here.
1611  if (resize_ok)
1612  {
1613  octave_idx_type ext_i = idx_i.extent (nr);
1614  octave_idx_type ext_j = idx_j.extent (nc);
1615  Sparse<T> tmp = *this;
1616  tmp.resize (ext_i, ext_j);
1617  retval = tmp.index (idx_i, idx_j);
1618  }
1619  else if (idx_i.extent (nr) > nr)
1620  octave::err_index_out_of_range (2, 1, idx_i.extent (nr), nr, dims ());
1621  else
1622  octave::err_index_out_of_range (2, 2, idx_j.extent (nc), nc, dims ());
1623  }
1624  else if (nr == 1 && nc == 1)
1625  {
1626  // Scalars stored as sparse matrices occupy more memory than
1627  // a scalar, so let's just convert the matrix to full, index,
1628  // and sparsify the result.
1629 
1630  retval = Sparse<T> (array_value ().index (idx_i, idx_j));
1631  }
1632  else if (idx_i.is_colon ())
1633  {
1634  // Great, we're just manipulating columns. This is going to be quite
1635  // efficient, because the columns can stay compressed as they are.
1636  if (idx_j.is_colon ())
1637  retval = *this; // Shallow copy.
1638  else if (idx_j.is_cont_range (nc, lb, ub))
1639  {
1640  // Special-case a contiguous range.
1641  octave_idx_type lbi = cidx (lb);
1642  octave_idx_type ubi = cidx (ub);
1643  octave_idx_type new_nz = ubi - lbi;
1644  retval = Sparse<T> (nr, ub - lb, new_nz);
1645  std::copy_n (data () + lbi, new_nz, retval.data ());
1646  std::copy_n (ridx () + lbi, new_nz, retval.ridx ());
1647  mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1648  }
1649  else
1650  {
1651  // Count new nonzero elements.
1652  retval = Sparse<T> (nr, m);
1653  for (octave_idx_type j = 0; j < m; j++)
1654  {
1655  octave_idx_type jj = idx_j(j);
1656  retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1657  }
1658 
1659  retval.change_capacity (retval.xcidx (m));
1660 
1661  // Copy data & indices.
1662  for (octave_idx_type j = 0; j < m; j++)
1663  {
1664  octave_idx_type ljj = cidx (idx_j(j));
1665  octave_idx_type lj = retval.xcidx (j);
1666  octave_idx_type nzj = retval.xcidx (j+1) - lj;
1667 
1668  std::copy_n (data () + ljj, nzj, retval.data () + lj);
1669  std::copy_n (ridx () + ljj, nzj, retval.ridx () + lj);
1670  }
1671  }
1672  }
1673  else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
1674  {
1675  // It's actually vector indexing. The 1D index is specialized for that.
1676  retval = index (idx_i);
1677 
1678  // If nr == 1 then the vector indexing will return a column vector!!
1679  if (nr == 1)
1680  retval.transpose ();
1681  }
1682  else if (idx_i.is_scalar ())
1683  {
1684  octave_idx_type ii = idx_i(0);
1685  retval = Sparse<T> (1, m);
1686  OCTAVE_LOCAL_BUFFER (octave_idx_type, ij, m);
1687  for (octave_idx_type j = 0; j < m; j++)
1688  {
1689  octave_quit ();
1690  octave_idx_type jj = idx_j(j);
1691  octave_idx_type lj = cidx (jj);
1692  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1693 
1694  // Scalar index - just a binary lookup.
1695  octave_idx_type i = lblookup (ridx () + lj, nzj, ii);
1696  if (i < nzj && ridx (i+lj) == ii)
1697  {
1698  ij[j] = i + lj;
1699  retval.xcidx (j+1) = retval.xcidx (j) + 1;
1700  }
1701  else
1702  retval.xcidx (j+1) = retval.xcidx (j);
1703  }
1704 
1705  retval.change_capacity (retval.xcidx (m));
1706 
1707  // Copy data, adjust row indices.
1708  for (octave_idx_type j = 0; j < m; j++)
1709  {
1710  octave_idx_type i = retval.xcidx (j);
1711  if (retval.xcidx (j+1) > i)
1712  {
1713  retval.xridx (i) = 0;
1714  retval.xdata (i) = data (ij[j]);
1715  }
1716  }
1717  }
1718  else if (idx_i.is_cont_range (nr, lb, ub))
1719  {
1720  retval = Sparse<T> (n, m);
1721  OCTAVE_LOCAL_BUFFER (octave_idx_type, li, m);
1722  OCTAVE_LOCAL_BUFFER (octave_idx_type, ui, m);
1723  for (octave_idx_type j = 0; j < m; j++)
1724  {
1725  octave_quit ();
1726  octave_idx_type jj = idx_j(j);
1727  octave_idx_type lj = cidx (jj);
1728  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1729  octave_idx_type lij, uij;
1730 
1731  // Lookup indices.
1732  li[j] = lij = lblookup (ridx () + lj, nzj, lb) + lj;
1733  ui[j] = uij = lblookup (ridx () + lj, nzj, ub) + lj;
1734  retval.xcidx (j+1) = retval.xcidx (j) + ui[j] - li[j];
1735  }
1736 
1737  retval.change_capacity (retval.xcidx (m));
1738 
1739  // Copy data, adjust row indices.
1740  for (octave_idx_type j = 0, k = 0; j < m; j++)
1741  {
1742  octave_quit ();
1743  for (octave_idx_type i = li[j]; i < ui[j]; i++)
1744  {
1745  retval.xdata (k) = data (i);
1746  retval.xridx (k++) = ridx (i) - lb;
1747  }
1748  }
1749  }
1750  else if (idx_i.is_permutation (nr))
1751  {
1752  // The columns preserve their length, just need to renumber and sort them.
1753  // Count new nonzero elements.
1754  retval = Sparse<T> (nr, m);
1755  for (octave_idx_type j = 0; j < m; j++)
1756  {
1757  octave_idx_type jj = idx_j(j);
1758  retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1759  }
1760 
1761  retval.change_capacity (retval.xcidx (m));
1762 
1763  octave_quit ();
1764 
1765  if (idx_i.is_range () && idx_i.increment () == -1)
1766  {
1767  // It's nr:-1:1. Just flip all columns.
1768  for (octave_idx_type j = 0; j < m; j++)
1769  {
1770  octave_quit ();
1771  octave_idx_type jj = idx_j(j);
1772  octave_idx_type lj = cidx (jj);
1773  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1774  octave_idx_type li = retval.xcidx (j);
1775  octave_idx_type uj = lj + nzj - 1;
1776  for (octave_idx_type i = 0; i < nzj; i++)
1777  {
1778  retval.xdata (li + i) = data (uj - i); // Copy in reverse order.
1779  retval.xridx (li + i) = nr - 1 - ridx (uj - i); // Ditto with transform.
1780  }
1781  }
1782  }
1783  else
1784  {
1785  // Get inverse permutation.
1786  idx_vector idx_iinv = idx_i.inverse_permutation (nr);
1787  const octave_idx_type *iinv = idx_iinv.raw ();
1788 
1789  // Scatter buffer.
1790  OCTAVE_LOCAL_BUFFER (T, scb, nr);
1791  octave_idx_type *rri = retval.ridx ();
1792 
1793  for (octave_idx_type j = 0; j < m; j++)
1794  {
1795  octave_quit ();
1796  octave_idx_type jj = idx_j(j);
1797  octave_idx_type lj = cidx (jj);
1798  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1799  octave_idx_type li = retval.xcidx (j);
1800  // Scatter the column, transform indices.
1801  for (octave_idx_type i = 0; i < nzj; i++)
1802  scb[rri[li + i] = iinv[ridx (lj + i)]] = data (lj + i);
1803 
1804  octave_quit ();
1805 
1806  // Sort them.
1807  std::sort (rri + li, rri + li + nzj);
1808 
1809  // Gather.
1810  for (octave_idx_type i = 0; i < nzj; i++)
1811  retval.xdata (li + i) = scb[rri[li + i]];
1812  }
1813  }
1814 
1815  }
1816  else if (idx_j.is_colon ())
1817  {
1818  // This requires uncompressing columns, which is generally costly,
1819  // so we rely on the efficient transpose to handle this.
1820  // It may still make sense to optimize some cases here.
1821  retval = transpose ();
1822  retval = retval.index (idx_vector::colon, idx_i);
1823  retval = retval.transpose ();
1824  }
1825  else
1826  {
1827  // A(I, J) is decomposed into A(:, J)(I, :).
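  // That is: first extract the requested columns (cheap, the columns stay
  // compressed), then index the rows of that intermediate result.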
1828  retval = index (idx_vector::colon, idx_j);
1829  retval = retval.index (idx_i, idx_vector::colon);
1830  }
1831 
1832  return retval;
1833 }
1834 
1835 template <typename T>
1836 void
1837 Sparse<T>::assign (const idx_vector& idx, const Sparse<T>& rhs)
1838 {
1839  Sparse<T> retval;
1840 
1841  assert (ndims () == 2);
1842 
1843  octave_idx_type nr = dim1 ();
1844  octave_idx_type nc = dim2 ();
1845  octave_idx_type nz = nnz ();
1846 
1847  octave_idx_type n = numel (); // Can throw.
1848 
1849  octave_idx_type rhl = rhs.numel ();
1850 
1851  if (idx.length (n) == rhl)
1852  {
1853  if (rhl == 0)
1854  return;
1855 
1856  octave_idx_type nx = idx.extent (n);
1857  // Try to resize first if necessary.
1858  if (nx != n)
1859  {
1860  resize1 (nx);
1861  n = numel ();
1862  nr = rows ();
1863  nc = cols ();
1864  // nz is preserved.
1865  }
1866 
1867  if (idx.is_colon ())
1868  {
1869  *this = rhs.reshape (dimensions);
1870  }
1871  else if (nc == 1 && rhs.cols () == 1)
1872  {
1873  // Sparse column vector to sparse column vector assignment.
1874 
1875  octave_idx_type lb, ub;
1876  if (idx.is_cont_range (nr, lb, ub))
1877  {
1878  // Special-case a contiguous range.
1879  // Look-up indices first.
1880  octave_idx_type li = lblookup (ridx (), nz, lb);
1881  octave_idx_type ui = lblookup (ridx (), nz, ub);
1882  octave_idx_type rnz = rhs.nnz ();
1883  octave_idx_type new_nz = nz - (ui - li) + rnz;
1884 
1885  if (new_nz >= nz && new_nz <= nzmax ())
1886  {
1887  // Adding/overwriting elements, enough capacity allocated.
1888 
1889  if (new_nz > nz)
1890  {
1891  // Make room first.
1892  std::copy_backward (data () + ui, data () + nz,
1893  data () + nz + rnz);
1894  std::copy_backward (ridx () + ui, ridx () + nz,
1895  ridx () + nz + rnz);
1896  }
1897 
1898  // Copy data and adjust indices from rhs.
1899  std::copy_n (rhs.data (), rnz, data () + li);
1900  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1901  }
1902  else
1903  {
1904  // Clearing elements or exceeding capacity, allocate afresh
1905  // and paste pieces.
1906  const Sparse<T> tmp = *this;
1907  *this = Sparse<T> (nr, 1, new_nz);
1908 
1909  // Head ...
1910  std::copy_n (tmp.data (), li, data ());
1911  std::copy_n (tmp.ridx (), li, ridx ());
1912 
1913  // new stuff ...
1914  std::copy_n (rhs.data (), rnz, data () + li);
1915  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1916 
1917  // ...tail
1918  std::copy (tmp.data () + ui, tmp.data () + nz,
1919  data () + li + rnz);
1920  std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
1921  ridx () + li + rnz);
1922  }
1923 
1924  cidx (1) = new_nz;
1925  }
1926  else if (idx.is_range () && idx.increment () == -1)
1927  {
1928  // It's s(u:-1:l) = r. Reverse the assignment.
1929  assign (idx.sorted (), rhs.index (idx_vector (rhl - 1, 0, -1)));
1930  }
1931  else if (idx.is_permutation (n))
1932  {
1933  *this = rhs.index (idx.inverse_permutation (n));
1934  }
1935  else if (rhs.nnz () == 0)
1936  {
1937  // Elements are being zeroed.
1938  octave_idx_type *ri = ridx ();
1939  for (octave_idx_type i = 0; i < rhl; i++)
1940  {
1941  octave_idx_type iidx = idx(i);
1942  octave_idx_type li = lblookup (ri, nz, iidx);
1943  if (li != nz && ri[li] == iidx)
1944  xdata (li) = T ();
1945  }
1946 
1947  maybe_compress (true);
1948  }
1949  else
1950  {
1951  const Sparse<T> tmp = *this;
1952  octave_idx_type new_nz = nz + rhl;
1953  // Disassemble our matrix...
1954  Array<octave_idx_type> new_ri (dim_vector (new_nz, 1));
1955  Array<T> new_data (dim_vector (new_nz, 1));
1956  std::copy_n (tmp.ridx (), nz, new_ri.fortran_vec ());
1957  std::copy_n (tmp.data (), nz, new_data.fortran_vec ());
1958  // ... insert new data (densified) ...
1959  idx.copy_data (new_ri.fortran_vec () + nz);
1960  new_data.assign (idx_vector (nz, new_nz), rhs.array_value ());
1961  // ... reassembly.
1962  *this = Sparse<T> (new_data, new_ri,
1963  static_cast<octave_idx_type> (0),
1964  nr, nc, false);
1965  }
1966  }
1967  else
1968  {
1969  dim_vector save_dims = dimensions;
1970  *this = index (idx_vector::colon);
1971  assign (idx, rhs.index (idx_vector::colon));
1972  *this = reshape (save_dims);
1973  }
1974  }
1975  else if (rhl == 1)
1976  {
1977  rhl = idx.length (n);
1978  if (rhs.nnz () != 0)
1979  assign (idx, Sparse<T> (rhl, 1, rhs.data (0)));
1980  else
1981  assign (idx, Sparse<T> (rhl, 1));
1982  }
1983  else
1984  octave::err_nonconformant ("=", dim_vector(idx.length (n),1), rhs.dims());
1985 }
1986 
1987 template <typename T>
1988 void
1989 Sparse<T>::assign (const idx_vector& idx_i,
1990  const idx_vector& idx_j, const Sparse<T>& rhs)
1991 {
1992  Sparse<T> retval;
1993 
1994  assert (ndims () == 2);
1995 
1996  octave_idx_type nr = dim1 ();
1997  octave_idx_type nc = dim2 ();
1998  octave_idx_type nz = nnz ();
1999 
2000  octave_idx_type n = rhs.rows ();
2001  octave_idx_type m = rhs.columns ();
2002 
2003  // FIXME: this should probably be written more like the
2004  // Array<T>::assign function...
2005 
2006  bool orig_zero_by_zero = (nr == 0 && nc == 0);
2007 
2008  if (orig_zero_by_zero || (idx_i.length (nr) == n && idx_j.length (nc) == m))
2009  {
2010  octave_idx_type nrx;
2011  octave_idx_type ncx;
2012 
2013  if (orig_zero_by_zero)
2014  {
2015  if (idx_i.is_colon ())
2016  {
2017  nrx = n;
2018 
2019  if (idx_j.is_colon ())
2020  ncx = m;
2021  else
2022  ncx = idx_j.extent (nc);
2023  }
2024  else if (idx_j.is_colon ())
2025  {
2026  nrx = idx_i.extent (nr);
2027  ncx = m;
2028  }
2029  else
2030  {
2031  nrx = idx_i.extent (nr);
2032  ncx = idx_j.extent (nc);
2033  }
2034  }
2035  else
2036  {
2037  nrx = idx_i.extent (nr);
2038  ncx = idx_j.extent (nc);
2039  }
2040 
2041  // Try to resize first if necessary.
2042  if (nrx != nr || ncx != nc)
2043  {
2044  resize (nrx, ncx);
2045  nr = rows ();
2046  nc = cols ();
2047  // nz is preserved.
2048  }
2049 
2050  if (n == 0 || m == 0)
2051  return;
2052 
2053  if (idx_i.is_colon ())
2054  {
2055  octave_idx_type lb, ub;
2056  // Great, we're just manipulating columns. This is going to be quite
2057  // efficient, because the columns can stay compressed as they are.
2058  if (idx_j.is_colon ())
2059  *this = rhs; // Shallow copy.
2060  else if (idx_j.is_cont_range (nc, lb, ub))
2061  {
2062  // Special-case a contiguous range.
2063  octave_idx_type li = cidx (lb);
2064  octave_idx_type ui = cidx (ub);
2065  octave_idx_type rnz = rhs.nnz ();
2066  octave_idx_type new_nz = nz - (ui - li) + rnz;
2067 
2068  if (new_nz >= nz && new_nz <= nzmax ())
2069  {
2070  // Adding/overwriting elements, enough capacity allocated.
2071 
2072  if (new_nz > nz)
2073  {
2074  // Make room first.
2075  std::copy_backward (data () + ui, data () + nz,
2076  data () + new_nz);
2077  std::copy_backward (ridx () + ui, ridx () + nz,
2078  ridx () + new_nz);
2079  mx_inline_add2 (nc - ub, cidx () + ub + 1, new_nz - nz);
2080  }
2081 
2082  // Copy data and indices from rhs.
2083  std::copy_n (rhs.data (), rnz, data () + li);
2084  std::copy_n (rhs.ridx (), rnz, ridx () + li);
2085  mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2086  li);
2087 
2088  assert (nnz () == new_nz);
2089  }
2090  else
2091  {
2092  // Clearing elements or exceeding capacity, allocate afresh
2093  // and paste pieces.
2094  const Sparse<T> tmp = *this;
2095  *this = Sparse<T> (nr, nc, new_nz);
2096 
2097  // Head...
2098  std::copy_n (tmp.data (), li, data ());
2099  std::copy_n (tmp.ridx (), li, ridx ());
2100  std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
2101 
2102  // new stuff...
2103  std::copy_n (rhs.data (), rnz, data () + li);
2104  std::copy_n (rhs.ridx (), rnz, ridx () + li);
2105  mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2106  li);
2107 
2108  // ...tail.
2109  std::copy (tmp.data () + ui, tmp.data () + nz,
2110  data () + li + rnz);
2111  std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
2112  ridx () + li + rnz);
2113  mx_inline_add (nc - ub, cidx () + ub + 1,
2114  tmp.cidx () + ub + 1, new_nz - nz);
2115 
2116  assert (nnz () == new_nz);
2117  }
2118  }
2119  else if (idx_j.is_range () && idx_j.increment () == -1)
2120  {
2121  // It's s(:,u:-1:l) = r. Reverse the assignment.
2122  assign (idx_i, idx_j.sorted (),
2123  rhs.index (idx_i, idx_vector (m - 1, 0, -1)));
2124  }
2125  else if (idx_j.is_permutation (nc))
2126  {
2127  *this = rhs.index (idx_i, idx_j.inverse_permutation (nc));
2128  }
2129  else
2130  {
2131  const Sparse<T> tmp = *this;
2132  *this = Sparse<T> (nr, nc);
2133  OCTAVE_LOCAL_BUFFER_INIT (octave_idx_type, jsav, nc, -1);
2134 
2135  // Assemble column lengths.
2136  for (octave_idx_type i = 0; i < nc; i++)
2137  xcidx (i+1) = tmp.cidx (i+1) - tmp.cidx (i);
2138 
2139  for (octave_idx_type i = 0; i < m; i++)
2140  {
2141  octave_idx_type j = idx_j(i);
2142  jsav[j] = i;
2143  xcidx (j+1) = rhs.cidx (i+1) - rhs.cidx (i);
2144  }
2145 
2146  // Make cumulative.
2147  for (octave_idx_type i = 0; i < nc; i++)
2148  xcidx (i+1) += xcidx (i);
2149 
2150  change_capacity (nnz ());
2151 
2152  // Merge columns.
2153  for (octave_idx_type i = 0; i < nc; i++)
2154  {
2155  octave_idx_type l = xcidx (i);
2156  octave_idx_type u = xcidx (i+1);
2157  octave_idx_type j = jsav[i];
2158  if (j >= 0)
2159  {
2160  // from rhs
2161  octave_idx_type k = rhs.cidx (j);
2162  std::copy_n (rhs.data () + k, u - l, xdata () + l);
2163  std::copy_n (rhs.ridx () + k, u - l, xridx () + l);
2164  }
2165  else
2166  {
2167  // original
2168  octave_idx_type k = tmp.cidx (i);
2169  std::copy_n (tmp.data () + k, u - l, xdata () + l);
2170  std::copy_n (tmp.ridx () + k, u - l, xridx () + l);
2171  }
2172  }
2173 
2174  }
2175  }
2176  else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
2177  {
2178  // It's just vector indexing. The 1D assign is specialized for that.
2179  assign (idx_i, rhs);
2180  }
2181  else if (idx_j.is_colon ())
2182  {
2183  if (idx_i.is_permutation (nr))
2184  {
2185  *this = rhs.index (idx_i.inverse_permutation (nr), idx_j);
2186  }
2187  else
2188  {
2189  // FIXME: optimize more special cases?
2190  // In general this requires unpacking the columns, which is slow,
2191  // especially for many small columns. OTOH, transpose is an
2192  // efficient O(nr+nc+nnz) operation.
2193  *this = transpose ();
2194  assign (idx_vector::colon, idx_i, rhs.transpose ());
2195  *this = transpose ();
2196  }
2197  }
2198  else
2199  {
2200  // Split it into 2 assignments and one indexing.
2201  Sparse<T> tmp = index (idx_vector::colon, idx_j);
2202  tmp.assign (idx_i, idx_vector::colon, rhs);
2203  assign (idx_vector::colon, idx_j, tmp);
2204  }
2205  }
2206  else if (m == 1 && n == 1)
2207  {
2208  n = idx_i.length (nr);
2209  m = idx_j.length (nc);
2210  if (rhs.nnz () != 0)
2211  assign (idx_i, idx_j, Sparse<T> (n, m, rhs.data (0)));
2212  else
2213  assign (idx_i, idx_j, Sparse<T> (n, m));
2214  }
2215  else if (idx_i.length (nr) == m && idx_j.length (nc) == n
2216  && (n == 1 || m == 1))
2217  {
2218  assign (idx_i, idx_j, rhs.transpose ());
2219  }
2220  else
2221  octave::err_nonconformant ("=", idx_i.length (nr), idx_j.length (nc), n, m);
2222 }
2223 
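// The two-dimensional assign above dispatches on the shape of the index
// pair: a shallow copy for (:,:), an in-place splice for a contiguous
// column range, reindexing for a column permutation, and otherwise either a
// transpose/assign/transpose round trip or a column-wise merge.  A minimal
// caller-side sketch of the contiguous-range fast path (sizes and values
// below are purely illustrative, not taken from this file):
//
//   Sparse<double> s (4, 4, 8);     // 4x4 matrix with room for 8 nonzeros
//   Sparse<double> r (4, 2, 3);     // replacement block for two columns
//   // ... fill s and r ...
//   s.assign (idx_vector::colon,    // all rows
//             idx_vector (1, 3),    // columns 1 and 2 -- a contiguous range
//             r);                   // existing columns stay compressed
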
2224 // Can't use the versions of these in Array.cc due to duplication of the
2225 // instantiations for Array<double> and Sparse<double>, etc.
2226 template <typename T>
2227 bool
2228 sparse_ascending_compare (typename ref_param<T>::type a,
2229  typename ref_param<T>::type b)
2230 {
2231  return (a < b);
2232 }
2233 
2234 template <typename T>
2235 bool
2236 sparse_descending_compare (typename ref_param<T>::type a,
2237  typename ref_param<T>::type b)
2238 {
2239  return (a > b);
2240 }
2241 
2242 template <typename T>
2243 Sparse<T>
2244 Sparse<T>::sort (octave_idx_type dim, sortmode mode) const
2245 {
2246  Sparse<T> m = *this;
2247 
2248  octave_idx_type nr = m.rows ();
2249  octave_idx_type nc = m.columns ();
2250 
2251  if (m.numel () < 1 || dim > 1)
2252  return m;
2253 
2254  if (dim > 0)
2255  {
2256  m = m.transpose ();
2257  nr = m.rows ();
2258  nc = m.columns ();
2259  }
2260 
2261  octave_sort<T> lsort;
2262 
2263  if (mode == ASCENDING)
2264  lsort.set_compare (sparse_ascending_compare<T>);
2265  else if (mode == DESCENDING)
2266  lsort.set_compare (sparse_descending_compare<T>);
2267  else
2268  (*current_liboctave_error_handler)
2269  ("Sparse<T>::sort: invalid MODE");
2270 
2271  T *v = m.data ();
2272  octave_idx_type *mcidx = m.cidx ();
2273  octave_idx_type *mridx = m.ridx ();
2274 
2275  for (octave_idx_type j = 0; j < nc; j++)
2276  {
2277  octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2278  lsort.sort (v, ns);
2279 
2280  octave_idx_type i;
2281  if (mode == ASCENDING)
2282  {
2283  for (i = 0; i < ns; i++)
2284  if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2285  break;
2286  }
2287  else
2288  {
2289  for (i = 0; i < ns; i++)
2290  if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2291  break;
2292  }
2293  for (octave_idx_type k = 0; k < i; k++)
2294  mridx[k] = k;
2295  for (octave_idx_type k = i; k < ns; k++)
2296  mridx[k] = k - ns + nr;
2297 
2298  v += ns;
2299  mridx += ns;
2300  }
2301 
2302  if (dim > 0)
2303  m = m.transpose ();
2304 
2305  return m;
2306 }
2307 
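// Sorting touches only the stored nonzeros of each column: the values are
// sorted and the row indices are then rewritten so that, in ASCENDING mode,
// entries that compare below zero occupy the leading rows and the remaining
// entries the trailing rows, leaving the implicit zeros in the middle.  A
// small worked example (illustrative only): the column [0 3 -1 0 2]' stores
// the nonzeros {3, -1, 2}; sorted ascending they are {-1, 2, 3}, exactly one
// of which precedes zero, so the sorted column is [-1 0 0 2 3]' with row
// indices 0, 3 and 4.
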
2308 template <typename T>
2309 Sparse<T>
2310 Sparse<T>::sort (Array<octave_idx_type>& sidx, octave_idx_type dim,
2311  sortmode mode) const
2312 {
2313  Sparse<T> m = *this;
2314 
2315  octave_idx_type nr = m.rows ();
2316  octave_idx_type nc = m.columns ();
2317 
2318  if (m.numel () < 1 || dim > 1)
2319  {
2320  sidx = Array<octave_idx_type> (dim_vector (nr, nc), 1);
2321  return m;
2322  }
2323 
2324  if (dim > 0)
2325  {
2326  m = m.transpose ();
2327  nr = m.rows ();
2328  nc = m.columns ();
2329  }
2330 
2331  octave_sort<T> indexed_sort;
2332 
2333  if (mode == ASCENDING)
2334  indexed_sort.set_compare (sparse_ascending_compare<T>);
2335  else if (mode == DESCENDING)
2336  indexed_sort.set_compare (sparse_descending_compare<T>);
2337  else
2338  (*current_liboctave_error_handler)
2339  ("Sparse<T>::sort: invalid MODE");
2340 
2341  T *v = m.data ();
2342  octave_idx_type *mcidx = m.cidx ();
2343  octave_idx_type *mridx = m.ridx ();
2344 
2345  sidx = Array<octave_idx_type> (dim_vector (nr, nc));
2346  OCTAVE_LOCAL_BUFFER (octave_idx_type, vi, nr);
2347 
2348  for (octave_idx_type j = 0; j < nc; j++)
2349  {
2350  octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2351  octave_idx_type offset = j * nr;
2352 
2353  if (ns == 0)
2354  {
2355  for (octave_idx_type k = 0; k < nr; k++)
2356  sidx (offset + k) = k;
2357  }
2358  else
2359  {
2360  for (octave_idx_type i = 0; i < ns; i++)
2361  vi[i] = mridx[i];
2362 
2363  indexed_sort.sort (v, vi, ns);
2364 
2365  octave_idx_type i;
2366  if (mode == ASCENDING)
2367  {
2368  for (i = 0; i < ns; i++)
2369  if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2370  break;
2371  }
2372  else
2373  {
2374  for (i = 0; i < ns; i++)
2375  if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2376  break;
2377  }
2378 
2379  octave_idx_type ii = 0;
2380  octave_idx_type jj = i;
2381  for (octave_idx_type k = 0; k < nr; k++)
2382  {
2383  if (ii < ns && mridx[ii] == k)
2384  ii++;
2385  else
2386  sidx (offset + jj++) = k;
2387  }
2388 
2389  for (octave_idx_type k = 0; k < i; k++)
2390  {
2391  sidx (k + offset) = vi[k];
2392  mridx[k] = k;
2393  }
2394 
2395  for (octave_idx_type k = i; k < ns; k++)
2396  {
2397  sidx (k - ns + nr + offset) = vi[k];
2398  mridx[k] = k - ns + nr;
2399  }
2400 
2401  v += ns;
2402  mridx += ns;
2403  }
2404  }
2405 
2406  if (dim > 0)
2407  {
2408  m = m.transpose ();
2409  sidx = sidx.transpose ();
2410  }
2411 
2412  return m;
2413 }
2414 
2415 template <typename T>
2416 Sparse<T>
2417 Sparse<T>::diag (octave_idx_type k) const
2418 {
2419  octave_idx_type nnr = rows ();
2420  octave_idx_type nnc = cols ();
2421  Sparse<T> d;
2422 
2423  if (nnr == 0 || nnc == 0)
2424  ; // do nothing
2425  else if (nnr != 1 && nnc != 1)
2426  {
2427  if (k > 0)
2428  nnc -= k;
2429  else if (k < 0)
2430  nnr += k;
2431 
2432  if (nnr > 0 && nnc > 0)
2433  {
2434  octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;
2435 
2436  // Count the number of nonzero elements
2437  octave_idx_type nel = 0;
2438  if (k > 0)
2439  {
2440  for (octave_idx_type i = 0; i < ndiag; i++)
2441  if (elem (i, i+k) != 0.)
2442  nel++;
2443  }
2444  else if (k < 0)
2445  {
2446  for (octave_idx_type i = 0; i < ndiag; i++)
2447  if (elem (i-k, i) != 0.)
2448  nel++;
2449  }
2450  else
2451  {
2452  for (octave_idx_type i = 0; i < ndiag; i++)
2453  if (elem (i, i) != 0.)
2454  nel++;
2455  }
2456 
2457  d = Sparse<T> (ndiag, 1, nel);
2458  d.xcidx (0) = 0;
2459  d.xcidx (1) = nel;
2460 
2461  octave_idx_type ii = 0;
2462  if (k > 0)
2463  {
2464  for (octave_idx_type i = 0; i < ndiag; i++)
2465  {
2466  T tmp = elem (i, i+k);
2467  if (tmp != 0.)
2468  {
2469  d.xdata (ii) = tmp;
2470  d.xridx (ii++) = i;
2471  }
2472  }
2473  }
2474  else if (k < 0)
2475  {
2476  for (octave_idx_type i = 0; i < ndiag; i++)
2477  {
2478  T tmp = elem (i-k, i);
2479  if (tmp != 0.)
2480  {
2481  d.xdata (ii) = tmp;
2482  d.xridx (ii++) = i;
2483  }
2484  }
2485  }
2486  else
2487  {
2488  for (octave_idx_type i = 0; i < ndiag; i++)
2489  {
2490  T tmp = elem (i, i);
2491  if (tmp != 0.)
2492  {
2493  d.xdata (ii) = tmp;
2494  d.xridx (ii++) = i;
2495  }
2496  }
2497  }
2498  }
2499  else
2500  {
2501  // Matlab returns [] 0x1 for out-of-range diagonal
2502 
2503  octave_idx_type nr = 0;
2504  octave_idx_type nc = 1;
2505  octave_idx_type nz = 0;
2506 
2507  d = Sparse<T> (nr, nc, nz);
2508  }
2509  }
2510  else // one of dimensions == 1 (vector)
2511  {
2512  octave_idx_type roff = 0;
2513  octave_idx_type coff = 0;
2514  if (k > 0)
2515  {
2516  roff = 0;
2517  coff = k;
2518  }
2519  else if (k < 0)
2520  {
2521  roff = -k;
2522  coff = 0;
2523  }
2524 
2525  if (nnr == 1)
2526  {
2527  octave_idx_type n = nnc + std::abs (k);
2528  octave_idx_type nz = nnz ();
2529 
2530  d = Sparse<T> (n, n, nz);
2531 
2532  if (nnz () > 0)
2533  {
2534  for (octave_idx_type i = 0; i < coff+1; i++)
2535  d.xcidx (i) = 0;
2536 
2537  for (octave_idx_type j = 0; j < nnc; j++)
2538  {
2539  for (octave_idx_type i = cidx (j); i < cidx (j+1); i++)
2540  {
2541  d.xdata (i) = data (i);
2542  d.xridx (i) = j + roff;
2543  }
2544  d.xcidx (j + coff + 1) = cidx (j+1);
2545  }
2546 
2547  for (octave_idx_type i = nnc + coff + 1; i < n + 1; i++)
2548  d.xcidx (i) = nz;
2549  }
2550  }
2551  else
2552  {
2553  octave_idx_type n = nnr + std::abs (k);
2554  octave_idx_type nz = nnz ();
2555 
2556  d = Sparse<T> (n, n, nz);
2557 
2558  if (nnz () > 0)
2559  {
2560  octave_idx_type ii = 0;
2561  octave_idx_type ir = ridx (0);
2562 
2563  for (octave_idx_type i = 0; i < coff+1; i++)
2564  d.xcidx (i) = 0;
2565 
2566  for (octave_idx_type i = 0; i < nnr; i++)
2567  {
2568  if (ir == i)
2569  {
2570  d.xdata (ii) = data (ii);
2571  d.xridx (ii++) = ir + roff;
2572 
2573  if (ii != nz)
2574  ir = ridx (ii);
2575  }
2576  d.xcidx (i + coff + 1) = ii;
2577  }
2578 
2579  for (octave_idx_type i = nnr + coff + 1; i < n+1; i++)
2580  d.xcidx (i) = nz;
2581  }
2582  }
2583  }
2584 
2585  return d;
2586 }
2587 
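// diag (k) follows the Matlab convention: for a matrix it extracts the k-th
// diagonal as a sparse column vector (k > 0 above, k < 0 below the main
// diagonal), while for a row or column vector it builds a square sparse
// matrix of order length + |k| with the vector placed on the k-th diagonal.
// For example (illustrative values only), the 1x3 sparse row [5 0 7] with
// k = 1 yields a 4x4 matrix whose only entries are (0,1) = 5 and (2,3) = 7.
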
2588 template <typename T>
2589 Sparse<T>
2590 Sparse<T>::cat (int dim, octave_idx_type n, const Sparse<T> *sparse_list)
2591 {
2592  // Default concatenation.
2593  bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
2594 
2595  if (dim == -1 || dim == -2)
2596  {
2597  concat_rule = &dim_vector::hvcat;
2598  dim = -dim - 1;
2599  }
2600  else if (dim < 0)
2601  (*current_liboctave_error_handler) ("cat: invalid dimension");
2602 
2603  dim_vector dv;
2604  octave_idx_type total_nz = 0;
2605  if (dim != 0 && dim != 1)
2606  (*current_liboctave_error_handler)
2607  ("cat: invalid dimension for sparse concatenation");
2608 
2609  if (n == 1)
2610  return sparse_list[0];
2611 
2612  for (octave_idx_type i = 0; i < n; i++)
2613  {
2614  if (! (dv.*concat_rule) (sparse_list[i].dims (), dim))
2615  (*current_liboctave_error_handler) ("cat: dimension mismatch");
2616 
2617  total_nz += sparse_list[i].nnz ();
2618  }
2619 
2620  Sparse<T> retval (dv, total_nz);
2621 
2622  if (retval.isempty ())
2623  return retval;
2624 
2625  switch (dim)
2626  {
2627  case 0:
2628  {
2629  // sparse vertcat. This is not efficiently handled by assignment,
2630  // so we'll do it directly.
2631  octave_idx_type l = 0;
2632  for (octave_idx_type j = 0; j < dv(1); j++)
2633  {
2634  octave_quit ();
2635 
2636  octave_idx_type rcum = 0;
2637  for (octave_idx_type i = 0; i < n; i++)
2638  {
2639  const Sparse<T>& spi = sparse_list[i];
2640  // Skipping empty matrices. See the comment in Array.cc.
2641  if (spi.isempty ())
2642  continue;
2643 
2644  octave_idx_type kl = spi.cidx (j);
2645  octave_idx_type ku = spi.cidx (j+1);
2646  for (octave_idx_type k = kl; k < ku; k++, l++)
2647  {
2648  retval.xridx (l) = spi.ridx (k) + rcum;
2649  retval.xdata (l) = spi.data (k);
2650  }
2651 
2652  rcum += spi.rows ();
2653  }
2654 
2655  retval.xcidx (j+1) = l;
2656  }
2657 
2658  break;
2659  }
2660  case 1:
2661  {
2662  octave_idx_type l = 0;
2663  for (octave_idx_type i = 0; i < n; i++)
2664  {
2665  octave_quit ();
2666 
2667  // Skipping empty matrices. See the comment in Array.cc.
2668  if (sparse_list[i].isempty ())
2669  continue;
2670 
2671  octave_idx_type u = l + sparse_list[i].columns ();
2672  retval.assign (idx_vector::colon, idx_vector (l, u),
2673  sparse_list[i]);
2674  l = u;
2675  }
2676 
2677  break;
2678  }
2679  default:
2680  assert (false);
2681  }
2682 
2683  return retval;
2684 }
2685 
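// Concatenation strategy: horizontal concatenation (dim = 1) splices each
// operand in at successive column offsets through assign, while vertical
// concatenation (dim = 0) merges the operands' columns directly, shifting
// every row index by the cumulative row count (rcum) of the operands already
// copied.  A small worked example (illustrative only): vertically
// concatenating the 1x2 rows [1 0] and [0 2] copies the 1 into column 0 with
// row offset 0 and the 2 into column 1 with row offset 1, giving the 2x2
// result [1 0; 0 2] with cidx = {0, 1, 2}.
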
2686 template <typename T>
2687 Array<T>
2688 Sparse<T>::array_value (void) const
2689 {
2690  Array<T> retval (dims (), T ());
2691  if (rows () == 1)
2692  {
2693  octave_idx_type i = 0;
2694  for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2695  {
2696  if (cidx (j+1) > i)
2697  retval.xelem (j) = data (i++);
2698  }
2699  }
2700  else
2701  {
2702  for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2703  for (octave_idx_type i = cidx (j), iu = cidx (j+1); i < iu; i++)
2704  retval.xelem (ridx (i), j) = data (i);
2705  }
2706 
2707  return retval;
2708 }
2709 
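// array_value expands the compressed storage into a dense Array by
// scattering data (i) to position (ridx (i), j) for every stored entry of
// every column; the single-row case is handled separately since each column
// then holds at most one entry and a running data pointer suffices.  For
// example (illustrative only), the 1x4 sparse row [0 5 0 7] copies 5 into
// element 1 and 7 into element 3 of the dense result.
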
2710 template <typename T>
2711 std::istream&
2712 read_sparse_matrix (std::istream& is, Sparse<T>& a,
2713  T (*read_fcn) (std::istream&))
2714 {
2715  octave_idx_type nr = a.rows ();
2716  octave_idx_type nc = a.cols ();
2717  octave_idx_type nz = a.nzmax ();
2718 
2719  if (nr > 0 && nc > 0)
2720  {
2721  octave_idx_type itmp;
2722  octave_idx_type jtmp;
2723  octave_idx_type iold = 0;
2724  octave_idx_type jold = 0;
2725  octave_idx_type ii = 0;
2726  T tmp;
2727 
2728  a.cidx (0) = 0;
2729  for (octave_idx_type i = 0; i < nz; i++)
2730  {
2731  itmp = 0; jtmp = 0;
2732  is >> itmp;
2733  itmp--;
2734 
2735  is >> jtmp;
2736  jtmp--;
2737 
2738  if (is.fail ())
2739  {
2740  is.clear();
2741  std::string err_field;
2742  is >> err_field;
2743  (*current_liboctave_error_handler)
2744  ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2745  "symbol '%s' is not a valid integer",
2746  i+1, err_field.c_str ());
2747  }
2748 
2749  if (itmp < 0 || itmp >= nr)
2750  {
2751  is.setstate (std::ios::failbit);
2752 
2753  (*current_liboctave_error_handler)
2754  ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2755  "row index = %" OCTAVE_IDX_TYPE_FORMAT " out of range",
2756  i+1, itmp + 1);
2757  }
2758 
2759  if (jtmp < 0 || jtmp >= nc)
2760  {
2761  is.setstate (std::ios::failbit);
2762 
2763  (*current_liboctave_error_handler)
2764  ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2765  "column index = %" OCTAVE_IDX_TYPE_FORMAT " out of range",
2766  i+1, jtmp + 1);
2767  }
2768 
2769  if (jtmp < jold)
2770  {
2771  is.setstate (std::ios::failbit);
2772 
2773  (*current_liboctave_error_handler)
2774  ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2775  "column indices must appear in ascending order "
2776  "(%" OCTAVE_IDX_TYPE_FORMAT " < %" OCTAVE_IDX_TYPE_FORMAT ")",
2777  i+1, jtmp, jold);
2778  }
2779  else if (jtmp > jold)
2780  {
2781  for (octave_idx_type j = jold; j < jtmp; j++)
2782  a.cidx (j+1) = ii;
2783  }
2784  else if (itmp < iold)
2785  {
2786  is.setstate (std::ios::failbit);
2787 
2788  (*current_liboctave_error_handler)
2789  ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2790  "row indices must appear in ascending order in each column "
2791  "(%" OCTAVE_IDX_TYPE_FORMAT " < %" OCTAVE_IDX_TYPE_FORMAT ")",
2792  i+1, itmp, iold);
2793  }
2794 
2795  iold = itmp;
2796  jold = jtmp;
2797 
2798  tmp = read_fcn (is);
2799 
2800  if (! is)
2801  return is; // Problem, return is in error state
2802 
2803  a.data (ii) = tmp;
2804  a.ridx (ii++) = itmp;
2805  }
2806 
2807  for (octave_idx_type j = jold; j < nc; j++)
2808  a.cidx (j+1) = ii;
2809  }
2810 
2811  return is;
2812 }
2813 
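// read_sparse_matrix expects nzmax () triplets of the form "row column
// value" with 1-based indices, ordered by column and, within a column, by
// row.  A minimal sketch of a valid stream (values chosen only for
// illustration) for a 3x3 matrix holding 4.0 at (1,1), -2.0 at (3,1) and
// 7.0 at (2,3):
//
//   1 1 4.0
//   3 1 -2.0
//   2 3 7.0
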
2814 /*
2815  * Tests
2816  *
2817 
2818 %!function x = set_slice (x, dim, slice, arg)
2819 %! switch (dim)
2820 %! case 11
2821 %! x(slice) = 2;
2822 %! case 21
2823 %! x(slice, :) = 2;
2824 %! case 22
2825 %! x(:, slice) = 2;
2826 %! otherwise
2827 %! error ("invalid dim, '%d'", dim);
2828 %! endswitch
2829 %!endfunction
2830 
2831 %!function x = set_slice2 (x, dim, slice)
2832 %! switch (dim)
2833 %! case 11
2834 %! x(slice) = 2 * ones (size (slice));
2835 %! case 21
2836 %! x(slice, :) = 2 * ones (length (slice), columns (x));
2837 %! case 22
2838 %! x(:, slice) = 2 * ones (rows (x), length (slice));
2839 %! otherwise
2840 %! error ("invalid dim, '%d'", dim);
2841 %! endswitch
2842 %!endfunction
2843 
2844 %!function test_sparse_slice (size, dim, slice)
2845 %! x = ones (size);
2846 %! s = set_slice (sparse (x), dim, slice);
2847 %! f = set_slice (x, dim, slice);
2848 %! assert (nnz (s), nnz (f));
2849 %! assert (full (s), f);
2850 %! s = set_slice2 (sparse (x), dim, slice);
2851 %! f = set_slice2 (x, dim, slice);
2852 %! assert (nnz (s), nnz (f));
2853 %! assert (full (s), f);
2854 %!endfunction
2855 
2856 #### 1d indexing
2857 
2858 ## size = [2 0]
2859 %!test test_sparse_slice ([2 0], 11, []);
2860 %!assert (set_slice (sparse (ones ([2 0])), 11, 1), sparse ([2 0]')) # sparse different from full
2861 %!assert (set_slice (sparse (ones ([2 0])), 11, 2), sparse ([0 2]')) # sparse different from full
2862 %!assert (set_slice (sparse (ones ([2 0])), 11, 3), sparse ([0 0; 2 0]')) # sparse different from full
2863 %!assert (set_slice (sparse (ones ([2 0])), 11, 4), sparse ([0 0; 0 2]')) # sparse different from full
2864 
2865 ## size = [0 2]
2866 %!test test_sparse_slice ([0 2], 11, []);
2867 %!assert (set_slice (sparse (ones ([0 2])), 11, 1), sparse ([2 0])) # sparse different from full
2868 %!test test_sparse_slice ([0 2], 11, 2);
2869 %!test test_sparse_slice ([0 2], 11, 3);
2870 %!test test_sparse_slice ([0 2], 11, 4);
2871 %!test test_sparse_slice ([0 2], 11, [4, 4]);
2872 
2873 ## size = [2 1]
2874 %!test test_sparse_slice ([2 1], 11, []);
2875 %!test test_sparse_slice ([2 1], 11, 1);
2876 %!test test_sparse_slice ([2 1], 11, 2);
2877 %!test test_sparse_slice ([2 1], 11, 3);
2878 %!test test_sparse_slice ([2 1], 11, 4);
2879 %!test test_sparse_slice ([2 1], 11, [4, 4]);
2880 
2881 ## size = [1 2]
2882 %!test test_sparse_slice ([1 2], 11, []);
2883 %!test test_sparse_slice ([1 2], 11, 1);
2884 %!test test_sparse_slice ([1 2], 11, 2);
2885 %!test test_sparse_slice ([1 2], 11, 3);
2886 %!test test_sparse_slice ([1 2], 11, 4);
2887 %!test test_sparse_slice ([1 2], 11, [4, 4]);
2888 
2889 ## size = [2 2]
2890 %!test test_sparse_slice ([2 2], 11, []);
2891 %!test test_sparse_slice ([2 2], 11, 1);
2892 %!test test_sparse_slice ([2 2], 11, 2);
2893 %!test test_sparse_slice ([2 2], 11, 3);
2894 %!test test_sparse_slice ([2 2], 11, 4);
2895 %!test test_sparse_slice ([2 2], 11, [4, 4]);
2896 # These 2 errors are the same as in the full case
2897 %!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 5)
2898 %!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 6)
2899 
2900 #### 2d indexing
2901 
2902 ## size = [2 0]
2903 %!test test_sparse_slice ([2 0], 21, []);
2904 %!test test_sparse_slice ([2 0], 21, 1);
2905 %!test test_sparse_slice ([2 0], 21, 2);
2906 %!test test_sparse_slice ([2 0], 21, [2,2]);
2907 %!assert (set_slice (sparse (ones ([2 0])), 21, 3), sparse (3,0))
2908 %!assert (set_slice (sparse (ones ([2 0])), 21, 4), sparse (4,0))
2909 %!test test_sparse_slice ([2 0], 22, []);
2910 %!test test_sparse_slice ([2 0], 22, 1);
2911 %!test test_sparse_slice ([2 0], 22, 2);
2912 %!test test_sparse_slice ([2 0], 22, [2,2]);
2913 %!assert (set_slice (sparse (ones ([2 0])), 22, 3), sparse ([0 0 2;0 0 2])) # sparse different from full
2914 %!assert (set_slice (sparse (ones ([2 0])), 22, 4), sparse ([0 0 0 2;0 0 0 2])) # sparse different from full
2915 
2916 ## size = [0 2]
2917 %!test test_sparse_slice ([0 2], 21, []);
2918 %!test test_sparse_slice ([0 2], 21, 1);
2919 %!test test_sparse_slice ([0 2], 21, 2);
2920 %!test test_sparse_slice ([0 2], 21, [2,2]);
2921 %!assert (set_slice (sparse (ones ([0 2])), 21, 3), sparse ([0 0;0 0;2 2])) # sparse different from full
2922 %!assert (set_slice (sparse (ones ([0 2])), 21, 4), sparse ([0 0;0 0;0 0;2 2])) # sparse different from full
2923 %!test test_sparse_slice ([0 2], 22, []);
2924 %!test test_sparse_slice ([0 2], 22, 1);
2925 %!test test_sparse_slice ([0 2], 22, 2);
2926 %!test test_sparse_slice ([0 2], 22, [2,2]);
2927 %!assert (set_slice (sparse (ones ([0 2])), 22, 3), sparse (0,3))
2928 %!assert (set_slice (sparse (ones ([0 2])), 22, 4), sparse (0,4))
2929 
2930 ## size = [2 1]
2931 %!test test_sparse_slice ([2 1], 21, []);
2932 %!test test_sparse_slice ([2 1], 21, 1);
2933 %!test test_sparse_slice ([2 1], 21, 2);
2934 %!test test_sparse_slice ([2 1], 21, [2,2]);
2935 %!test test_sparse_slice ([2 1], 21, 3);
2936 %!test test_sparse_slice ([2 1], 21, 4);
2937 %!test test_sparse_slice ([2 1], 22, []);
2938 %!test test_sparse_slice ([2 1], 22, 1);
2939 %!test test_sparse_slice ([2 1], 22, 2);
2940 %!test test_sparse_slice ([2 1], 22, [2,2]);
2941 %!test test_sparse_slice ([2 1], 22, 3);
2942 %!test test_sparse_slice ([2 1], 22, 4);
2943 
2944 ## size = [1 2]
2945 %!test test_sparse_slice ([1 2], 21, []);
2946 %!test test_sparse_slice ([1 2], 21, 1);
2947 %!test test_sparse_slice ([1 2], 21, 2);
2948 %!test test_sparse_slice ([1 2], 21, [2,2]);
2949 %!test test_sparse_slice ([1 2], 21, 3);
2950 %!test test_sparse_slice ([1 2], 21, 4);
2951 %!test test_sparse_slice ([1 2], 22, []);
2952 %!test test_sparse_slice ([1 2], 22, 1);
2953 %!test test_sparse_slice ([1 2], 22, 2);
2954 %!test test_sparse_slice ([1 2], 22, [2,2]);
2955 %!test test_sparse_slice ([1 2], 22, 3);
2956 %!test test_sparse_slice ([1 2], 22, 4);
2957 
2958 ## size = [2 2]
2959 %!test test_sparse_slice ([2 2], 21, []);
2960 %!test test_sparse_slice ([2 2], 21, 1);
2961 %!test test_sparse_slice ([2 2], 21, 2);
2962 %!test test_sparse_slice ([2 2], 21, [2,2]);
2963 %!test test_sparse_slice ([2 2], 21, 3);
2964 %!test test_sparse_slice ([2 2], 21, 4);
2965 %!test test_sparse_slice ([2 2], 22, []);
2966 %!test test_sparse_slice ([2 2], 22, 1);
2967 %!test test_sparse_slice ([2 2], 22, 2);
2968 %!test test_sparse_slice ([2 2], 22, [2,2]);
2969 %!test test_sparse_slice ([2 2], 22, 3);
2970 %!test test_sparse_slice ([2 2], 22, 4);
2971 
2972 %!assert <35570> (speye (3,1)(3:-1:1), sparse ([0; 0; 1]))
2973 
2974 ## Test removing columns
2975 %!test <36656>
2976 %! s = sparse (magic (5));
2977 %! s(:,2:4) = [];
2978 %! assert (s, sparse (magic (5)(:, [1,5])));
2979 
2980 %!test
2981 %! s = sparse ([], [], [], 1, 1);
2982 %! s(1,:) = [];
2983 %! assert (s, sparse ([], [], [], 0, 1));
2984 
2985 ## Test (bug #37321)
2986 %!test <37321> a=sparse (0,0); assert (all (a) == sparse ([1]));
2987 %!test <37321> a=sparse (0,1); assert (all (a) == sparse ([1]));
2988 %!test <37321> a=sparse (1,0); assert (all (a) == sparse ([1]));
2989 %!test <37321> a=sparse (1,0); assert (all (a,2) == sparse ([1]));
2990 %!test <37321> a=sparse (1,0); assert (size (all (a,1)), [1 0]);
2991 %!test <37321> a=sparse (1,1);
2992 %! assert (all (a) == sparse ([0]));
2993 %! assert (size (all (a)), [1 1]);
2994 %!test <37321> a=sparse (2,1);
2995 %! assert (all (a) == sparse ([0]));
2996 %! assert (size (all (a)), [1 1]);
2997 %!test <37321> a=sparse (1,2);
2998 %! assert (all (a) == sparse ([0]));
2999 %! assert (size (all (a)), [1 1]);
3000 %!test <37321> a=sparse (2,2); assert (isequal (all (a), sparse ([0 0])));
3001 
3002 ## Test assigning row to a column slice
3003 %!test <45589>
3004 %! a = sparse (magic (3));
3005 %! b = a;
3006 %! a(1,:) = 1:3;
3007 %! b(1,:) = (1:3)';
3008 %! assert (a, b);
3009 
3010 */
3011 
3012 template <typename T>
3013 void
3014 Sparse<T>::print_info (std::ostream& os, const std::string& prefix) const
3015 {
3016  os << prefix << "rep address: " << rep << "\n"
3017  << prefix << "rep->nzmx: " << rep->nzmx << "\n"
3018  << prefix << "rep->nrows: " << rep->nrows << "\n"
3019  << prefix << "rep->ncols: " << rep->ncols << "\n"
3020  << prefix << "rep->data: " << static_cast<void *> (rep->d) << "\n"
3021  << prefix << "rep->ridx: " << static_cast<void *> (rep->r) << "\n"
3022  << prefix << "rep->cidx: " << static_cast<void *> (rep->c) << "\n"
3023  << prefix << "rep->count: " << rep->count << "\n";
3024 }
3025 
3026 #define INSTANTIATE_SPARSE(T, API) \
3027  template class API Sparse<T>; \
3028  template std::istream& \
3029  read_sparse_matrix<T> (std::istream& is, Sparse<T>& a, \
3030  T (*read_fcn) (std::istream&));