GNU Octave  9.1.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
Sparse.cc
Go to the documentation of this file.
1 ////////////////////////////////////////////////////////////////////////
2 //
3 // Copyright (C) 1998-2024 The Octave Project Developers
4 //
5 // See the file COPYRIGHT.md in the top-level directory of this
6 // distribution or <https://octave.org/copyright/>.
7 //
8 // This file is part of Octave.
9 //
10 // Octave is free software: you can redistribute it and/or modify it
11 // under the terms of the GNU General Public License as published by
12 // the Free Software Foundation, either version 3 of the License, or
13 // (at your option) any later version.
14 //
15 // Octave is distributed in the hope that it will be useful, but
16 // WITHOUT ANY WARRANTY; without even the implied warranty of
17 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 // GNU General Public License for more details.
19 //
20 // You should have received a copy of the GNU General Public License
21 // along with Octave; see the file COPYING. If not, see
22 // <https://www.gnu.org/licenses/>.
23 //
24 ////////////////////////////////////////////////////////////////////////
25 
26 // This file should not include config.h. It is only included in other
27 // C++ source files that should have included config.h before including
28 // this file.
29 
30 #include <cassert>
31 #include <cinttypes>
32 
33 #include <algorithm>
34 #include <istream>
35 #include <ostream>
36 #include <sstream>
37 #include <vector>
38 
39 #include "Array.h"
40 #include "MArray.h"
41 #include "Array-util.h"
42 #include "Range.h"
43 #include "idx-vector.h"
44 #include "lo-error.h"
45 #include "quit.h"
46 #include "oct-locbuf.h"
47 
48 #include "Sparse.h"
49 #include "sparse-util.h"
50 #include "oct-spparms.h"
51 #include "mx-inlines.cc"
52 
53 #include "PermMatrix.h"
54 
55 template <typename T, typename Alloc>
// Return a pointer to the single shared representation used by all
// default-constructed (empty) Sparse objects.  (The signature line was lost
// in extraction; presumably Sparse<T, Alloc>::nil_rep -- TODO confirm.)
58 {
59  static typename Sparse<T, Alloc>::SparseRep nr;
60  return &nr;
61 }
62 
63 template <typename T, typename Alloc>
65 T&
// Writable access to element (r, c).  If the element is not yet stored, a
// new zero entry is inserted (shifting trailing entries right), which is
// O(nnz) -- hence the "slow" warning below.  Errors if nzmax is exhausted.
// NOTE(review): the signature line and the declaration of the loop index
// 'i' (original line 68) were lost in extraction; 'i' is presumably a
// function-scope octave_idx_type so it survives past the loop -- confirm.
67 {
69 
70  if (m_nzmax <= 0)
71  (*current_liboctave_error_handler)
72  ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
73 
// Scan column c's (sorted) row indices; stop at the insertion point.
74  for (i = m_cidx[c]; i < m_cidx[c + 1]; i++)
75  if (m_ridx[i] == r)
76  return m_data[i];
77  else if (m_ridx[i] > r)
78  break;
79 
80  // Ok, If we've gotten here, we're in trouble. Have to create a
81  // new element in the sparse array. This' gonna be slow!!!
82  if (m_cidx[m_ncols] == m_nzmax)
83  (*current_liboctave_error_handler)
84  ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
85 
// Shift all entries after position i one slot right to make room.
86  octave_idx_type to_move = m_cidx[m_ncols] - i;
87  if (to_move != 0)
88  {
89  for (octave_idx_type j = m_cidx[m_ncols]; j > i; j--)
90  {
91  m_data[j] = m_data[j-1];
92  m_ridx[j] = m_ridx[j-1];
93  }
94  }
95 
// Bump the column pointers of every column after c.
96  for (octave_idx_type j = c + 1; j < m_ncols + 1; j++)
97  m_cidx[j] = m_cidx[j] + 1;
98 
99  m_data[i] = 0.;
100  m_ridx[i] = r;
101 
102  return m_data[i];
103 }
104 
105 template <typename T, typename Alloc>
107 T
// Read-only access to element (r, c): returns the stored value, or a
// default-constructed T() for structural zeros.  Never allocates.
109 {
110  if (m_nzmax > 0)
111  for (octave_idx_type i = m_cidx[c]; i < m_cidx[c + 1]; i++)
112  if (m_ridx[i] == r)
113  return m_data[i];
114  return T ();
115 }
116 
117 template <typename T, typename Alloc>
119 void
// Optionally squeeze out explicitly stored zeros (compacting data/ridx in
// place and rewriting the column pointers), then shrink capacity to nnz.
121 {
122  if (remove_zeros)
123  {
// i scans all stored entries, k is the write cursor for kept entries.
124  octave_idx_type i = 0;
125  octave_idx_type k = 0;
126  for (octave_idx_type j = 1; j <= m_ncols; j++)
127  {
128  octave_idx_type u = m_cidx[j];
129  for (; i < u; i++)
130  if (m_data[i] != T ())
131  {
132  m_data[k] = m_data[i];
133  m_ridx[k++] = m_ridx[i];
134  }
135  m_cidx[j] = k;
136  }
137  }
138 
// Release any excess capacity beyond the (possibly reduced) nnz.
139  change_length (m_cidx[m_ncols]);
140 }
141 
142 template <typename T, typename Alloc>
144 void
// Change the capacity (nzmax) to nz, truncating stored elements if nz is
// smaller than the current nnz.  Reallocation is skipped when shrinking by
// less than 1/frac of the capacity, to avoid realloc thrash.
146 {
// Clamp column pointers so no column claims entries beyond nz.
147  for (octave_idx_type j = m_ncols; j > 0 && m_cidx[j] > nz; j--)
148  m_cidx[j] = nz;
149 
150  // Always preserve space for 1 element.
151  nz = (nz > 0 ? nz : 1);
152 
153  // Skip reallocation if we have less than 1/frac extra elements to discard.
154  static const int frac = 5;
155  if (nz > m_nzmax || nz < m_nzmax - m_nzmax/frac)
156  {
157  // Reallocate.
158  octave_idx_type min_nzmax = std::min (nz, m_nzmax);
159 
160  octave_idx_type *new_ridx = idx_type_allocate (nz);
161  std::copy_n (m_ridx, min_nzmax, new_ridx);
162 
163  idx_type_deallocate (m_ridx, m_nzmax);
164  m_ridx = new_ridx;
165 
166  T *new_data = T_allocate (nz);
167  std::copy_n (m_data, min_nzmax, new_data);
168 
169  T_deallocate (m_data, m_nzmax);
170  m_data = new_data;
171 
172  m_nzmax = nz;
173  }
174 }
175 
176 template <typename T, typename Alloc>
178 bool
// Validate the internal CSC structure (monotone cidx, sorted in-range ridx).
180 {
181  return sparse_indices_ok (m_ridx, m_cidx, m_nrows, m_ncols, nnz ());
182 }
183 
184 template <typename T, typename Alloc>
186 bool
// Return true if any stored (nonzero) element is NaN.  Structural zeros
// need not be checked -- T() is never NaN.
188 {
189  octave_idx_type nz = nnz ();
190 
191  for (octave_idx_type i = 0; i < nz; i++)
192  if (octave::math::isnan (m_data[i]))
193  return true;
194 
195  return false;
196 }
197 
198 template <typename T, typename Alloc>
// Construct an nr-by-nc matrix with every element equal to val.  A nonzero
// val produces a completely full sparse matrix (nr*nc stored entries);
// val == T() produces an empty (all structural zeros) matrix.
201  : m_rep (nullptr), m_dimensions (dim_vector (nr, nc))
202 {
203  if (val != T ())
204  {
// safe_numel() errors on overflow before we try to allocate nr*nc entries.
205  m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, m_dimensions.safe_numel ());
206 
207  octave_idx_type ii = 0;
208  xcidx (0) = 0;
209  for (octave_idx_type j = 0; j < nc; j++)
210  {
211  for (octave_idx_type i = 0; i < nr; i++)
212  {
213  xdata (ii) = val;
214  xridx (ii++) = i;
215  }
216  xcidx (j+1) = ii;
217  }
218  }
219  else
220  {
221  m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, 0);
222  for (octave_idx_type j = 0; j < nc+1; j++)
223  xcidx (j) = 0;
224  }
225 }
226 
227 template <typename T, typename Alloc>
// Construct a sparse matrix from a permutation matrix: exactly one 1.0 per
// column, at the row given by the column permutation vector.
230  : m_rep (new typename Sparse<T, Alloc>::SparseRep (a.rows (), a.cols (), a.rows ())),
231  m_dimensions (dim_vector (a.rows (), a.cols ()))
232 {
233  octave_idx_type n = a.rows ();
// Each column holds exactly one entry, so cidx is simply 0, 1, ..., n.
234  for (octave_idx_type i = 0; i <= n; i++)
235  cidx (i) = i;
236 
237  const Array<octave_idx_type> pv = a.col_perm_vec ();
238 
239  for (octave_idx_type i = 0; i < n; i++)
240  ridx (i) = pv(i);
241 
242  for (octave_idx_type i = 0; i < n; i++)
243  data (i) = 1.0;
244 }
245 
246 template <typename T, typename Alloc>
// Construct an all-zero sparse matrix with the given (strictly 2-D)
// dimensions.  Errors for any other dimensionality.
249  : m_rep (nullptr), m_dimensions (dv)
250 {
251  if (dv.ndims () != 2)
252  (*current_liboctave_error_handler)
253  ("Sparse::Sparse (const dim_vector&): dimension mismatch");
254 
255  m_rep = new typename Sparse<T, Alloc>::SparseRep (dv(0), dv(1), 0);
256 }
257 
258 template <typename T, typename Alloc>
// Reshape-copy constructor: build a dv-shaped matrix containing the same
// elements as a, reinterpreted in column-major order.  a.numel () must
// equal dv.numel ().
261  : m_rep (nullptr), m_dimensions (dv)
262 {
263 
264  // Work in unsigned long long to avoid overflow issues with numel
265  unsigned long long a_nel = static_cast<unsigned long long> (a.rows ()) *
266  static_cast<unsigned long long> (a.cols ());
267  unsigned long long dv_nel = static_cast<unsigned long long> (dv(0)) *
268  static_cast<unsigned long long> (dv(1));
269 
270  if (a_nel != dv_nel)
271  (*current_liboctave_error_handler)
272  ("Sparse::Sparse (const Sparse&, const dim_vector&): dimension mismatch");
273 
274  dim_vector old_dims = a.dims ();
275  octave_idx_type new_nzmax = a.nnz ();
276  octave_idx_type new_nr = dv(0);
277  octave_idx_type new_nc = dv(1);
278  octave_idx_type old_nr = old_dims(0);
279  octave_idx_type old_nc = old_dims(1);
280 
281  m_rep = new typename Sparse<T, Alloc>::SparseRep (new_nr, new_nc, new_nzmax);
282 
// Walk a's entries in order; their column-major linear index 'tmp' maps to
// (ii, jj) in the new shape.  Column-major order is preserved, so entries
// can be copied straight across while cidx is advanced to each new column.
283  octave_idx_type kk = 0;
284  xcidx (0) = 0;
285  for (octave_idx_type i = 0; i < old_nc; i++)
286  for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++)
287  {
288  octave_idx_type tmp = i * old_nr + a.ridx (j);
289  octave_idx_type ii = tmp % new_nr;
290  octave_idx_type jj = (tmp - ii) / new_nr;
291  for (octave_idx_type k = kk; k < jj; k++)
292  xcidx (k+1) = j;
293  kk = jj;
294  xdata (j) = a.data (j);
295  xridx (j) = ii;
296  }
// Close out any trailing empty columns.
297  for (octave_idx_type k = kk; k < new_nc; k++)
298  xcidx (k+1) = new_nzmax;
299 }
300 
301 template <typename T, typename Alloc>
// Triplet-form constructor (the "sparse (i, j, v, m, n)" workhorse): build
// an nr-by-nc matrix from values a at positions (r, c).  Repeated indices
// are either summed (sum_terms) or resolved last-wins; nzm pre-reserves
// capacity.  Negative nr/nc mean "infer from the index extents".
// NOTE(review): several declaration lines were lost in extraction (the
// signature, and locals such as rri/k in the scalar column branch, the
// OCTAVE_LOCAL_BUFFER for ci/sidx, rsi, rr and l in later branches); the
// surviving code is kept byte-identical.
304  const octave::idx_vector& r,
305  const octave::idx_vector& c,
307  bool sum_terms, octave_idx_type nzm)
308  : m_rep (nullptr), m_dimensions ()
309 {
310  if (nr < 0)
311  nr = r.extent (0);
312  else if (r.extent (nr) > nr)
313  (*current_liboctave_error_handler)
// NOTE(review): missing space before "out of bound" in this format string
// (compare the column branch below) -- worth fixing upstream.
314  ("sparse: row index %" OCTAVE_IDX_TYPE_FORMAT "out of bound "
315  "%" OCTAVE_IDX_TYPE_FORMAT, r.extent (nr), nr);
316 
317  if (nc < 0)
318  nc = c.extent (0);
319  else if (c.extent (nc) > nc)
// NOTE(review): 'r.extent (nc)' below looks like a copy-paste of the row
// branch; presumably should be 'c.extent (nc)' -- confirm upstream.
321  ("sparse: column index %" OCTAVE_IDX_TYPE_FORMAT " out of bound "
322  "%" OCTAVE_IDX_TYPE_FORMAT, r.extent (nc), nc);
323 
324  m_dimensions = dim_vector (nr, nc);
325 
326  octave_idx_type n = a.numel ();
327  octave_idx_type rl = r.length (nr);
328  octave_idx_type cl = c.length (nc);
329  bool a_scalar = n == 1;
330  if (a_scalar)
331  {
332  if (rl != 1)
333  n = rl;
334  else if (cl != 1)
335  n = cl;
336  }
337 
338  if ((rl != 1 && rl != n) || (cl != 1 && cl != n))
339  (*current_liboctave_error_handler) ("sparse: dimension mismatch");
340 
341  // Only create m_rep after input validation to avoid memory leak.
342  m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, (nzm > 0 ? nzm : 0));
343 
// Case 1: a single element -- at most one stored entry.
344  if (rl <= 1 && cl <= 1)
345  {
346  if (n == 1 && a(0) != T ())
347  {
348  change_capacity (nzm > 1 ? nzm : 1);
349  xridx (0) = r(0);
350  xdata (0) = a(0);
351  std::fill_n (xcidx () + c(0) + 1, nc - c(0), 1);
352  }
353  }
// Case 2: scalar value broadcast over vector indices.
354  else if (a_scalar)
355  {
356  // This is completely specialized, because the sorts can be simplified.
357  T a0 = a(0);
358  if (a0 == T ())
359  {
360  // Do nothing, it's an empty matrix.
361  }
362  else if (cl == 1)
363  {
364  // Sparse column vector. Sort row indices.
365  octave::idx_vector rs = r.sorted ();
366 
367  octave_quit ();
368 
369  const octave_idx_type *rd = rs.raw ();
370  // Count unique indices.
371  octave_idx_type new_nz = 1;
372  for (octave_idx_type i = 1; i < n; i++)
373  new_nz += rd[i-1] != rd[i];
374 
375  // Allocate result.
376  change_capacity (nzm > new_nz ? nzm : new_nz);
377  std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
378 
// NOTE(review): the declarations of rri (ridx pointer, line 379) and k
// (write cursor, line 384) were lost in extraction.
380  T *rrd = data ();
381 
382  octave_quit ();
383 
385  octave_idx_type l = -1;
386 
387  if (sum_terms)
388  {
389  // Sum repeated indices.
390  for (octave_idx_type i = 0; i < n; i++)
391  {
392  if (rd[i] != l)
393  {
394  l = rd[i];
395  rri[++k] = rd[i];
396  rrd[k] = a0;
397  }
398  else
399  rrd[k] += a0;
400  }
401  }
402  else
403  {
404  // Pick the last one.
405  for (octave_idx_type i = 0; i < n; i++)
406  {
407  if (rd[i] != l)
408  {
409  l = rd[i];
410  rri[++k] = rd[i];
411  rrd[k] = a0;
412  }
413  }
414  }
415 
416  }
417  else
418  {
// Scalar value, general (row, col) index vectors: bucket sort by column,
// then sort row indices within each column.
419  octave::idx_vector rr = r;
420  octave::idx_vector cc = c;
421  const octave_idx_type *rd = rr.raw ();
422  const octave_idx_type *cd = cc.raw ();
// NOTE(review): the OCTAVE_LOCAL_BUFFER declaration of ci (line 423) was
// lost in extraction.
424  ci[0] = 0;
425  // Bin counts of column indices.
426  for (octave_idx_type i = 0; i < n; i++)
427  ci[cd[i]+1]++;
428  // Make them cumulative, shifted one to right.
429  for (octave_idx_type i = 1, s = 0; i <= nc; i++)
430  {
431  octave_idx_type s1 = s + ci[i];
432  ci[i] = s;
433  s = s1;
434  }
435 
436  octave_quit ();
437 
438  // Bucket sort.
// NOTE(review): the OCTAVE_LOCAL_BUFFER declaration of sidx (line 439)
// was lost in extraction.
440  for (octave_idx_type i = 0; i < n; i++)
441  if (rl == 1)
442  sidx[ci[cd[i]+1]++] = rd[0];
443  else
444  sidx[ci[cd[i]+1]++] = rd[i];
445 
446  // Subsorts. We don't need a stable sort, all values are equal.
447  xcidx (0) = 0;
448  for (octave_idx_type j = 0; j < nc; j++)
449  {
450  std::sort (sidx + ci[j], sidx + ci[j+1]);
451  octave_idx_type l = -1;
452  octave_idx_type nzj = 0;
453  // Count.
454  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
455  {
456  octave_idx_type k = sidx[i];
457  if (k != l)
458  {
459  l = k;
460  nzj++;
461  }
462  }
463  // Set column pointer.
464  xcidx (j+1) = xcidx (j) + nzj;
465  }
466 
467  change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
468  octave_idx_type *rri = ridx ();
469  T *rrd = data ();
470 
471  // Fill-in data.
472  for (octave_idx_type j = 0, jj = -1; j < nc; j++)
473  {
474  octave_quit ();
475  octave_idx_type l = -1;
476  if (sum_terms)
477  {
478  // Sum adjacent terms.
479  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
480  {
481  octave_idx_type k = sidx[i];
482  if (k != l)
483  {
484  l = k;
485  rrd[++jj] = a0;
486  rri[jj] = k;
487  }
488  else
489  rrd[jj] += a0;
490  }
491  }
492  else
493  {
494  // Use the last one.
495  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
496  {
497  octave_idx_type k = sidx[i];
498  if (k != l)
499  {
500  l = k;
501  rrd[++jj] = a0;
502  rri[jj] = k;
503  }
504  }
505  }
506  }
507  }
508  }
// Case 3: vector values, single column -- sparse column vector.
509  else if (cl == 1)
510  {
511  // Sparse column vector. Sort row indices.
// NOTE(review): the declaration of rsi (sort-permutation array, line 512)
// was lost in extraction.
513  octave::idx_vector rs = r.sorted (rsi);
514 
515  octave_quit ();
516 
517  const octave_idx_type *rd = rs.raw ();
518  const octave_idx_type *rdi = rsi.data ();
519  // Count unique indices.
520  octave_idx_type new_nz = 1;
521  for (octave_idx_type i = 1; i < n; i++)
522  new_nz += rd[i-1] != rd[i];
523 
524  // Allocate result.
525  change_capacity (nzm > new_nz ? nzm : new_nz);
526  std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
527 
528  octave_idx_type *rri = ridx ();
529  T *rrd = data ();
530 
531  octave_quit ();
532 
533  octave_idx_type k = 0;
534  rri[k] = rd[0];
535  rrd[k] = a(rdi[0]);
536 
537  if (sum_terms)
538  {
539  // Sum repeated indices.
540  for (octave_idx_type i = 1; i < n; i++)
541  {
542  if (rd[i] != rd[i-1])
543  {
544  rri[++k] = rd[i];
545  rrd[k] = a(rdi[i]);
546  }
547  else
548  rrd[k] += a(rdi[i]);
549  }
550  }
551  else
552  {
553  // Pick the last one.
554  for (octave_idx_type i = 1; i < n; i++)
555  {
556  if (rd[i] != rd[i-1])
557  rri[++k] = rd[i];
558  rrd[k] = a(rdi[i]);
559  }
560  }
561 
// Summing may have produced explicit zeros; squeeze them out.
562  maybe_compress (true);
563  }
// Case 4: general vector values with general (row, col) indices.
564  else
565  {
// NOTE(review): the declaration of rr (copy of r, line 566) and the
// OCTAVE_LOCAL_BUFFER declaration of ci (line 570) were lost in extraction.
567  octave::idx_vector cc = c;
568  const octave_idx_type *rd = rr.raw ();
569  const octave_idx_type *cd = cc.raw ();
571  ci[0] = 0;
572  // Bin counts of column indices.
573  for (octave_idx_type i = 0; i < n; i++)
574  ci[cd[i]+1]++;
575  // Make them cumulative, shifted one to right.
576  for (octave_idx_type i = 1, s = 0; i <= nc; i++)
577  {
578  octave_idx_type s1 = s + ci[i];
579  ci[i] = s;
580  s = s1;
581  }
582 
583  octave_quit ();
584 
585  typedef std::pair<octave_idx_type, octave_idx_type> idx_pair;
586  // Bucket sort.
587  OCTAVE_LOCAL_BUFFER (idx_pair, spairs, n);
588  for (octave_idx_type i = 0; i < n; i++)
589  {
590  idx_pair& p = spairs[ci[cd[i]+1]++];
591  if (rl == 1)
592  p.first = rd[0];
593  else
594  p.first = rd[i];
595  p.second = i;
596  }
597 
598  // Subsorts. We don't need a stable sort, the second index stabilizes it.
599  xcidx (0) = 0;
600  for (octave_idx_type j = 0; j < nc; j++)
601  {
602  std::sort (spairs + ci[j], spairs + ci[j+1]);
603  octave_idx_type l = -1;
604  octave_idx_type nzj = 0;
605  // Count.
606  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
607  {
608  octave_idx_type k = spairs[i].first;
609  if (k != l)
610  {
611  l = k;
612  nzj++;
613  }
614  }
615  // Set column pointer.
616  xcidx (j+1) = xcidx (j) + nzj;
617  }
618 
619  change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
620  octave_idx_type *rri = ridx ();
621  T *rrd = data ();
622 
623  // Fill-in data.
624  for (octave_idx_type j = 0, jj = -1; j < nc; j++)
625  {
626  octave_quit ();
// NOTE(review): the declaration of l (line 627) was lost in extraction.
628  if (sum_terms)
629  {
630  // Sum adjacent terms.
631  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
632  {
633  octave_idx_type k = spairs[i].first;
634  if (k != l)
635  {
636  l = k;
637  rrd[++jj] = a(spairs[i].second);
638  rri[jj] = k;
639  }
640  else
641  rrd[jj] += a(spairs[i].second);
642  }
643  }
644  else
645  {
646  // Use the last one.
647  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
648  {
649  octave_idx_type k = spairs[i].first;
650  if (k != l)
651  {
652  l = k;
653  rri[++jj] = k;
654  }
655  rrd[jj] = a(spairs[i].second);
656  }
657  }
658  }
659 
660  maybe_compress (true);
661  }
662 }
663 
664 /*
665 %!assert <*51880> (sparse (1:2, 2, 1:2, 2, 2), sparse ([0, 1; 0, 2]))
666 %!assert <*51880> (sparse (1:2, 1, 1:2, 2, 2), sparse ([1, 0; 2, 0]))
667 %!assert <*51880> (sparse (1:2, 2, 1:2, 2, 3), sparse ([0, 1, 0; 0, 2, 0]))
668 */
669 
670 template <typename T, typename Alloc>
// Construct a sparse matrix from a dense Array, storing only the nonzero
// elements.  Two passes: count nonzeros, then fill column by column.
673  : m_rep (nullptr), m_dimensions (a.dims ())
674 {
675  if (m_dimensions.ndims () > 2)
676  (*current_liboctave_error_handler)
677  ("Sparse::Sparse (const Array<T>&): dimension mismatch");
678 
679  octave_idx_type nr = rows ();
680  octave_idx_type nc = cols ();
681  octave_idx_type len = a.numel ();
682  octave_idx_type new_nzmax = 0;
683 
684  // First count the number of nonzero terms
685  for (octave_idx_type i = 0; i < len; i++)
686  if (a(i) != T ())
687  new_nzmax++;
688 
689  m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, new_nzmax);
690 
691  octave_idx_type ii = 0;
692  xcidx (0) = 0;
693  for (octave_idx_type j = 0; j < nc; j++)
694  {
695  for (octave_idx_type i = 0; i < nr; i++)
696  if (a.elem (i, j) != T ())
697  {
698  xdata (ii) = a.elem (i, j);
699  xridx (ii++) = i;
700  }
701  xcidx (j+1) = ii;
702  }
703 }
704 
705 template <typename T, typename Alloc>
// Destructor: drop one reference to the shared rep, deleting it when the
// count reaches zero (copy-on-write reference counting).
708 {
709  if (--m_rep->m_count == 0)
710  delete m_rep;
711 }
712 
713 template <typename T, typename Alloc>
// Copy assignment: release our rep and share a's rep (bump its refcount).
// Self-assignment is explicitly guarded.
716 {
717  if (this != &a)
718  {
719  if (--m_rep->m_count == 0)
720  delete m_rep;
721 
722  m_rep = a.m_rep;
723  m_rep->m_count++;
724 
725  m_dimensions = a.m_dimensions;
726  }
727 
728  return *this;
729 }
730 
731 template <typename T, typename Alloc>
// Convert a multidimensional subscript vector into a column-major linear
// index (Horner evaluation from the last dimension down).
735 {
736  octave_idx_type n = m_dimensions.ndims ();
737 
738  if (n <= 0 || n != ra_idx.numel ())
739  (*current_liboctave_error_handler)
740  ("Sparse<T, Alloc>::compute_index: invalid ra_idxing operation");
741 
742  octave_idx_type retval = -1;
743 
744  retval = ra_idx(--n);
745 
746  while (--n >= 0)
747  {
748  retval *= m_dimensions(n);
749  retval += ra_idx(n);
750  }
751 
752  return retval;
753 }
754 
755 template <typename T, typename Alloc>
757 T
// Report an out-of-range single index; the error handler does not return.
759 {
760  (*current_liboctave_error_handler) ("%s (%" OCTAVE_IDX_TYPE_FORMAT "): "
761  "range error", fcn, n);
762 }
763 
764 template <typename T, typename Alloc>
766 T&
// Non-const variant of the single-index range error reporter.
768 {
769  (*current_liboctave_error_handler) ("%s (%" OCTAVE_IDX_TYPE_FORMAT "): "
770  "range error", fcn, n);
771 }
772 
773 template <typename T, typename Alloc>
775 T
// Report an out-of-range (i, j) subscript; the error handler does not return.
777  octave_idx_type i, octave_idx_type j) const
778 {
779  (*current_liboctave_error_handler)
780  ("%s (%" OCTAVE_IDX_TYPE_FORMAT ", %" OCTAVE_IDX_TYPE_FORMAT "): "
781  "range error", fcn, i, j);
782 }
783 
784 template <typename T, typename Alloc>
786 T&
// Non-const variant of the (i, j) range error reporter.
789 {
790  (*current_liboctave_error_handler)
791  ("%s (%" OCTAVE_IDX_TYPE_FORMAT ", %" OCTAVE_IDX_TYPE_FORMAT "): "
792  "range error", fcn, i, j);
793 }
794 
795 template <typename T, typename Alloc>
797 T
// Report an out-of-range N-D subscript, formatting all indices into the
// message.  (The declaration of 'n', original line 805, was lost in
// extraction; presumably n = ra_idx.numel () -- confirm.)
799  const Array<octave_idx_type>& ra_idx) const
800 {
801  std::ostringstream buf;
802 
803  buf << fcn << " (";
804 
806 
807  if (n > 0)
808  buf << ra_idx(0);
809 
810  for (octave_idx_type i = 1; i < n; i++)
811  buf << ", " << ra_idx(i);
812 
813  buf << "): range error";
814 
815  std::string buf_str = buf.str ();
816 
817  (*current_liboctave_error_handler) ("%s", buf_str.c_str ());
818 }
819 
820 template <typename T, typename Alloc>
822 T&
// Non-const variant of the N-D range error reporter.  (The declaration of
// 'n', original line 830, was lost in extraction; presumably
// n = ra_idx.numel () -- confirm.)
825 {
826  std::ostringstream buf;
827 
828  buf << fcn << " (";
829 
831 
832  if (n > 0)
833  buf << ra_idx(0);
834 
835  for (octave_idx_type i = 1; i < n; i++)
836  buf << ", " << ra_idx(i);
837 
838  buf << "): range error";
839 
840  std::string buf_str = buf.str ();
841 
842  (*current_liboctave_error_handler) ("%s", buf_str.c_str ());
843 }
844 
845 template <typename T, typename Alloc>
// Return a reshaped copy with the same elements in column-major order.
// N-D requests are collapsed onto the second dimension (with a warning),
// since sparse matrices are strictly 2-D.
848 Sparse<T, Alloc>::reshape (const dim_vector& new_dims) const
849 {
850  Sparse<T, Alloc> retval;
851  dim_vector dims2 = new_dims;
852 
853  if (dims2.ndims () > 2)
854  {
855  (*current_liboctave_warning_with_id_handler)
856  ("Octave:reshape-smashes-dims",
857  "reshape: sparse reshape to N-D array smashes dims");
858 
859  for (octave_idx_type i = 2; i < dims2.ndims (); i++)
860  dims2(1) *= dims2(i);
861 
862  dims2.resize (2);
863  }
864 
865  if (m_dimensions != dims2)
866  {
867  if (m_dimensions.numel () == dims2.numel ())
868  {
869  octave_idx_type new_nnz = nnz ();
870  octave_idx_type new_nr = dims2 (0);
871  octave_idx_type new_nc = dims2 (1);
872  octave_idx_type old_nr = rows ();
873  octave_idx_type old_nc = cols ();
874  retval = Sparse<T, Alloc> (new_nr, new_nc, new_nnz);
875  // Special case for empty matrices (bug #64080)
876  if (new_nr == 0 || new_nc == 0)
877  return retval;
878 
879  octave_idx_type kk = 0;
880  retval.xcidx (0) = 0;
881  // Quotient and remainder of i * old_nr divided by new_nr.
882  // Track them individually to avoid overflow (bug #42850).
883  octave_idx_type i_old_qu = 0;
884  octave_idx_type i_old_rm = static_cast<octave_idx_type> (-old_nr);
885  for (octave_idx_type i = 0; i < old_nc; i++)
886  {
887  i_old_rm += old_nr;
888  if (i_old_rm >= new_nr)
889  {
890  i_old_qu += i_old_rm / new_nr;
891  i_old_rm = i_old_rm % new_nr;
892  }
893  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
894  {
895  octave_idx_type ii, jj;
896  ii = (i_old_rm + ridx (j)) % new_nr;
897  jj = i_old_qu + (i_old_rm + ridx (j)) / new_nr;
898 
899  // Original calculation subject to overflow
900  // ii = (i*old_nr + ridx (j)) % new_nr
901  // jj = (i*old_nr + ridx (j)) / new_nr
902  for (octave_idx_type k = kk; k < jj; k++)
903  retval.xcidx (k+1) = j;
904  kk = jj;
905  retval.xdata (j) = data (j);
906  retval.xridx (j) = ii;
907  }
908  }
// Close out trailing empty columns of the result.
909  for (octave_idx_type k = kk; k < new_nc; k++)
910  retval.xcidx (k+1) = new_nnz;
911  }
912  else
913  {
914  std::string dimensions_str = m_dimensions.str ();
915  std::string new_dims_str = new_dims.str ();
916 
917  (*current_liboctave_error_handler)
918  ("reshape: can't reshape %s array to %s array",
919  dimensions_str.c_str (), new_dims_str.c_str ());
920  }
921  }
922  else
// Same shape requested: share the rep, no copy needed.
923  retval = *this;
924 
925  return retval;
926 }
927 
928 template <typename T, typename Alloc>
// Permute dimensions.  For a 2-D sparse matrix the only legal permutations
// are the identity [1, 2] (return *this) and the swap [2, 1] (transpose).
932 {
933  // The only valid permutations of a sparse array are [1, 2] and [2, 1].
934 
935  bool fail = false;
936  bool trans = false;
937 
938  if (perm_vec.numel () == 2)
939  {
940  if (perm_vec(0) == 0 && perm_vec(1) == 1)
941  /* do nothing */;
942  else if (perm_vec(0) == 1 && perm_vec(1) == 0)
943  trans = true;
944  else
945  fail = true;
946  }
947  else
948  fail = true;
949 
950  if (fail)
951  (*current_liboctave_error_handler)
952  ("permutation vector contains an invalid element");
953 
954  return trans ? this->transpose () : *this;
955 }
956 
957 template <typename T, typename Alloc>
959 void
// Single-index resize: only meaningful for vectors (or empties), where the
// vector orientation is preserved.  (The final 'else' body, original line
// 974, was lost in extraction; presumably an err_invalid_resize call for
// genuine matrices -- confirm.)
961 {
962  octave_idx_type nr = rows ();
963  octave_idx_type nc = cols ();
964 
965  if (nr == 0)
966  resize (1, std::max (nc, n));
967  else if (nc == 0)
968  resize (nr, (n + nr - 1) / nr); // Ain't it wicked?
969  else if (nr == 1)
970  resize (1, n);
971  else if (nc == 1)
972  resize (n, 1);
973  else
975 }
976 
977 template <typename T, typename Alloc>
979 void
// Resize to the given dimensions; sparse matrices are strictly 2-D.
981 {
982  octave_idx_type n = dv.ndims ();
983 
984  if (n != 2)
985  (*current_liboctave_error_handler) ("sparse array must be 2-D");
986 
987  resize (dv(0), dv(1));
988 }
989 
990 template <typename T, typename Alloc>
992 void
// Resize to r-by-c in place, keeping elements that remain in range.
// Shrinking rows drops out-of-range entries; growing pads with zeros.
994 {
995  if (r < 0 || c < 0)
996  (*current_liboctave_error_handler) ("can't resize to negative dimension");
997 
998  if (r == dim1 () && c == dim2 ())
999  return;
1000 
1001  // This wouldn't be necessary for r >= rows () if m_nrows wasn't part of the
1002  // Sparse rep. It is not good for anything in there.
1003  make_unique ();
1004 
1005  if (r < rows ())
1006  {
// Compact away entries whose row index is now out of range.
1007  octave_idx_type i = 0;
1008  octave_idx_type k = 0;
1009  for (octave_idx_type j = 1; j <= m_rep->m_ncols; j++)
1010  {
1011  octave_idx_type u = xcidx (j);
1012  for (; i < u; i++)
1013  if (xridx (i) < r)
1014  {
1015  xdata (k) = xdata (i);
1016  xridx (k++) = xridx (i);
1017  }
1018  xcidx (j) = k;
1019  }
1020  }
1021 
1022  m_rep->m_nrows = m_dimensions(0) = r;
1023 
1024  if (c != m_rep->m_ncols)
1025  {
// Reallocate the column pointer array; new trailing columns are empty, so
// they replicate the last existing column pointer.
1026  octave_idx_type *new_cidx = m_rep->idx_type_allocate (c+1);
1027  std::copy_n (m_rep->m_cidx, std::min (c, m_rep->m_ncols) + 1, new_cidx);
1028  m_rep->idx_type_deallocate (m_rep->m_cidx, m_rep->m_ncols + 1);
1029  m_rep->m_cidx = new_cidx;
1030 
1031  if (c > m_rep->m_ncols)
1032  std::fill_n (m_rep->m_cidx + m_rep->m_ncols + 1, c - m_rep->m_ncols,
1033  m_rep->m_cidx[m_rep->m_ncols]);
1034  }
1035 
1036  m_rep->m_ncols = m_dimensions(1) = c;
1037 
1038  m_rep->change_length (m_rep->nnz ());
1039 }
1040 
1041 template <typename T, typename Alloc>
1042 OCTAVE_API
// Insert sparse matrix a into *this with its top-left corner at (r, c),
// replacing the covered submatrix.  Builds a fresh rep sized by an exact
// count of surviving + inserted entries, then merges column by column.
1046 {
1047  octave_idx_type a_rows = a.rows ();
1048  octave_idx_type a_cols = a.cols ();
1049  octave_idx_type nr = rows ();
1050  octave_idx_type nc = cols ();
1051 
1052  if (r < 0 || r + a_rows > rows () || c < 0 || c + a_cols > cols ())
1053  (*current_liboctave_error_handler) ("range error for insert");
1054 
1055  // First count the number of elements in the final array
1056  octave_idx_type nel = cidx (c) + a.nnz ();
1057 
1058  if (c + a_cols < nc)
1059  nel += cidx (nc) - cidx (c + a_cols);
1060 
// Entries in the affected columns but outside a's row range survive.
1061  for (octave_idx_type i = c; i < c + a_cols; i++)
1062  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1063  if (ridx (j) < r || ridx (j) >= r + a_rows)
1064  nel++;
1065 
// Keep the old rep alive via tmp while we swap in a fresh one.
1066  Sparse<T, Alloc> tmp (*this);
1067  --m_rep->m_count;
1068  m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, nel);
1069 
// Columns before c are copied unchanged.
1070  for (octave_idx_type i = 0; i < tmp.cidx (c); i++)
1071  {
1072  data (i) = tmp.data (i);
1073  ridx (i) = tmp.ridx (i);
1074  }
1075  for (octave_idx_type i = 0; i < c + 1; i++)
1076  cidx (i) = tmp.cidx (i);
1077 
1078  octave_idx_type ii = cidx (c);
1079 
// Affected columns: old entries above a, then a's entries (row-shifted by
// r), then old entries below a -- preserving sorted row order.
1080  for (octave_idx_type i = c; i < c + a_cols; i++)
1081  {
1082  octave_quit ();
1083 
1084  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1085  if (tmp.ridx (j) < r)
1086  {
1087  data (ii) = tmp.data (j);
1088  ridx (ii++) = tmp.ridx (j);
1089  }
1090 
1091  octave_quit ();
1092 
1093  for (octave_idx_type j = a.cidx (i-c); j < a.cidx (i-c+1); j++)
1094  {
1095  data (ii) = a.data (j);
1096  ridx (ii++) = r + a.ridx (j);
1097  }
1098 
1099  octave_quit ();
1100 
1101  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1102  if (tmp.ridx (j) >= r + a_rows)
1103  {
1104  data (ii) = tmp.data (j);
1105  ridx (ii++) = tmp.ridx (j);
1106  }
1107 
1108  cidx (i+1) = ii;
1109  }
1110 
// Columns after the insertion window are copied unchanged.
1111  for (octave_idx_type i = c + a_cols; i < nc; i++)
1112  {
1113  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1114  {
1115  data (ii) = tmp.data (j);
1116  ridx (ii++) = tmp.ridx (j);
1117  }
1118  cidx (i+1) = ii;
1119  }
1120 
1121  return *this;
1122 }
1123 
1124 template <typename T, typename Alloc>
1125 OCTAVE_API
// Subscript-vector convenience overload: forwards to insert (a, row, col).
1129 {
1130 
1131  if (ra_idx.numel () != 2)
1132  (*current_liboctave_error_handler) ("range error for insert");
1133 
1134  return insert (a, ra_idx(0), ra_idx(1));
1135 }
1136 
1137 template <typename T, typename Alloc>
1138 OCTAVE_API
// Return the transpose, computed with a linear-time bucket/counting
// transpose: count entries per row, prefix-sum into start offsets, then
// scatter each column's entries into its row bucket.
1141 {
1142  assert (ndims () == 2);
1143 
1144  octave_idx_type nr = rows ();
1145  octave_idx_type nc = cols ();
1146  octave_idx_type nz = nnz ();
1147  Sparse<T, Alloc> retval (nc, nr, nz);
1148 
1149  for (octave_idx_type i = 0; i < nz; i++)
1150  retval.xcidx (ridx (i) + 1)++;
1151  // retval.xcidx[1:nr] holds the row degrees for rows 0:(nr-1)
1152  nz = 0;
1153  for (octave_idx_type i = 1; i <= nr; i++)
1154  {
1155  const octave_idx_type tmp = retval.xcidx (i);
1156  retval.xcidx (i) = nz;
1157  nz += tmp;
1158  }
1159  // retval.xcidx[1:nr] holds row entry *start* offsets for rows 0:(nr-1)
1160 
1161  for (octave_idx_type j = 0; j < nc; j++)
1162  for (octave_idx_type k = cidx (j); k < cidx (j+1); k++)
1163  {
1164  octave_idx_type q = retval.xcidx (ridx (k) + 1)++;
1165  retval.xridx (q) = j;
1166  retval.xdata (q) = data (k);
1167  }
1168  assert (nnz () == retval.xcidx (nr));
1169  // retval.xcidx[1:nr] holds row entry *end* offsets for rows 0:(nr-1)
1170  // and retval.xcidx[0:(nr-1)] holds their row entry *start* offsets
1171 
1172  return retval;
1173 }
1174 
1175 // Lower bound lookup. Could also use octave_sort, but that has upper bound
1176 // semantics, so requires some manipulation to set right. Uses a plain loop
1177 // for small columns.
1178 static
1180 lblookup (const octave_idx_type *ridx, octave_idx_type nr,
1181  octave_idx_type ri)
1182 {
1183  if (nr <= 8)
1184  {
1185  octave_idx_type l;
1186  for (l = 0; l < nr; l++)
1187  if (ridx[l] >= ri)
1188  break;
1189  return l;
1190  }
1191  else
1192  return std::lower_bound (ridx, ridx + nr, ri) - ridx;
1193 }
1194 
1195 template <typename T, typename Alloc>
1196 OCTAVE_API
1197 void
// Delete the elements selected by a single (linear) index from a sparse
// vector.  Column and row vectors get specialized fast paths; a general
// matrix is only legal with a colon-equivalent index (delete everything)
// or is handled by flattening, deleting, then transposing back.
1199 {
1200  Sparse<T, Alloc> retval;
1201 
1202  assert (ndims () == 2);
1203 
1204  octave_idx_type nr = dim1 ();
1205  octave_idx_type nc = dim2 ();
1206  octave_idx_type nz = nnz ();
1207 
1208  octave_idx_type nel = numel (); // Can throw.
1209 
1210  const dim_vector idx_dims = idx.orig_dimensions ();
1211 
1212  if (idx.extent (nel) > nel)
1213  octave::err_del_index_out_of_range (true, idx.extent (nel), nel);
1214 
1215  if (nc == 1)
1216  {
1217  // Sparse column vector.
1218  const Sparse<T, Alloc> tmp = *this; // constant copy to prevent COW.
1219 
1220  octave_idx_type lb, ub;
1221 
1222  if (idx.is_cont_range (nel, lb, ub))
1223  {
1224  // Special-case a contiguous range.
1225  // Look-up indices first.
1226  octave_idx_type li = lblookup (tmp.ridx (), nz, lb);
1227  octave_idx_type ui = lblookup (tmp.ridx (), nz, ub);
1228  // Copy data and adjust indices.
1229  octave_idx_type nz_new = nz - (ui - li);
1230  *this = Sparse<T, Alloc> (nr - (ub - lb), 1, nz_new);
1231  std::copy_n (tmp.data (), li, data ());
1232  std::copy_n (tmp.ridx (), li, xridx ());
1233  std::copy (tmp.data () + ui, tmp.data () + nz, xdata () + li);
// Shift surviving row indices down by the length of the deleted range.
1234  mx_inline_sub (nz - ui, xridx () + li, tmp.ridx () + ui, ub - lb);
1235  xcidx (1) = nz_new;
1236  }
1237  else
1238  {
// General index set: merge-walk the sorted deletion indices against the
// stored row indices, keeping entries not selected for deletion and
// shifting their row index by the number of deletions preceding them.
1239  OCTAVE_LOCAL_BUFFER (octave_idx_type, ridx_new, nz);
1240  OCTAVE_LOCAL_BUFFER (T, data_new, nz);
1241  octave::idx_vector sidx = idx.sorted (true);
1242  const octave_idx_type *sj = sidx.raw ();
1243  octave_idx_type sl = sidx.length (nel);
1244  octave_idx_type nz_new = 0;
1245  octave_idx_type j = 0;
1246  for (octave_idx_type i = 0; i < nz; i++)
1247  {
1248  octave_idx_type r = tmp.ridx (i);
1249  for (; j < sl && sj[j] < r; j++) ;
1250  if (j == sl || sj[j] > r)
1251  {
1252  data_new[nz_new] = tmp.data (i);
1253  ridx_new[nz_new++] = r - j;
1254  }
1255  }
1256 
1257  *this = Sparse<T, Alloc> (nr - sl, 1, nz_new);
1258  std::copy_n (ridx_new, nz_new, ridx ());
1259  std::copy_n (data_new, nz_new, xdata ());
1260  xcidx (1) = nz_new;
1261  }
1262  }
1263  else if (nr == 1)
1264  {
1265  // Sparse row vector.
1266  octave_idx_type lb, ub;
1267  if (idx.is_cont_range (nc, lb, ub))
1268  {
1269  const Sparse<T, Alloc> tmp = *this;
1270  octave_idx_type lbi = tmp.cidx (lb);
1271  octave_idx_type ubi = tmp.cidx (ub);
1272  octave_idx_type new_nz = nz - (ubi - lbi);
1273  *this = Sparse<T, Alloc> (1, nc - (ub - lb), new_nz);
1274  std::copy_n (tmp.data (), lbi, data ());
1275  std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1276  std::fill_n (ridx (), new_nz, static_cast<octave_idx_type> (0));
1277  std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
// NOTE(review): the destination 'xcidx () + 1' below looks like it should
// be 'xcidx () + lb + 1' to land after the entries just copied -- confirm
// against the upstream repository (a line may have been garbled here).
1278  mx_inline_sub (nc - ub, xcidx () + 1, tmp.cidx () + ub + 1,
1279  ubi - lbi);
1280  }
1281  else
1282  *this = index (idx.complement (nc));
1283  }
1284  else if (idx.length (nel) != 0)
1285  {
1286  if (idx.is_colon_equiv (nel))
1287  *this = Sparse<T, Alloc> ();
1288  else
1289  {
// Flatten to a column, delete, then transpose to get a row vector,
// matching Matlab's semantics for element deletion from a matrix.
1290  *this = index (octave::idx_vector::colon);
1291  delete_elements (idx);
1292  *this = transpose (); // We want a row vector.
1293  }
1294  }
1295 }
1296 
1297 template <typename T, typename Alloc>
1298 OCTAVE_API
1299 void
1301  const octave::idx_vector& idx_j)
1302 {
1303  assert (ndims () == 2);
1304 
1305  octave_idx_type nr = dim1 ();
1306  octave_idx_type nc = dim2 ();
1307  octave_idx_type nz = nnz ();
1308 
1309  if (idx_i.is_colon ())
1310  {
1311  // Deleting columns.
1312  octave_idx_type lb, ub;
1313  if (idx_j.extent (nc) > nc)
1314  octave::err_del_index_out_of_range (false, idx_j.extent (nc), nc);
1315  else if (idx_j.is_cont_range (nc, lb, ub))
1316  {
1317  if (lb == 0 && ub == nc)
1318  {
1319  // Delete all rows and columns.
1320  *this = Sparse<T, Alloc> (nr, 0);
1321  }
1322  else if (nz == 0)
1323  {
1324  // No elements to preserve; adjust dimensions.
1325  *this = Sparse<T, Alloc> (nr, nc - (ub - lb));
1326  }
1327  else
1328  {
1329  const Sparse<T, Alloc> tmp = *this;
1330  octave_idx_type lbi = tmp.cidx (lb);
1331  octave_idx_type ubi = tmp.cidx (ub);
1332  octave_idx_type new_nz = nz - (ubi - lbi);
1333 
1334  *this = Sparse<T, Alloc> (nr, nc - (ub - lb), new_nz);
1335  std::copy_n (tmp.data (), lbi, data ());
1336  std::copy_n (tmp.ridx (), lbi, ridx ());
1337  std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1338  std::copy (tmp.ridx () + ubi, tmp.ridx () + nz, xridx () + lbi);
1339  std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
1340  mx_inline_sub (nc - ub, xcidx () + lb + 1,
1341  tmp.cidx () + ub + 1, ubi - lbi);
1342  }
1343  }
1344  else
1345  *this = index (idx_i, idx_j.complement (nc));
1346  }
1347  else if (idx_j.is_colon ())
1348  {
1349  // Deleting rows.
1350  octave_idx_type lb, ub;
1351  if (idx_i.extent (nr) > nr)
1352  octave::err_del_index_out_of_range (false, idx_i.extent (nr), nr);
1353  else if (idx_i.is_cont_range (nr, lb, ub))
1354  {
1355  if (lb == 0 && ub == nr)
1356  {
1357  // Delete all rows and columns.
1358  *this = Sparse<T, Alloc> (0, nc);
1359  }
1360  else if (nz == 0)
1361  {
1362  // No elements to preserve; adjust dimensions.
1363  *this = Sparse<T, Alloc> (nr - (ub - lb), nc);
1364  }
1365  else
1366  {
1367  // This is more memory-efficient than the approach below.
1368  const Sparse<T, Alloc> tmpl = index (octave::idx_vector (0, lb), idx_j);
1369  const Sparse<T, Alloc> tmpu = index (octave::idx_vector (ub, nr), idx_j);
1370  *this = Sparse<T, Alloc> (nr - (ub - lb), nc,
1371  tmpl.nnz () + tmpu.nnz ());
1372  for (octave_idx_type j = 0, k = 0; j < nc; j++)
1373  {
1374  for (octave_idx_type i = tmpl.cidx (j); i < tmpl.cidx (j+1);
1375  i++)
1376  {
1377  xdata (k) = tmpl.data (i);
1378  xridx (k++) = tmpl.ridx (i);
1379  }
1380  for (octave_idx_type i = tmpu.cidx (j); i < tmpu.cidx (j+1);
1381  i++)
1382  {
1383  xdata (k) = tmpu.data (i);
1384  xridx (k++) = tmpu.ridx (i) + lb;
1385  }
1386 
1387  xcidx (j+1) = k;
1388  }
1389  }
1390  }
1391  else
1392  {
1393  // This is done by transposing, deleting columns, then transposing
1394  // again.
1395  Sparse<T, Alloc> tmp = transpose ();
1396  tmp.delete_elements (idx_j, idx_i);
1397  *this = tmp.transpose ();
1398  }
1399  }
1400  else
1401  {
1402  // Empty assignment (no elements to delete) is OK if there is at
1403  // least one zero-length index and at most one other index that is
1404  // non-colon (or equivalent) index. Since we only have two
1405  // indices, we just need to check that we have at least one zero
1406  // length index. Matlab considers "[]" to be an empty index but
1407  // not "false". We accept both.
1408 
1409  bool empty_assignment
1410  = (idx_i.length (nr) == 0 || idx_j.length (nc) == 0);
1411 
1412  if (! empty_assignment)
1413  (*current_liboctave_error_handler)
1414  ("a null assignment can only have one non-colon index");
1415  }
1416 }
1417 
1418 template <typename T, typename Alloc>
1419 OCTAVE_API
1420 void
1422 {
1423  if (dim == 0)
1424  delete_elements (idx, octave::idx_vector::colon);
1425  else if (dim == 1)
1426  delete_elements (octave::idx_vector::colon, idx);
1427  else
1428  (*current_liboctave_error_handler) ("invalid dimension in delete_elements");
1429 }
1430 
1431 template <typename T, typename Alloc>
1432 OCTAVE_API
1434 Sparse<T, Alloc>::index (const octave::idx_vector& idx, bool resize_ok) const
1435 {
1436  Sparse<T, Alloc> retval;
1437 
1438  assert (ndims () == 2);
1439 
1440  octave_idx_type nr = dim1 ();
1441  octave_idx_type nc = dim2 ();
1442  octave_idx_type nz = nnz ();
1443 
1444  octave_idx_type nel = numel (); // Can throw.
1445 
1446  const dim_vector idx_dims = idx.orig_dimensions ().redim (2);
1447 
1448  if (idx.is_colon ())
1449  {
1450  if (nc == 1)
1451  retval = *this;
1452  else
1453  {
1454  // Fast magic colon processing.
1455  retval = Sparse<T, Alloc> (nel, 1, nz);
1456 
1457  for (octave_idx_type i = 0; i < nc; i++)
1458  {
1459  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1460  {
1461  retval.xdata (j) = data (j);
1462  retval.xridx (j) = ridx (j) + i * nr;
1463  }
1464  }
1465 
1466  retval.xcidx (0) = 0;
1467  retval.xcidx (1) = nz;
1468  }
1469  }
1470  else if (idx.extent (nel) > nel)
1471  {
1472  if (! resize_ok)
1473  octave::err_index_out_of_range (1, 1, idx.extent (nel), nel, dims ());
1474 
1475  // resize_ok is completely handled here.
1476  octave_idx_type ext = idx.extent (nel);
1477  Sparse<T, Alloc> tmp = *this;
1478  tmp.resize1 (ext);
1479  retval = tmp.index (idx);
1480  }
1481  else if (nr == 1 && nc == 1)
1482  {
1483  // You have to be pretty sick to get to this bit of code,
1484  // since you have a scalar stored as a sparse matrix, and
1485  // then want to make a dense matrix with sparse representation.
1486  // Ok, we'll do it, but you deserve what you get!!
1487  retval = (Sparse<T, Alloc> (idx_dims(0), idx_dims(1), nz ? data (0) : T ()));
1488  }
1489  else if (nc == 1)
1490  {
1491  // Sparse column vector.
1492  octave_idx_type lb, ub;
1493 
1494  if (idx.is_scalar ())
1495  {
1496  // Scalar index - just a binary lookup.
1497  octave_idx_type i = lblookup (ridx (), nz, idx(0));
1498  if (i < nz && ridx (i) == idx(0))
1499  retval = Sparse (1, 1, data (i));
1500  else
1501  retval = Sparse (1, 1);
1502  }
1503  else if (idx.is_cont_range (nel, lb, ub))
1504  {
1505  // Special-case a contiguous range.
1506  // Look-up indices first.
1507  octave_idx_type li = lblookup (ridx (), nz, lb);
1508  octave_idx_type ui = lblookup (ridx (), nz, ub);
1509  // Copy data and adjust indices.
1510  octave_idx_type nz_new = ui - li;
1511  retval = Sparse<T, Alloc> (ub - lb, 1, nz_new);
1512  std::copy_n (data () + li, nz_new, retval.data ());
1513  mx_inline_sub (nz_new, retval.xridx (), ridx () + li, lb);
1514  retval.xcidx (1) = nz_new;
1515  }
1516  else if (idx.is_permutation (nel) && idx.isvector ())
1517  {
1518  if (idx.is_range () && idx.increment () == -1)
1519  {
1520  retval = Sparse<T, Alloc> (nr, 1, nz);
1521 
1522  for (octave_idx_type j = 0; j < nz; j++)
1523  retval.ridx (j) = nr - ridx (nz - j - 1) - 1;
1524 
1525  std::copy_n (cidx (), 2, retval.cidx ());
1526  std::reverse_copy (data (), data () + nz, retval.data ());
1527  }
1528  else
1529  {
1530  Array<T> tmp = array_value ();
1531  tmp = tmp.index (idx);
1532  retval = Sparse<T, Alloc> (tmp);
1533  }
1534  }
1535  else
1536  {
1537  // If indexing a sparse column vector by a vector, the result is a
1538  // sparse column vector, otherwise it inherits the shape of index.
1539  // Vector transpose is cheap, so do it right here.
1540 
1541  Array<octave_idx_type> tmp_idx = idx.as_array ().as_matrix ();
1542 
1543  const Array<octave_idx_type> idxa = (idx_dims(0) == 1
1544  ? tmp_idx.transpose ()
1545  : tmp_idx);
1546 
1547  octave_idx_type new_nr = idxa.rows ();
1548  octave_idx_type new_nc = idxa.cols ();
1549 
1550  // Lookup.
1551  // FIXME: Could specialize for sorted idx?
1552  Array<octave_idx_type> lidx (dim_vector (new_nr, new_nc));
1553  for (octave_idx_type i = 0; i < new_nr*new_nc; i++)
1554  lidx.xelem (i) = lblookup (ridx (), nz, idxa(i));
1555 
1556  // Count matches.
1557  retval = Sparse<T, Alloc> (idxa.rows (), idxa.cols ());
1558  for (octave_idx_type j = 0; j < new_nc; j++)
1559  {
1560  octave_idx_type nzj = 0;
1561  for (octave_idx_type i = 0; i < new_nr; i++)
1562  {
1563  octave_idx_type l = lidx.xelem (i, j);
1564  if (l < nz && ridx (l) == idxa(i, j))
1565  nzj++;
1566  else
1567  lidx.xelem (i, j) = nz;
1568  }
1569  retval.xcidx (j+1) = retval.xcidx (j) + nzj;
1570  }
1571 
1572  retval.change_capacity (retval.xcidx (new_nc));
1573 
1574  // Copy data and set row indices.
1575  octave_idx_type k = 0;
1576  for (octave_idx_type j = 0; j < new_nc; j++)
1577  for (octave_idx_type i = 0; i < new_nr; i++)
1578  {
1579  octave_idx_type l = lidx.xelem (i, j);
1580  if (l < nz)
1581  {
1582  retval.data (k) = data (l);
1583  retval.xridx (k++) = i;
1584  }
1585  }
1586  }
1587  }
1588  else if (nr == 1)
1589  {
1590  octave_idx_type lb, ub;
1591  if (idx.is_scalar ())
1592  retval = Sparse<T, Alloc> (1, 1, elem (0, idx(0)));
1593  else if (idx.is_cont_range (nel, lb, ub))
1594  {
1595  // Special-case a contiguous range.
1596  octave_idx_type lbi = cidx (lb);
1597  octave_idx_type ubi = cidx (ub);
1598  octave_idx_type new_nz = ubi - lbi;
1599  retval = Sparse<T, Alloc> (1, ub - lb, new_nz);
1600  std::copy_n (data () + lbi, new_nz, retval.data ());
1601  std::fill_n (retval.ridx (), new_nz, static_cast<octave_idx_type> (0));
1602  mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1603  }
1604  else
1605  {
1606  // Sparse row vectors occupy O(nr) storage anyway, so let's just
1607  // convert the matrix to full, index, and sparsify the result.
1608  retval = Sparse<T, Alloc> (array_value ().index (idx));
1609  }
1610  }
1611  else
1612  {
1613  if (nr != 0 && idx.is_scalar ())
1614  retval = Sparse<T, Alloc> (1, 1, elem (idx(0) % nr, idx(0) / nr));
1615  else
1616  {
1617  // Indexing a non-vector sparse matrix by linear indexing.
1618  // I suppose this is rare (and it may easily overflow), so let's take
1619  // the easy way, and reshape first to column vector, which is already
1620  // handled above.
1621  retval = index (octave::idx_vector::colon).index (idx);
1622  // In this case we're supposed to always inherit the shape, but
1623  // column(row) doesn't do it, so we'll do it instead.
1624  if (idx_dims(0) == 1 && idx_dims(1) != 1)
1625  retval = retval.transpose ();
1626  }
1627  }
1628 
1629  return retval;
1630 }
1631 
1632 template <typename T, typename Alloc>
1633 OCTAVE_API
1636  const octave::idx_vector& idx_j,
1637  bool resize_ok) const
1638 {
1639  Sparse<T, Alloc> retval;
1640 
1641  assert (ndims () == 2);
1642 
1643  octave_idx_type nr = dim1 ();
1644  octave_idx_type nc = dim2 ();
1645 
1646  octave_idx_type n = idx_i.length (nr);
1647  octave_idx_type m = idx_j.length (nc);
1648 
1649  octave_idx_type lb, ub;
1650 
1651  if (idx_i.extent (nr) > nr || idx_j.extent (nc) > nc)
1652  {
1653  // resize_ok is completely handled here.
1654  if (resize_ok)
1655  {
1656  octave_idx_type ext_i = idx_i.extent (nr);
1657  octave_idx_type ext_j = idx_j.extent (nc);
1658  Sparse<T, Alloc> tmp = *this;
1659  tmp.resize (ext_i, ext_j);
1660  retval = tmp.index (idx_i, idx_j);
1661  }
1662  else if (idx_i.extent (nr) > nr)
1663  octave::err_index_out_of_range (2, 1, idx_i.extent (nr), nr, dims ());
1664  else
1665  octave::err_index_out_of_range (2, 2, idx_j.extent (nc), nc, dims ());
1666  }
1667  else if (nr == 1 && nc == 1)
1668  {
1669  // Scalars stored as sparse matrices occupy more memory than
1670  // a scalar, so let's just convert the matrix to full, index,
1671  // and sparsify the result.
1672 
1673  retval = Sparse<T, Alloc> (array_value ().index (idx_i, idx_j));
1674  }
1675  else if (idx_i.is_colon ())
1676  {
1677  // Great, we're just manipulating columns. This is going to be quite
1678  // efficient, because the columns can stay compressed as they are.
1679  if (idx_j.is_colon ())
1680  retval = *this; // Shallow copy.
1681  else if (idx_j.is_cont_range (nc, lb, ub))
1682  {
1683  // Special-case a contiguous range.
1684  octave_idx_type lbi = cidx (lb);
1685  octave_idx_type ubi = cidx (ub);
1686  octave_idx_type new_nz = ubi - lbi;
1687  retval = Sparse<T, Alloc> (nr, ub - lb, new_nz);
1688  std::copy_n (data () + lbi, new_nz, retval.data ());
1689  std::copy_n (ridx () + lbi, new_nz, retval.ridx ());
1690  mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1691  }
1692  else
1693  {
1694  // Count new nonzero elements.
1695  retval = Sparse<T, Alloc> (nr, m);
1696  for (octave_idx_type j = 0; j < m; j++)
1697  {
1698  octave_idx_type jj = idx_j(j);
1699  retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1700  }
1701 
1702  retval.change_capacity (retval.xcidx (m));
1703 
1704  // Copy data & indices.
1705  for (octave_idx_type j = 0; j < m; j++)
1706  {
1707  octave_idx_type ljj = cidx (idx_j(j));
1708  octave_idx_type lj = retval.xcidx (j);
1709  octave_idx_type nzj = retval.xcidx (j+1) - lj;
1710 
1711  std::copy_n (data () + ljj, nzj, retval.data () + lj);
1712  std::copy_n (ridx () + ljj, nzj, retval.ridx () + lj);
1713  }
1714  }
1715  }
1716  else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
1717  {
1718  // It's actually vector indexing. The 1D index is specialized for that.
1719  retval = index (idx_i);
1720 
1721  // If nr == 1 then the vector indexing will return a column vector!!
1722  if (nr == 1)
1723  retval.transpose ();
1724  }
1725  else if (idx_i.is_scalar ())
1726  {
1727  octave_idx_type ii = idx_i(0);
1728  retval = Sparse<T, Alloc> (1, m);
1730  for (octave_idx_type j = 0; j < m; j++)
1731  {
1732  octave_quit ();
1733  octave_idx_type jj = idx_j(j);
1734  octave_idx_type lj = cidx (jj);
1735  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1736 
1737  // Scalar index - just a binary lookup.
1738  octave_idx_type i = lblookup (ridx () + lj, nzj, ii);
1739  if (i < nzj && ridx (i+lj) == ii)
1740  {
1741  ij[j] = i + lj;
1742  retval.xcidx (j+1) = retval.xcidx (j) + 1;
1743  }
1744  else
1745  retval.xcidx (j+1) = retval.xcidx (j);
1746  }
1747 
1748  retval.change_capacity (retval.xcidx (m));
1749 
1750  // Copy data, adjust row indices.
1751  for (octave_idx_type j = 0; j < m; j++)
1752  {
1753  octave_idx_type i = retval.xcidx (j);
1754  if (retval.xcidx (j+1) > i)
1755  {
1756  retval.xridx (i) = 0;
1757  retval.xdata (i) = data (ij[j]);
1758  }
1759  }
1760  }
1761  else if (idx_i.is_cont_range (nr, lb, ub))
1762  {
1763  retval = Sparse<T, Alloc> (n, m);
1766  for (octave_idx_type j = 0; j < m; j++)
1767  {
1768  octave_quit ();
1769  octave_idx_type jj = idx_j(j);
1770  octave_idx_type lj = cidx (jj);
1771  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1772  octave_idx_type lij, uij;
1773 
1774  // Lookup indices.
1775  li[j] = lij = lblookup (ridx () + lj, nzj, lb) + lj;
1776  ui[j] = uij = lblookup (ridx () + lj, nzj, ub) + lj;
1777  retval.xcidx (j+1) = retval.xcidx (j) + ui[j] - li[j];
1778  }
1779 
1780  retval.change_capacity (retval.xcidx (m));
1781 
1782  // Copy data, adjust row indices.
1783  for (octave_idx_type j = 0, k = 0; j < m; j++)
1784  {
1785  octave_quit ();
1786  for (octave_idx_type i = li[j]; i < ui[j]; i++)
1787  {
1788  retval.xdata (k) = data (i);
1789  retval.xridx (k++) = ridx (i) - lb;
1790  }
1791  }
1792  }
1793  else if (idx_i.is_permutation (nr))
1794  {
1795  // The columns preserve their length, just need to renumber and sort them.
1796  // Count new nonzero elements.
1797  retval = Sparse<T, Alloc> (nr, m);
1798  for (octave_idx_type j = 0; j < m; j++)
1799  {
1800  octave_idx_type jj = idx_j(j);
1801  retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1802  }
1803 
1804  retval.change_capacity (retval.xcidx (m));
1805 
1806  octave_quit ();
1807 
1808  if (idx_i.is_range () && idx_i.increment () == -1)
1809  {
1810  // It's nr:-1:1. Just flip all columns.
1811  for (octave_idx_type j = 0; j < m; j++)
1812  {
1813  octave_quit ();
1814  octave_idx_type jj = idx_j(j);
1815  octave_idx_type lj = cidx (jj);
1816  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1817  octave_idx_type li = retval.xcidx (j);
1818  octave_idx_type uj = lj + nzj - 1;
1819  for (octave_idx_type i = 0; i < nzj; i++)
1820  {
1821  retval.xdata (li + i) = data (uj - i); // Copy in reverse order.
1822  retval.xridx (li + i) = nr - 1 - ridx (uj - i); // Ditto with transform.
1823  }
1824  }
1825  }
1826  else
1827  {
1828  // Get inverse permutation.
1829  octave::idx_vector idx_iinv = idx_i.inverse_permutation (nr);
1830  const octave_idx_type *iinv = idx_iinv.raw ();
1831 
1832  // Scatter buffer.
1833  OCTAVE_LOCAL_BUFFER (T, scb, nr);
1834  octave_idx_type *rri = retval.ridx ();
1835 
1836  for (octave_idx_type j = 0; j < m; j++)
1837  {
1838  octave_quit ();
1839  octave_idx_type jj = idx_j(j);
1840  octave_idx_type lj = cidx (jj);
1841  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1842  octave_idx_type li = retval.xcidx (j);
1843  // Scatter the column, transform indices.
1844  for (octave_idx_type i = 0; i < nzj; i++)
1845  scb[rri[li + i] = iinv[ridx (lj + i)]] = data (lj + i);
1846 
1847  octave_quit ();
1848 
1849  // Sort them.
1850  std::sort (rri + li, rri + li + nzj);
1851 
1852  // Gather.
1853  for (octave_idx_type i = 0; i < nzj; i++)
1854  retval.xdata (li + i) = scb[rri[li + i]];
1855  }
1856  }
1857 
1858  }
1859  else if (idx_j.is_colon ())
1860  {
1861  // This requires uncompressing columns, which is generally costly,
1862  // so we rely on the efficient transpose to handle this.
1863  // It may still make sense to optimize some cases here.
1864  retval = transpose ();
1865  retval = retval.index (octave::idx_vector::colon, idx_i);
1866  retval = retval.transpose ();
1867  }
1868  else
1869  {
1870  // A(I, J) is decomposed into A(:, J)(I, :).
1871  retval = index (octave::idx_vector::colon, idx_j);
1872  retval = retval.index (idx_i, octave::idx_vector::colon);
1873  }
1874 
1875  return retval;
1876 }
1877 
1878 template <typename T, typename Alloc>
1879 OCTAVE_API
1880 void
1882  const Sparse<T, Alloc>& rhs)
1883 {
1884  Sparse<T, Alloc> retval;
1885 
1886  assert (ndims () == 2);
1887 
1888  octave_idx_type nr = dim1 ();
1889  octave_idx_type nc = dim2 ();
1890  octave_idx_type nz = nnz ();
1891 
1892  octave_idx_type n = numel (); // Can throw.
1893 
1894  octave_idx_type rhl = rhs.numel ();
1895 
1896  if (idx.length (n) == rhl)
1897  {
1898  if (rhl == 0)
1899  return;
1900 
1901  octave_idx_type nx = idx.extent (n);
1902  // Try to resize first if necessary.
1903  if (nx != n)
1904  {
1905  resize1 (nx);
1906  n = numel ();
1907  nr = rows ();
1908  nc = cols ();
1909  // nz is preserved.
1910  }
1911 
1912  if (idx.is_colon ())
1913  {
1914  *this = rhs.reshape (m_dimensions);
1915  }
1916  else if (nc == 1 && rhs.cols () == 1)
1917  {
1918  // Sparse column vector to sparse column vector assignment.
1919 
1920  octave_idx_type lb, ub;
1921  if (idx.is_cont_range (nr, lb, ub))
1922  {
1923  // Special-case a contiguous range.
1924  // Look-up indices first.
1925  octave_idx_type li = lblookup (ridx (), nz, lb);
1926  octave_idx_type ui = lblookup (ridx (), nz, ub);
1927  octave_idx_type rnz = rhs.nnz ();
1928  octave_idx_type new_nz = nz - (ui - li) + rnz;
1929 
1930  if (new_nz >= nz && new_nz <= nzmax ())
1931  {
1932  // Adding/overwriting elements, enough capacity allocated.
1933 
1934  if (new_nz > nz)
1935  {
1936  // Make room first.
1937  std::copy_backward (data () + ui, data () + nz,
1938  data () + nz + rnz);
1939  std::copy_backward (ridx () + ui, ridx () + nz,
1940  ridx () + nz + rnz);
1941  }
1942 
1943  // Copy data and adjust indices from rhs.
1944  std::copy_n (rhs.data (), rnz, data () + li);
1945  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1946  }
1947  else
1948  {
1949  // Clearing elements or exceeding capacity, allocate afresh
1950  // and paste pieces.
1951  const Sparse<T, Alloc> tmp = *this;
1952  *this = Sparse<T, Alloc> (nr, 1, new_nz);
1953 
1954  // Head ...
1955  std::copy_n (tmp.data (), li, data ());
1956  std::copy_n (tmp.ridx (), li, ridx ());
1957 
1958  // new stuff ...
1959  std::copy_n (rhs.data (), rnz, data () + li);
1960  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1961 
1962  // ...tail
1963  std::copy (tmp.data () + ui, tmp.data () + nz,
1964  data () + li + rnz);
1965  std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
1966  ridx () + li + rnz);
1967  }
1968 
1969  cidx (1) = new_nz;
1970  }
1971  else if (idx.is_range () && idx.increment () == -1)
1972  {
1973  // It's s(u:-1:l) = r. Reverse the assignment.
1974  assign (idx.sorted (), rhs.index (octave::idx_vector (rhl - 1, 0, -1)));
1975  }
1976  else if (idx.is_permutation (n))
1977  {
1978  *this = rhs.index (idx.inverse_permutation (n));
1979  }
1980  else if (rhs.nnz () == 0)
1981  {
1982  // Elements are being zeroed.
1983  octave_idx_type *ri = ridx ();
1984  for (octave_idx_type i = 0; i < rhl; i++)
1985  {
1986  octave_idx_type iidx = idx(i);
1987  octave_idx_type li = lblookup (ri, nz, iidx);
1988  if (li != nz && ri[li] == iidx)
1989  xdata (li) = T ();
1990  }
1991 
1992  maybe_compress (true);
1993  }
1994  else
1995  {
1996  const Sparse<T, Alloc> tmp = *this;
1997  octave_idx_type new_nz = nz + rhl;
1998  // Disassembly our matrix...
1999  Array<octave_idx_type> new_ri (dim_vector (new_nz, 1));
2000  Array<T> new_data (dim_vector (new_nz, 1));
2001  std::copy_n (tmp.ridx (), nz, new_ri.fortran_vec ());
2002  std::copy_n (tmp.data (), nz, new_data.fortran_vec ());
2003  // ... insert new data (densified) ...
2004  idx.copy_data (new_ri.fortran_vec () + nz);
2005  new_data.assign (octave::idx_vector (nz, new_nz), rhs.array_value ());
2006  // ... reassembly.
2007  *this = Sparse<T, Alloc> (new_data, new_ri, 0, nr, nc, false);
2008  }
2009  }
2010  else
2011  {
2012  dim_vector save_dims = m_dimensions;
2013  *this = index (octave::idx_vector::colon);
2014  assign (idx, rhs.index (octave::idx_vector::colon));
2015  *this = reshape (save_dims);
2016  }
2017  }
2018  else if (rhl == 1)
2019  {
2020  rhl = idx.length (n);
2021  if (rhs.nnz () != 0)
2022  assign (idx, Sparse<T, Alloc> (rhl, 1, rhs.data (0)));
2023  else
2024  assign (idx, Sparse<T, Alloc> (rhl, 1));
2025  }
2026  else
2027  octave::err_nonconformant ("=", dim_vector(idx.length (n), 1), rhs.dims());
2028 }
2029 
2030 template <typename T, typename Alloc>
2031 OCTAVE_API
2032 void
2034 {
2035  // FIXME: Converting the RHS and forwarding to the sparse matrix
2036  // assignment function is simpler, but it might be good to have a
2037  // specialization...
2038 
2039  assign (idx, Sparse<T, Alloc> (1, 1, rhs));
2040 }
2041 
2042 template <typename T, typename Alloc>
2043 OCTAVE_API
2044 void
2046  const octave::idx_vector& idx_j,
2047  const Sparse<T, Alloc>& rhs)
2048 {
2049  Sparse<T, Alloc> retval;
2050 
2051  assert (ndims () == 2);
2052 
2053  octave_idx_type nr = dim1 ();
2054  octave_idx_type nc = dim2 ();
2055  octave_idx_type nz = nnz ();
2056 
2057  octave_idx_type n = rhs.rows ();
2058  octave_idx_type m = rhs.columns ();
2059 
2060  // FIXME: this should probably be written more like the
2061  // Array<T>::assign function...
2062 
2063  bool orig_zero_by_zero = (nr == 0 && nc == 0);
2064 
2065  if (orig_zero_by_zero || (idx_i.length (nr) == n && idx_j.length (nc) == m))
2066  {
2067  octave_idx_type nrx;
2068  octave_idx_type ncx;
2069 
2070  if (orig_zero_by_zero)
2071  {
2072  if (idx_i.is_colon ())
2073  {
2074  nrx = n;
2075 
2076  if (idx_j.is_colon ())
2077  ncx = m;
2078  else
2079  ncx = idx_j.extent (nc);
2080  }
2081  else if (idx_j.is_colon ())
2082  {
2083  nrx = idx_i.extent (nr);
2084  ncx = m;
2085  }
2086  else
2087  {
2088  nrx = idx_i.extent (nr);
2089  ncx = idx_j.extent (nc);
2090  }
2091  }
2092  else
2093  {
2094  nrx = idx_i.extent (nr);
2095  ncx = idx_j.extent (nc);
2096  }
2097 
2098  // Try to resize first if necessary.
2099  if (nrx != nr || ncx != nc)
2100  {
2101  resize (nrx, ncx);
2102  nr = rows ();
2103  nc = cols ();
2104  // nz is preserved.
2105  }
2106 
2107  if (n == 0 || m == 0)
2108  return;
2109 
2110  if (idx_i.is_colon ())
2111  {
2112  octave_idx_type lb, ub;
2113  // Great, we're just manipulating columns. This is going to be quite
2114  // efficient, because the columns can stay compressed as they are.
2115  if (idx_j.is_colon ())
2116  *this = rhs; // Shallow copy.
2117  else if (idx_j.is_cont_range (nc, lb, ub))
2118  {
2119  // Special-case a contiguous range.
2120  octave_idx_type li = cidx (lb);
2121  octave_idx_type ui = cidx (ub);
2122  octave_idx_type rnz = rhs.nnz ();
2123  octave_idx_type new_nz = nz - (ui - li) + rnz;
2124 
2125  if (new_nz >= nz && new_nz <= nzmax ())
2126  {
2127  // Adding/overwriting elements, enough capacity allocated.
2128 
2129  if (new_nz > nz)
2130  {
2131  // Make room first.
2132  std::copy_backward (data () + ui, data () + nz,
2133  data () + new_nz);
2134  std::copy_backward (ridx () + ui, ridx () + nz,
2135  ridx () + new_nz);
2136  mx_inline_add2 (nc - ub, cidx () + ub + 1, new_nz - nz);
2137  }
2138 
2139  // Copy data and indices from rhs.
2140  std::copy_n (rhs.data (), rnz, data () + li);
2141  std::copy_n (rhs.ridx (), rnz, ridx () + li);
2142  mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2143  li);
2144 
2145  assert (nnz () == new_nz);
2146  }
2147  else
2148  {
2149  // Clearing elements or exceeding capacity, allocate afresh
2150  // and paste pieces.
2151  const Sparse<T, Alloc> tmp = *this;
2152  *this = Sparse<T, Alloc> (nr, nc, new_nz);
2153 
2154  // Head...
2155  std::copy_n (tmp.data (), li, data ());
2156  std::copy_n (tmp.ridx (), li, ridx ());
2157  std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
2158 
2159  // new stuff...
2160  std::copy_n (rhs.data (), rnz, data () + li);
2161  std::copy_n (rhs.ridx (), rnz, ridx () + li);
2162  mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2163  li);
2164 
2165  // ...tail.
2166  std::copy (tmp.data () + ui, tmp.data () + nz,
2167  data () + li + rnz);
2168  std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
2169  ridx () + li + rnz);
2170  mx_inline_add (nc - ub, cidx () + ub + 1,
2171  tmp.cidx () + ub + 1, new_nz - nz);
2172 
2173  assert (nnz () == new_nz);
2174  }
2175  }
2176  else if (idx_j.is_range () && idx_j.increment () == -1)
2177  {
2178  // It's s(:,u:-1:l) = r. Reverse the assignment.
2179  assign (idx_i, idx_j.sorted (),
2180  rhs.index (idx_i, octave::idx_vector (m - 1, 0, -1)));
2181  }
2182  else if (idx_j.is_permutation (nc))
2183  {
2184  *this = rhs.index (idx_i, idx_j.inverse_permutation (nc));
2185  }
2186  else
2187  {
2188  const Sparse<T, Alloc> tmp = *this;
2189  *this = Sparse<T, Alloc> (nr, nc);
2190  OCTAVE_LOCAL_BUFFER_INIT (octave_idx_type, jsav, nc, -1);
2191 
2192  // Assemble column lengths.
2193  for (octave_idx_type i = 0; i < nc; i++)
2194  xcidx (i+1) = tmp.cidx (i+1) - tmp.cidx (i);
2195 
2196  for (octave_idx_type i = 0; i < m; i++)
2197  {
2198  octave_idx_type j =idx_j(i);
2199  jsav[j] = i;
2200  xcidx (j+1) = rhs.cidx (i+1) - rhs.cidx (i);
2201  }
2202 
2203  // Make cumulative.
2204  for (octave_idx_type i = 0; i < nc; i++)
2205  xcidx (i+1) += xcidx (i);
2206 
2207  change_capacity (nnz ());
2208 
2209  // Merge columns.
2210  for (octave_idx_type i = 0; i < nc; i++)
2211  {
2212  octave_idx_type l = xcidx (i);
2213  octave_idx_type u = xcidx (i+1);
2214  octave_idx_type j = jsav[i];
2215  if (j >= 0)
2216  {
2217  // from rhs
2218  octave_idx_type k = rhs.cidx (j);
2219  std::copy_n (rhs.data () + k, u - l, xdata () + l);
2220  std::copy_n (rhs.ridx () + k, u - l, xridx () + l);
2221  }
2222  else
2223  {
2224  // original
2225  octave_idx_type k = tmp.cidx (i);
2226  std::copy_n (tmp.data () + k, u - l, xdata () + l);
2227  std::copy_n (tmp.ridx () + k, u - l, xridx () + l);
2228  }
2229  }
2230 
2231  }
2232  }
2233  else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
2234  {
2235  // It's just vector indexing. The 1D assign is specialized for that.
2236  assign (idx_i, rhs);
2237  }
2238  else if (idx_j.is_colon ())
2239  {
2240  if (idx_i.is_permutation (nr))
2241  {
2242  *this = rhs.index (idx_i.inverse_permutation (nr), idx_j);
2243  }
2244  else
2245  {
2246  // FIXME: optimize more special cases?
2247  // In general this requires unpacking the columns, which is slow,
2248  // especially for many small columns. OTOH, transpose is an
2249  // efficient O(nr+nc+nnz) operation.
2250  *this = transpose ();
2251  assign (octave::idx_vector::colon, idx_i, rhs.transpose ());
2252  *this = transpose ();
2253  }
2254  }
2255  else
2256  {
2257  // Split it into 2 assignments and one indexing.
2258  Sparse<T, Alloc> tmp = index (octave::idx_vector::colon, idx_j);
2259  tmp.assign (idx_i, octave::idx_vector::colon, rhs);
2260  assign (octave::idx_vector::colon, idx_j, tmp);
2261  }
2262  }
2263  else if (m == 1 && n == 1)
2264  {
2265  n = idx_i.length (nr);
2266  m = idx_j.length (nc);
2267  if (rhs.nnz () != 0)
2268  assign (idx_i, idx_j, Sparse<T, Alloc> (n, m, rhs.data (0)));
2269  else
2270  assign (idx_i, idx_j, Sparse<T, Alloc> (n, m));
2271  }
2272  else if (idx_i.length (nr) == m && idx_j.length (nc) == n
2273  && (n == 1 || m == 1))
2274  {
2275  assign (idx_i, idx_j, rhs.transpose ());
2276  }
2277  else
2278  octave::err_nonconformant ("=", idx_i.length (nr), idx_j.length (nc), n, m);
2279 }
2280 
2281 template <typename T, typename Alloc>
2282 OCTAVE_API
2283 void
2285  const octave::idx_vector& idx_j,
2286  const T& rhs)
2287 {
2288  // FIXME: Converting the RHS and forwarding to the sparse matrix
2289  // assignment function is simpler, but it might be good to have a
2290  // specialization...
2291 
2292  assign (idx_i, idx_j, Sparse<T, Alloc> (1, 1, rhs));
2293 }
2294 
2295 // Can't use versions of these in Array.cc due to duplication of the
2296 // instantiations for Array<double> and Sparse<double>, etc.
2297 template <typename T>
2298 OCTAVE_API
2299 bool
2301  typename ref_param<T>::type b)
2302 {
2303  return (a < b);
2304 }
2305 
2306 template <typename T>
2307 OCTAVE_API
2308 bool
2310  typename ref_param<T>::type b)
2311 {
2312  return (a > b);
2313 }
2314 
2315 template <typename T, typename Alloc>
2316 OCTAVE_API
2319 {
2320  Sparse<T, Alloc> m = *this;
2321 
2322  octave_idx_type nr = m.rows ();
2323  octave_idx_type nc = m.columns ();
2324 
2325  if (m.numel () < 1 || dim > 1)
2326  return m;
2327 
2328  bool sort_by_column = (dim > 0);
2329  if (sort_by_column)
2330  {
2331  m = m.transpose ();
2332  std::swap (nr, nc);
2333  }
2334 
2335  octave_sort<T> lsort;
2336 
2337  if (mode == ASCENDING)
2338  lsort.set_compare (sparse_ascending_compare<T>);
2339  else if (mode == DESCENDING)
2340  lsort.set_compare (sparse_descending_compare<T>);
2341  else
2343  ("Sparse<T, Alloc>::sort: invalid MODE");
2344 
2345  T *v = m.data ();
2346  octave_idx_type *mcidx = m.cidx ();
2347  octave_idx_type *mridx = m.ridx ();
2348 
2349  for (octave_idx_type j = 0; j < nc; j++)
2350  {
2351  octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2352  lsort.sort (v, ns);
2353 
2354  octave_idx_type i;
2355  if (mode == ASCENDING)
2356  {
2357  for (i = 0; i < ns; i++)
2358  if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2359  break;
2360  }
2361  else
2362  {
2363  for (i = 0; i < ns; i++)
2364  if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2365  break;
2366  }
2367  for (octave_idx_type k = 0; k < i; k++)
2368  mridx[k] = k;
2369  for (octave_idx_type k = i; k < ns; k++)
2370  mridx[k] = k - ns + nr;
2371 
2372  v += ns;
2373  mridx += ns;
2374  }
2375 
2376  if (sort_by_column)
2377  m = m.transpose ();
2378 
2379  return m;
2380 }
2381 
// Indexed variant of sort: like sort (dim, mode) but also fills SIDX,
// a full nr-by-nc array giving, for each output position, the original
// row (or column, for dim == 1) index it came from — the second output
// of Octave's sort().  Positions holding implicit zeros receive the
// remaining original indices in ascending order.
// NOTE(review): the extraction dropped original lines 2384-2385 (the
// declarator "Sparse<T, Alloc>::sort (Array<octave_idx_type>& sidx,
// ...)"), line 2413 (the error-handler call) and line 2421 (an
// OCTAVE_LOCAL_BUFFER declaration of the scratch index array 'vi',
// sized nr) — TODO confirm against the upstream file.
2382 template <typename T, typename Alloc>
2383 OCTAVE_API
2386  octave_idx_type dim, sortmode mode) const
2387 {
2388  Sparse<T, Alloc> m = *this;
2389 
2390  octave_idx_type nr = m.rows ();
2391  octave_idx_type nc = m.columns ();
2392 
// Trivial case: identity permutation, matrix unchanged.
2393  if (m.numel () < 1 || dim > 1)
2394  {
2395  sidx = Array<octave_idx_type> (dim_vector (nr, nc), 1);
2396  return m;
2397  }
2398 
// Row-wise sort via transpose, as in the non-indexed overload.
2399  bool sort_by_column = (dim > 0);
2400  if (sort_by_column)
2401  {
2402  m = m.transpose ();
2403  std::swap (nr, nc);
2404  }
2405 
2406  octave_sort<T> indexed_sort;
2407 
2408  if (mode == ASCENDING)
2409  indexed_sort.set_compare (sparse_ascending_compare<T>);
2410  else if (mode == DESCENDING)
2411  indexed_sort.set_compare (sparse_descending_compare<T>);
2412  else
2414  ("Sparse<T, Alloc>::sort: invalid MODE");
2415 
2416  T *v = m.data ();
2417  octave_idx_type *mcidx = m.cidx ();
2418  octave_idx_type *mridx = m.ridx ();
2419 
2420  sidx = Array<octave_idx_type> (dim_vector (nr, nc));
2422 
2423  for (octave_idx_type j = 0; j < nc; j++)
2424  {
2425  octave_idx_type ns = mcidx[j + 1] - mcidx[j];
// offset = start of column j in the column-major SIDX array.
2426  octave_idx_type offset = j * nr;
2427 
2428  if (ns == 0)
2429  {
// All-zero column: permutation is the identity.
2430  for (octave_idx_type k = 0; k < nr; k++)
2431  sidx(offset + k) = k;
2432  }
2433  else
2434  {
// Save original row indices so they can be permuted with the values.
2435  for (octave_idx_type i = 0; i < ns; i++)
2436  vi[i] = mridx[i];
2437 
2438  indexed_sort.sort (v, vi, ns);
2439 
// i = number of stored values that sort before an implicit zero.
2440  octave_idx_type i;
2441  if (mode == ASCENDING)
2442  {
2443  for (i = 0; i < ns; i++)
2444  if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2445  break;
2446  }
2447  else
2448  {
2449  for (i = 0; i < ns; i++)
2450  if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2451  break;
2452  }
2453 
// Fill the zero band of SIDX with the rows that held no stored
// value, in ascending order, starting right after the first i slots.
2454  octave_idx_type ii = 0;
2455  octave_idx_type jj = i;
2456  for (octave_idx_type k = 0; k < nr; k++)
2457  {
2458  if (ii < ns && mridx[ii] == k)
2459  ii++;
2460  else
2461  sidx(offset + jj++) = k;
2462  }
2463 
// Entries sorted before zero: top of the column.
2464  for (octave_idx_type k = 0; k < i; k++)
2465  {
2466  sidx(k + offset) = vi[k];
2467  mridx[k] = k;
2468  }
2469 
// Entries sorted after zero: bottom of the column.
2470  for (octave_idx_type k = i; k < ns; k++)
2471  {
2472  sidx(k - ns + nr + offset) = vi[k];
2473  mridx[k] = k - ns + nr;
2474  }
2475 
2476  v += ns;
2477  mridx += ns;
2478  }
2479  }
2480 
2481  if (sort_by_column)
2482  {
2483  m = m.transpose ();
2484  sidx = sidx.transpose ();
2485  }
2486 
2487  return m;
2488 }
2489 
// Matlab-compatible diag(): for a matrix argument, extract the k-th
// diagonal as a sparse column vector; for a vector argument, build a
// square sparse matrix with the vector on the k-th diagonal.
// NOTE(review): the extraction dropped original lines 2492-2493 (the
// declarator "Sparse<T, Alloc> Sparse<T, Alloc>::diag (octave_idx_type
// k) const") and line 2497 (presumably the declaration of the result
// "Sparse<T, Alloc> d;") — TODO confirm against the upstream file.
2490 template <typename T, typename Alloc>
2491 OCTAVE_API
2494 {
2495  octave_idx_type nnr = rows ();
2496  octave_idx_type nnc = cols ();
2498 
2499  if (nnr == 0 || nnc == 0)
2500  ; // do nothing
2501  else if (nnr != 1 && nnc != 1)
2502  {
// --- Matrix case: extract the k-th diagonal. ---
// Shrink the effective dimensions so ndiag below is the diagonal length.
2503  if (k > 0)
2504  nnc -= k;
2505  else if (k < 0)
2506  nnr += k;
2507 
2508  if (nnr > 0 && nnc > 0)
2509  {
2510  octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;
2511 
2512  // Count the number of nonzero elements
2513  octave_idx_type nel = 0;
2514  if (k > 0)
2515  {
2516  for (octave_idx_type i = 0; i < ndiag; i++)
2517  if (elem (i, i+k) != 0.)
2518  nel++;
2519  }
2520  else if (k < 0)
2521  {
2522  for (octave_idx_type i = 0; i < ndiag; i++)
2523  if (elem (i-k, i) != 0.)
2524  nel++;
2525  }
2526  else
2527  {
2528  for (octave_idx_type i = 0; i < ndiag; i++)
2529  if (elem (i, i) != 0.)
2530  nel++;
2531  }
2532 
// Result is an ndiag-by-1 column vector with nel stored entries.
2533  d = Sparse<T, Alloc> (ndiag, 1, nel);
2534  d.xcidx (0) = 0;
2535  d.xcidx (1) = nel;
2536 
// Second pass: copy the nonzero diagonal values and their positions.
2537  octave_idx_type ii = 0;
2538  if (k > 0)
2539  {
2540  for (octave_idx_type i = 0; i < ndiag; i++)
2541  {
2542  T tmp = elem (i, i+k);
2543  if (tmp != 0.)
2544  {
2545  d.xdata (ii) = tmp;
2546  d.xridx (ii++) = i;
2547  }
2548  }
2549  }
2550  else if (k < 0)
2551  {
2552  for (octave_idx_type i = 0; i < ndiag; i++)
2553  {
2554  T tmp = elem (i-k, i);
2555  if (tmp != 0.)
2556  {
2557  d.xdata (ii) = tmp;
2558  d.xridx (ii++) = i;
2559  }
2560  }
2561  }
2562  else
2563  {
2564  for (octave_idx_type i = 0; i < ndiag; i++)
2565  {
2566  T tmp = elem (i, i);
2567  if (tmp != 0.)
2568  {
2569  d.xdata (ii) = tmp;
2570  d.xridx (ii++) = i;
2571  }
2572  }
2573  }
2574  }
2575  else
2576  {
2577  // Matlab returns [] 0x1 for out-of-range diagonal
2578 
2579  octave_idx_type nr = 0;
2580  octave_idx_type nc = 1;
2581  octave_idx_type nz = 0;
2582 
2583  d = Sparse<T, Alloc> (nr, nc, nz);
2584  }
2585  }
2586  else // one of dimensions == 1 (vector)
2587  {
// --- Vector case: build a square matrix with the vector on diagonal k. ---
// roff/coff: row/column offset of the diagonal within the result.
2588  octave_idx_type roff = 0;
2589  octave_idx_type coff = 0;
2590  if (k > 0)
2591  {
2592  roff = 0;
2593  coff = k;
2594  }
2595  else if (k < 0)
2596  {
2597  roff = -k;
2598  coff = 0;
2599  }
2600 
2601  if (nnr == 1)
2602  {
// Row vector: stored entries are already in column order, so each
// source column j maps directly to result column j + coff.
2603  octave_idx_type n = nnc + std::abs (k);
2604  octave_idx_type nz = nnz ();
2605 
2606  d = Sparse<T, Alloc> (n, n, nz);
2607 
2608  if (nnz () > 0)
2609  {
// Leading columns before the diagonal start are empty.
2610  for (octave_idx_type i = 0; i < coff+1; i++)
2611  d.xcidx (i) = 0;
2612 
2613  for (octave_idx_type j = 0; j < nnc; j++)
2614  {
2615  for (octave_idx_type i = cidx (j); i < cidx (j+1); i++)
2616  {
2617  d.xdata (i) = data (i);
2618  d.xridx (i) = j + roff;
2619  }
2620  d.xcidx (j + coff + 1) = cidx (j+1);
2621  }
2622 
// Trailing columns after the diagonal end are empty.
2623  for (octave_idx_type i = nnc + coff + 1; i < n + 1; i++)
2624  d.xcidx (i) = nz;
2625  }
2626  }
2627  else
2628  {
// Column vector: walk the stored row indices, emitting one result
// column per source row.
2629  octave_idx_type n = nnr + std::abs (k);
2630  octave_idx_type nz = nnz ();
2631 
2632  d = Sparse<T, Alloc> (n, n, nz);
2633 
2634  if (nnz () > 0)
2635  {
2636  octave_idx_type ii = 0;
2637  octave_idx_type ir = ridx (0);
2638 
2639  for (octave_idx_type i = 0; i < coff+1; i++)
2640  d.xcidx (i) = 0;
2641 
2642  for (octave_idx_type i = 0; i < nnr; i++)
2643  {
2644  if (ir == i)
2645  {
2646  d.xdata (ii) = data (ii);
2647  d.xridx (ii++) = ir + roff;
2648 
2649  if (ii != nz)
2650  ir = ridx (ii);
2651  }
2652  d.xcidx (i + coff + 1) = ii;
2653  }
2654 
2655  for (octave_idx_type i = nnr + coff + 1; i < n+1; i++)
2656  d.xcidx (i) = nz;
2657  }
2658  }
2659  }
2660 
2661  return d;
2662 }
2663 
// Concatenate N sparse matrices along DIM (0 = vertically, 1 =
// horizontally).  DIM == -1 / -2 selects the [;] / [,] bracket rules
// (dim_vector::hvcat) instead of the cat() rules.  Dimension mismatch
// and invalid DIM are reported through the liboctave error handler.
// NOTE(review): the extraction dropped original lines 2666-2667, the
// start of the declarator ("Sparse<T, Alloc> Sparse<T, Alloc>::cat
// (int dim, octave_idx_type n,"); the parameter list continues on the
// first line below.
2664 template <typename T, typename Alloc>
2665 OCTAVE_API
2668  const Sparse<T, Alloc> *sparse_list)
2669 {
2670  // Default concatenation.
2671  bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
2672 
// Negative dims encode bracket-style concatenation: -1 -> dim 0 with
// hvcat rules, -2 -> dim 1 with hvcat rules.
2673  if (dim == -1 || dim == -2)
2674  {
2675  concat_rule = &dim_vector::hvcat;
2676  dim = -dim - 1;
2677  }
2678  else if (dim < 0)
2679  (*current_liboctave_error_handler) ("cat: invalid dimension");
2680 
2681  dim_vector dv;
2682  octave_idx_type total_nz = 0;
2683  if (dim != 0 && dim != 1)
2684  (*current_liboctave_error_handler)
2685  ("cat: invalid dimension for sparse concatenation");
2686 
// Single operand: nothing to concatenate.
2687  if (n == 1)
2688  return sparse_list[0];
2689 
// First pass: accumulate result dimensions and total nonzero count.
2690  for (octave_idx_type i = 0; i < n; i++)
2691  {
2692  if (! (dv.*concat_rule) (sparse_list[i].dims (), dim))
2693  (*current_liboctave_error_handler) ("cat: dimension mismatch");
2694 
2695  total_nz += sparse_list[i].nnz ();
2696  }
2697 
2698  Sparse<T, Alloc> retval (dv, total_nz);
2699 
2700  if (retval.isempty ())
2701  return retval;
2702 
2703  switch (dim)
2704  {
2705  case 0:
2706  {
2707  // sparse vertcat. This is not efficiently handled by assignment,
2708  // so we'll do it directly.
2709  octave_idx_type l = 0;
2710  for (octave_idx_type j = 0; j < dv(1); j++)
2711  {
2712  octave_quit ();
2713 
// rcum = cumulative row offset of operand i within the result.
2714  octave_idx_type rcum = 0;
2715  for (octave_idx_type i = 0; i < n; i++)
2716  {
2717  const Sparse<T, Alloc>& spi = sparse_list[i];
2718  // Skipping empty matrices. See the comment in Array.cc.
2719  if (spi.isempty ())
2720  continue;
2721 
2722  octave_idx_type kl = spi.cidx (j);
2723  octave_idx_type ku = spi.cidx (j+1);
2724  for (octave_idx_type k = kl; k < ku; k++, l++)
2725  {
2726  retval.xridx (l) = spi.ridx (k) + rcum;
2727  retval.xdata (l) = spi.data (k);
2728  }
2729 
2730  rcum += spi.rows ();
2731  }
2732 
2733  retval.xcidx (j+1) = l;
2734  }
2735 
2736  break;
2737  }
2738  case 1:
2739  {
// Horizontal cat: assign each operand into its column slice.
2740  octave_idx_type l = 0;
2741  for (octave_idx_type i = 0; i < n; i++)
2742  {
2743  octave_quit ();
2744 
2745  // Skipping empty matrices. See the comment in Array.cc.
2746  if (sparse_list[i].isempty ())
2747  continue;
2748 
2749  octave_idx_type u = l + sparse_list[i].columns ();
2750  retval.assign (octave::idx_vector::colon, octave::idx_vector (l, u),
2751  sparse_list[i]);
2752  l = u;
2753  }
2754 
2755  break;
2756  }
2757  default:
2758  assert (false);
2759  }
2760 
2761  return retval;
2762 }
2763 
// Convert this sparse matrix to a dense Array<T>, filling unstored
// positions with T ().  The row-vector case is special-cased: a row
// vector has at most one stored entry per column, so the data array
// can be walked linearly using only the cidx increments.
// NOTE(review): the extraction dropped original line 2767, the
// declarator "Sparse<T, Alloc>::array_value () const".
2764 template <typename T, typename Alloc>
2765 OCTAVE_API
2766 Array<T>
2768 {
2769  Array<T> retval (dims (), T ());
2770  if (rows () == 1)
2771  {
2772  octave_idx_type i = 0;
2773  for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2774  {
// cidx (j+1) > i iff column j holds the i-th stored value.
2775  if (cidx (j+1) > i)
2776  retval.xelem (j) = data (i++);
2777  }
2778  }
2779  else
2780  {
// General case: scatter each column's stored entries by row index.
2781  for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2782  for (octave_idx_type i = cidx (j), iu = cidx (j+1); i < iu; i++)
2783  retval.xelem (ridx (i), j) = data (i);
2784  }
2785 
2786  return retval;
2787 }
2788 
2789 template <typename T>
2790 OCTAVE_API
2791 std::istream&
2792 read_sparse_matrix (std::istream& is, Sparse<T>& a,
2793  T (*read_fcn) (std::istream&))
2794 {
2795  octave_idx_type nr = a.rows ();
2796  octave_idx_type nc = a.cols ();
2797  octave_idx_type nz = a.nzmax ();
2798 
2799  if (nr > 0 && nc > 0)
2800  {
2801  octave_idx_type itmp;
2802  octave_idx_type jtmp;
2803  octave_idx_type iold = 0;
2804  octave_idx_type jold = 0;
2805  octave_idx_type ii = 0;
2806  T tmp;
2807 
2808  a.cidx (0) = 0;
2809  for (octave_idx_type i = 0; i < nz; i++)
2810  {
2811  itmp = 0; jtmp = 0;
2812  is >> itmp;
2813  itmp--;
2814 
2815  is >> jtmp;
2816  jtmp--;
2817 
2818  if (is.fail ())
2819  {
2820  is.clear();
2821  std::string err_field;
2822  is >> err_field;
2823  (*current_liboctave_error_handler)
2824  ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2825  "Symbols '%s' is not an integer format",
2826  i+1, err_field.c_str ());
2827  }
2828 
2829  if (itmp < 0 || itmp >= nr)
2830  {
2831  is.setstate (std::ios::failbit);
2832 
2833  (*current_liboctave_error_handler)
2834  ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2835  "row index = %" OCTAVE_IDX_TYPE_FORMAT " out of range",
2836  i+1, itmp + 1);
2837  }
2838 
2839  if (jtmp < 0 || jtmp >= nc)
2840  {
2841  is.setstate (std::ios::failbit);
2842 
2843  (*current_liboctave_error_handler)
2844  ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2845  "column index = %" OCTAVE_IDX_TYPE_FORMAT " out of range",
2846  i+1, jtmp + 1);
2847  }
2848 
2849  if (jtmp < jold)
2850  {
2851  is.setstate (std::ios::failbit);
2852 
2853  (*current_liboctave_error_handler)
2854  ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ":"
2855  "column indices must appear in ascending order "
2856  "(%" OCTAVE_IDX_TYPE_FORMAT " < %" OCTAVE_IDX_TYPE_FORMAT ")",
2857  i+1, jtmp, jold);
2858  }
2859  else if (jtmp > jold)
2860  {
2861  for (octave_idx_type j = jold; j < jtmp; j++)
2862  a.cidx (j+1) = ii;
2863  }
2864  else if (itmp < iold)
2865  {
2866  is.setstate (std::ios::failbit);
2867 
2868  (*current_liboctave_error_handler)
2869  ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2870  "row indices must appear in ascending order in each column "
2871  "(%" OCTAVE_IDX_TYPE_FORMAT " < %" OCTAVE_IDX_TYPE_FORMAT ")",
2872  i+1, iold, itmp);
2873  }
2874 
2875  iold = itmp;
2876  jold = jtmp;
2877 
2878  tmp = read_fcn (is);
2879 
2880  if (! is)
2881  return is; // Problem, return is in error state
2882 
2883  a.data (ii) = tmp;
2884  a.ridx (ii++) = itmp;
2885  }
2886 
2887  for (octave_idx_type j = jold; j < nc; j++)
2888  a.cidx (j+1) = ii;
2889  }
2890 
2891  return is;
2892 }
2893 
2894 /*
2895  * Tests
2896  *
2897 
2898 %!function x = set_slice (x, dim, slice, arg)
2899 %! switch (dim)
2900 %! case 11
2901 %! x(slice) = 2;
2902 %! case 21
2903 %! x(slice, :) = 2;
2904 %! case 22
2905 %! x(:, slice) = 2;
2906 %! otherwise
2907 %! error ("invalid dim, '%d'", dim);
2908 %! endswitch
2909 %!endfunction
2910 
2911 %!function x = set_slice2 (x, dim, slice)
2912 %! switch (dim)
2913 %! case 11
2914 %! x(slice) = 2 * ones (size (slice));
2915 %! case 21
2916 %! x(slice, :) = 2 * ones (length (slice), columns (x));
2917 %! case 22
2918 %! x(:, slice) = 2 * ones (rows (x), length (slice));
2919 %! otherwise
2920 %! error ("invalid dim, '%d'", dim);
2921 %! endswitch
2922 %!endfunction
2923 
2924 %!function test_sparse_slice (size, dim, slice)
2925 %! x = ones (size);
2926 %! s = set_slice (sparse (x), dim, slice);
2927 %! f = set_slice (x, dim, slice);
2928 %! assert (nnz (s), nnz (f));
2929 %! assert (full (s), f);
2930 %! s = set_slice2 (sparse (x), dim, slice);
2931 %! f = set_slice2 (x, dim, slice);
2932 %! assert (nnz (s), nnz (f));
2933 %! assert (full (s), f);
2934 %!endfunction
2935 
2936 #### 1d indexing
2937 
2938 ## size = [2 0]
2939 %!test test_sparse_slice ([2 0], 11, []);
2940 %!assert (set_slice (sparse (ones ([2 0])), 11, 1), sparse ([2 0]')) # sparse different from full
2941 %!assert (set_slice (sparse (ones ([2 0])), 11, 2), sparse ([0 2]')) # sparse different from full
2942 %!assert (set_slice (sparse (ones ([2 0])), 11, 3), sparse ([0 0; 2 0]')) # sparse different from full
2943 %!assert (set_slice (sparse (ones ([2 0])), 11, 4), sparse ([0 0; 0 2]')) # sparse different from full
2944 
2945 ## size = [0 2]
2946 %!test test_sparse_slice ([0 2], 11, []);
2947 %!assert (set_slice (sparse (ones ([0 2])), 11, 1), sparse ([2 0])) # sparse different from full
2948 %!test test_sparse_slice ([0 2], 11, 2);
2949 %!test test_sparse_slice ([0 2], 11, 3);
2950 %!test test_sparse_slice ([0 2], 11, 4);
2951 %!test test_sparse_slice ([0 2], 11, [4, 4]);
2952 
2953 ## size = [2 1]
2954 %!test test_sparse_slice ([2 1], 11, []);
2955 %!test test_sparse_slice ([2 1], 11, 1);
2956 %!test test_sparse_slice ([2 1], 11, 2);
2957 %!test test_sparse_slice ([2 1], 11, 3);
2958 %!test test_sparse_slice ([2 1], 11, 4);
2959 %!test test_sparse_slice ([2 1], 11, [4, 4]);
2960 
2961 ## size = [1 2]
2962 %!test test_sparse_slice ([1 2], 11, []);
2963 %!test test_sparse_slice ([1 2], 11, 1);
2964 %!test test_sparse_slice ([1 2], 11, 2);
2965 %!test test_sparse_slice ([1 2], 11, 3);
2966 %!test test_sparse_slice ([1 2], 11, 4);
2967 %!test test_sparse_slice ([1 2], 11, [4, 4]);
2968 
2969 ## size = [2 2]
2970 %!test test_sparse_slice ([2 2], 11, []);
2971 %!test test_sparse_slice ([2 2], 11, 1);
2972 %!test test_sparse_slice ([2 2], 11, 2);
2973 %!test test_sparse_slice ([2 2], 11, 3);
2974 %!test test_sparse_slice ([2 2], 11, 4);
2975 %!test test_sparse_slice ([2 2], 11, [4, 4]);
2976 # These 2 errors are the same as in the full case
2977 %!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 5)
2978 %!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 6)
2979 
2980 #### 2d indexing
2981 
2982 ## size = [2 0]
2983 %!test test_sparse_slice ([2 0], 21, []);
2984 %!test test_sparse_slice ([2 0], 21, 1);
2985 %!test test_sparse_slice ([2 0], 21, 2);
2986 %!test test_sparse_slice ([2 0], 21, [2,2]);
2987 %!assert (set_slice (sparse (ones ([2 0])), 21, 3), sparse (3,0))
2988 %!assert (set_slice (sparse (ones ([2 0])), 21, 4), sparse (4,0))
2989 %!test test_sparse_slice ([2 0], 22, []);
2990 %!test test_sparse_slice ([2 0], 22, 1);
2991 %!test test_sparse_slice ([2 0], 22, 2);
2992 %!test test_sparse_slice ([2 0], 22, [2,2]);
2993 %!assert (set_slice (sparse (ones ([2 0])), 22, 3), sparse ([0 0 2;0 0 2])) # sparse different from full
2994 %!assert (set_slice (sparse (ones ([2 0])), 22, 4), sparse ([0 0 0 2;0 0 0 2])) # sparse different from full
2995 
2996 ## size = [0 2]
2997 %!test test_sparse_slice ([0 2], 21, []);
2998 %!test test_sparse_slice ([0 2], 21, 1);
2999 %!test test_sparse_slice ([0 2], 21, 2);
3000 %!test test_sparse_slice ([0 2], 21, [2,2]);
3001 %!assert (set_slice (sparse (ones ([0 2])), 21, 3), sparse ([0 0;0 0;2 2])) # sparse different from full
3002 %!assert (set_slice (sparse (ones ([0 2])), 21, 4), sparse ([0 0;0 0;0 0;2 2])) # sparse different from full
3003 %!test test_sparse_slice ([0 2], 22, []);
3004 %!test test_sparse_slice ([0 2], 22, 1);
3005 %!test test_sparse_slice ([0 2], 22, 2);
3006 %!test test_sparse_slice ([0 2], 22, [2,2]);
3007 %!assert (set_slice (sparse (ones ([0 2])), 22, 3), sparse (0,3))
3008 %!assert (set_slice (sparse (ones ([0 2])), 22, 4), sparse (0,4))
3009 
3010 ## size = [2 1]
3011 %!test test_sparse_slice ([2 1], 21, []);
3012 %!test test_sparse_slice ([2 1], 21, 1);
3013 %!test test_sparse_slice ([2 1], 21, 2);
3014 %!test test_sparse_slice ([2 1], 21, [2,2]);
3015 %!test test_sparse_slice ([2 1], 21, 3);
3016 %!test test_sparse_slice ([2 1], 21, 4);
3017 %!test test_sparse_slice ([2 1], 22, []);
3018 %!test test_sparse_slice ([2 1], 22, 1);
3019 %!test test_sparse_slice ([2 1], 22, 2);
3020 %!test test_sparse_slice ([2 1], 22, [2,2]);
3021 %!test test_sparse_slice ([2 1], 22, 3);
3022 %!test test_sparse_slice ([2 1], 22, 4);
3023 
3024 ## size = [1 2]
3025 %!test test_sparse_slice ([1 2], 21, []);
3026 %!test test_sparse_slice ([1 2], 21, 1);
3027 %!test test_sparse_slice ([1 2], 21, 2);
3028 %!test test_sparse_slice ([1 2], 21, [2,2]);
3029 %!test test_sparse_slice ([1 2], 21, 3);
3030 %!test test_sparse_slice ([1 2], 21, 4);
3031 %!test test_sparse_slice ([1 2], 22, []);
3032 %!test test_sparse_slice ([1 2], 22, 1);
3033 %!test test_sparse_slice ([1 2], 22, 2);
3034 %!test test_sparse_slice ([1 2], 22, [2,2]);
3035 %!test test_sparse_slice ([1 2], 22, 3);
3036 %!test test_sparse_slice ([1 2], 22, 4);
3037 
3038 ## size = [2 2]
3039 %!test test_sparse_slice ([2 2], 21, []);
3040 %!test test_sparse_slice ([2 2], 21, 1);
3041 %!test test_sparse_slice ([2 2], 21, 2);
3042 %!test test_sparse_slice ([2 2], 21, [2,2]);
3043 %!test test_sparse_slice ([2 2], 21, 3);
3044 %!test test_sparse_slice ([2 2], 21, 4);
3045 %!test test_sparse_slice ([2 2], 22, []);
3046 %!test test_sparse_slice ([2 2], 22, 1);
3047 %!test test_sparse_slice ([2 2], 22, 2);
3048 %!test test_sparse_slice ([2 2], 22, [2,2]);
3049 %!test test_sparse_slice ([2 2], 22, 3);
3050 %!test test_sparse_slice ([2 2], 22, 4);
3051 
3052 %!assert <*35570> (speye (3,1)(3:-1:1), sparse ([0; 0; 1]))
3053 
3054 ## Test removing columns
3055 %!test <*36656>
3056 %! s = sparse (magic (5));
3057 %! s(:,2:4) = [];
3058 %! assert (s, sparse (magic (5)(:, [1,5])));
3059 
3060 %!test
3061 %! s = sparse ([], [], [], 1, 1);
3062 %! s(1,:) = [];
3063 %! assert (s, sparse ([], [], [], 0, 1));
3064 
3065 ## Test (bug #37321)
3066 %!test <*37321> a=sparse (0,0); assert (all (a) == sparse ([1]));
3067 %!test <*37321> a=sparse (0,1); assert (all (a) == sparse ([1]));
3068 %!test <*37321> a=sparse (1,0); assert (all (a) == sparse ([1]));
3069 %!test <*37321> a=sparse (1,0); assert (all (a,2) == sparse ([1]));
3070 %!test <*37321> a=sparse (1,0); assert (size (all (a,1)), [1 0]);
3071 %!test <*37321> a=sparse (1,1);
3072 %! assert (all (a) == sparse ([0]));
3073 %! assert (size (all (a)), [1 1]);
3074 %!test <*37321> a=sparse (2,1);
3075 %! assert (all (a) == sparse ([0]));
3076 %! assert (size (all (a)), [1 1]);
3077 %!test <*37321> a=sparse (1,2);
3078 %! assert (all (a) == sparse ([0]));
3079 %! assert (size (all (a)), [1 1]);
3080 %!test <*37321> a=sparse (2,2); assert (isequal (all (a), sparse ([0 0])));
3081 
3082 ## Test assigning row to a column slice
3083 %!test <45589>
3084 %! a = sparse (magic (3));
3085 %! b = a;
3086 %! a(1,:) = 1:3;
3087 %! b(1,:) = (1:3)';
3088 %! assert (a, b);
3089 
3090 */
3091 
3092 template <typename T, typename Alloc>
3093 OCTAVE_API
3094 void
3095 Sparse<T, Alloc>::print_info (std::ostream& os, const std::string& prefix) const
3096 {
3097  os << prefix << "m_rep address: " << m_rep << "\n"
3098  << prefix << "m_rep->m_nzmax: " << m_rep->m_nzmax << "\n"
3099  << prefix << "m_rep->m_nrows: " << m_rep->m_nrows << "\n"
3100  << prefix << "m_rep->m_ncols: " << m_rep->m_ncols << "\n"
3101  << prefix << "m_rep->m_data: " << m_rep->m_data << "\n"
3102  << prefix << "m_rep->m_ridx: " << m_rep->m_ridx << "\n"
3103  << prefix << "m_rep->m_cidx: " << m_rep->m_cidx << "\n"
3104  << prefix << "m_rep->m_count: " << m_rep->m_count << "\n";
3105 }
3106 
// Explicit-instantiation helper: instantiate the Sparse<T> class
// template and the matching read_sparse_matrix<T> overload for a
// concrete element type T.  Under clang the class instantiation itself
// carries OCTAVE_API; other compilers get the plain form.
// NOTE(review): the exact visibility rationale for the __clang__ split
// is not visible here — confirm against the project's export-macro
// documentation before changing either branch.
3107 #if defined (__clang__)
3108 # define INSTANTIATE_SPARSE(T) \
3109  template class OCTAVE_API Sparse<T>; \
3110  template OCTAVE_API std::istream& \
3111  read_sparse_matrix<T> (std::istream& is, Sparse<T>& a, \
3112  T (*read_fcn) (std::istream&));
3113 #else
3114 # define INSTANTIATE_SPARSE(T) \
3115  template class Sparse<T>; \
3116  template OCTAVE_API std::istream& \
3117  read_sparse_matrix<T> (std::istream& is, Sparse<T>& a, \
3118  T (*read_fcn) (std::istream&));
3119 #endif
bool sparse_descending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition: Sparse.cc:2309
std::istream & read_sparse_matrix(std::istream &is, Sparse< T > &a, T(*read_fcn)(std::istream &))
Definition: Sparse.cc:2792
bool sparse_ascending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition: Sparse.cc:2300
charNDArray max(char d, const charNDArray &m)
Definition: chNDArray.cc:230
charNDArray min(char d, const charNDArray &m)
Definition: chNDArray.cc:207
T & elem(octave_idx_type n)
Size of the specified dimension.
Definition: Array.h:562
T * fortran_vec()
Size of the specified dimension.
Definition: Array-base.cc:1764
Array< T, Alloc > index(const octave::idx_vector &i) const
Indexing without resizing.
Definition: Array-base.cc:710
void assign(const octave::idx_vector &i, const Array< T, Alloc > &rhs, const T &rfv)
Indexed assignment (always with resize & fill).
Definition: Array-base.cc:1128
Array< T, Alloc > transpose() const
Size of the specified dimension.
Definition: Array-base.cc:1623
Array< T, Alloc > as_matrix() const
Return the array as a matrix.
Definition: Array.h:442
octave_idx_type rows() const
Definition: Array.h:459
const T * data() const
Size of the specified dimension.
Definition: Array.h:663
octave_idx_type cols() const
Definition: Array.h:469
T & xelem(octave_idx_type n)
Size of the specified dimension.
Definition: Array.h:524
octave_idx_type numel() const
Number of elements in the array.
Definition: Array.h:414
octave_idx_type rows() const
Definition: PermMatrix.h:62
const Array< octave_idx_type > & col_perm_vec() const
Definition: PermMatrix.h:83
bool indices_ok() const
Definition: Sparse.cc:179
bool any_element_is_nan() const
Definition: Sparse.cc:187
void change_length(octave_idx_type nz)
Definition: Sparse.cc:145
octave_idx_type m_ncols
Definition: Sparse.h:76
octave::refcount< octave_idx_type > m_count
Definition: Sparse.h:77
idx_type_pointer m_cidx
Definition: Sparse.h:73
T_pointer m_data
Definition: Sparse.h:71
void maybe_compress(bool remove_zeros)
Definition: Sparse.cc:120
T & elem(octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:66
idx_type_pointer m_ridx
Definition: Sparse.h:72
octave_idx_type m_nzmax
Definition: Sparse.h:74
T celem(octave_idx_type r, octave_idx_type c) const
Definition: Sparse.cc:108
Definition: Sparse.h:49
octave_idx_type cols() const
Definition: Sparse.h:352
Sparse< T, Alloc > diag(octave_idx_type k=0) const
Definition: Sparse.cc:2493
Sparse< T, Alloc > permute(const Array< octave_idx_type > &vec, bool inv=false) const
Definition: Sparse.cc:931
octave_idx_type * xcidx()
Definition: Sparse.h:602
octave_idx_type nzmax() const
Amount of storage for nonzero elements.
Definition: Sparse.h:336
T * xdata()
Definition: Sparse.h:576
Array< T > array_value() const
Definition: Sparse.cc:2767
void delete_elements(const octave::idx_vector &i)
Definition: Sparse.cc:1198
Sparse< T, Alloc > index(const octave::idx_vector &i, bool resize_ok=false) const
Definition: Sparse.cc:1434
void print_info(std::ostream &os, const std::string &prefix) const
Definition: Sparse.cc:3095
octave_idx_type columns() const
Definition: Sparse.h:353
octave_idx_type * cidx()
Definition: Sparse.h:596
void assign(const octave::idx_vector &i, const Sparse< T, Alloc > &rhs)
Definition: Sparse.cc:1881
virtual ~Sparse()
Definition: Sparse.cc:707
void resize(octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:993
T * data()
Definition: Sparse.h:574
Sparse< T, Alloc >::SparseRep * m_rep
Definition: Sparse.h:249
Sparse< T, Alloc > sort(octave_idx_type dim=0, sortmode mode=ASCENDING) const
Definition: Sparse.cc:2318
octave_idx_type * ridx()
Definition: Sparse.h:583
Sparse< T, Alloc > & insert(const Sparse< T, Alloc > &a, octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:1044
void resize1(octave_idx_type n)
Definition: Sparse.cc:960
Sparse< T, Alloc > maybe_compress(bool remove_zeros=false)
Definition: Sparse.h:531
OCTAVE_NORETURN T range_error(const char *fcn, octave_idx_type n) const
Definition: Sparse.cc:758
octave_idx_type numel() const
Definition: Sparse.h:343
Sparse()
Definition: Sparse.h:259
static Sparse< T, Alloc > cat(int dim, octave_idx_type n, const Sparse< T, Alloc > *sparse_list)
Definition: Sparse.cc:2667
Sparse< T, Alloc > & operator=(const Sparse< T, Alloc > &a)
Definition: Sparse.cc:715
octave_idx_type nnz() const
Actual number of nonzero terms.
Definition: Sparse.h:339
octave_idx_type rows() const
Definition: Sparse.h:351
void change_capacity(octave_idx_type nz)
Definition: Sparse.h:556
Sparse< T, Alloc > transpose() const
Definition: Sparse.cc:1140
dim_vector m_dimensions
Definition: Sparse.h:251
octave_idx_type compute_index(const Array< octave_idx_type > &ra_idx) const
Definition: Sparse.cc:734
Sparse< T, Alloc > reshape(const dim_vector &new_dims) const
Definition: Sparse.cc:848
dim_vector dims() const
Definition: Sparse.h:371
octave_idx_type * xridx()
Definition: Sparse.h:589
bool isempty() const
Definition: Sparse.h:570
Vector representing the dimensions (size) of an Array.
Definition: dim-vector.h:94
bool concat(const dim_vector &dvb, int dim)
This corresponds to cat().
Definition: dim-vector.cc:140
octave_idx_type safe_numel() const
The following function will throw a std::bad_alloc () exception if the requested size is larger than ...
Definition: dim-vector.cc:98
std::string str(char sep='x') const
Definition: dim-vector.cc:68
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition: dim-vector.h:335
void resize(int n, int fill_value=0)
Definition: dim-vector.h:272
octave_idx_type ndims() const
Number of dimensions.
Definition: dim-vector.h:257
bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
Definition: dim-vector.cc:201
dim_vector redim(int n) const
Force certain dimensionality, preserving numel ().
Definition: dim-vector.cc:226
virtual octave_idx_type numel() const
Definition: ov-base.h:391
void set_compare(const compare_fcn_type &comp)
Definition: oct-sort.h:117
void sort(T *data, octave_idx_type nel)
Definition: oct-sort.cc:1522
if_then_else< is_class_type< T >::no, T, T const & >::result type
Definition: lo-traits.h:121
octave::idx_vector idx_vector
Definition: idx-vector.h:1022
void err_invalid_resize()
void err_nonconformant(const char *op, octave_idx_type op1_len, octave_idx_type op2_len)
void err_del_index_out_of_range(bool is1d, octave_idx_type idx, octave_idx_type ext)
OCTAVE_NORETURN liboctave_error_handler current_liboctave_error_handler
Definition: lo-error.c:41
bool isnan(bool)
Definition: lo-mappers.h:178
F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE * d
#define OCTAVE_API
Definition: main.cc:55
void mx_inline_add2(std::size_t n, R *r, const X *x)
Definition: mx-inlines.cc:127
void mx_inline_sub(std::size_t n, R *r, const X *x, const Y *y)
Definition: mx-inlines.cc:109
void mx_inline_add(std::size_t n, R *r, const X *x, const Y *y)
Definition: mx-inlines.cc:108
T octave_idx_type m
Definition: mx-inlines.cc:781
octave_idx_type n
Definition: mx-inlines.cc:761
T * r
Definition: mx-inlines.cc:781
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition: oct-locbuf.h:44
#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value)
Definition: oct-locbuf.h:50
sortmode
Definition: oct-sort.h:97
@ ASCENDING
Definition: oct-sort.h:97
@ DESCENDING
Definition: oct-sort.h:97
T::size_type numel(const T &str)
Definition: oct-string.cc:74
const octave_base_value const Array< octave_idx_type > & ra_idx
bool sparse_indices_ok(octave_idx_type *r, octave_idx_type *c, octave_idx_type nrows, octave_idx_type ncols, octave_idx_type nnz)
Definition: sparse-util.cc:87
F77_RET_T len
Definition: xerbla.cc:61