GNU Octave 11.1.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
 
Loading...
Searching...
No Matches
Sparse.cc
Go to the documentation of this file.
1////////////////////////////////////////////////////////////////////////
2//
3// Copyright (C) 1998-2026 The Octave Project Developers
4//
5// See the file COPYRIGHT.md in the top-level directory of this
6// distribution or <https://octave.org/copyright/>.
7//
8// This file is part of Octave.
9//
10// Octave is free software: you can redistribute it and/or modify it
11// under the terms of the GNU General Public License as published by
12// the Free Software Foundation, either version 3 of the License, or
13// (at your option) any later version.
14//
15// Octave is distributed in the hope that it will be useful, but
16// WITHOUT ANY WARRANTY; without even the implied warranty of
17// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18// GNU General Public License for more details.
19//
20// You should have received a copy of the GNU General Public License
21// along with Octave; see the file COPYING. If not, see
22// <https://www.gnu.org/licenses/>.
23//
24////////////////////////////////////////////////////////////////////////
25
26// This file should not include config.h. It is only included in other
27// C++ source files that should have included config.h before including
28// this file.
29
30#include <cinttypes>
31
32#include <algorithm>
33#include <istream>
34#include <ostream>
35#include <sstream>
36#include <vector>
37
38#include "Array-oct.h"
39#include "Array-util.h"
40#include "MArray.h"
41#include "Range.h"
42#include "idx-vector.h"
43#include "oct-error.h"
44#include "oct-locbuf.h"
45#include "quit.h"
46
47#include "Sparse.h"
48#include "mx-inlines.cc"
49#include "oct-spparms.h"
50#include "sparse-util.h"
51
52#include "PermMatrix.h"
53
54template <typename T, typename Alloc>
// Return the shared singleton representation used by empty Sparse objects.
// NOTE(review): the return-type/signature lines were dropped by this
// extraction (upstream lines 55-56); presumably
// "typename Sparse<T, Alloc>::SparseRep * nil_rep ()" -- confirm upstream.
57{
58 static typename Sparse<T, Alloc>::SparseRep nr;
59 return &nr;
60}
61
62template <typename T, typename Alloc>
// Read-write access to element (r, c) of the representation.  If the element
// is not yet stored, a new entry is inserted in sorted row order (slow path),
// shifting all later entries and bumping the column pointers.  Errors when
// the matrix is already at capacity (m_cidx[m_ncols] == m_nzmax).
// NOTE(review): the signature line and the declaration of the loop/result
// index "i" (upstream lines 65 and 67, presumably "octave_idx_type i;")
// were dropped by this extraction -- confirm upstream.
64T&
66{
68
69 if (m_nzmax <= 0)
70 (*current_liboctave_error_handler)
71 ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
72
// Scan column c for row r; "i" is left pointing at the insertion slot
// when the row is not found.
73 for (i = m_cidx[c]; i < m_cidx[c + 1]; i++)
74 if (m_ridx[i] == r)
75 return m_data[i];
76 else if (m_ridx[i] > r)
77 break;
78
79 // Ok, If we've gotten here, we're in trouble. Have to create a
80 // new element in the sparse array. This' gonna be slow!!!
81 if (m_cidx[m_ncols] == m_nzmax)
82 (*current_liboctave_error_handler)
83 ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
84
// Shift every stored entry after position i one slot to the right to
// make room for the new element.
85 octave_idx_type to_move = m_cidx[m_ncols] - i;
86 if (to_move != 0)
87 {
88 for (octave_idx_type j = m_cidx[m_ncols]; j > i; j--)
89 {
90 m_data[j] = m_data[j-1];
91 m_ridx[j] = m_ridx[j-1];
92 }
93 }
94
// All columns after c gain one element before them.
95 for (octave_idx_type j = c + 1; j < m_ncols + 1; j++)
96 m_cidx[j] = m_cidx[j] + 1;
97
98 m_data[i] = 0.;
99 m_ridx[i] = r;
100
101 return m_data[i];
102}
103
104template <typename T, typename Alloc>
// Read-only access to element (r, c): return the stored value, or a
// default-constructed T () (zero) when the element is not present.
// NOTE(review): the signature lines (upstream 105, 107) were dropped by
// this extraction -- confirm upstream.
106T
108{
109 if (m_nzmax > 0)
110 for (octave_idx_type i = m_cidx[c]; i < m_cidx[c + 1]; i++)
111 if (m_ridx[i] == r)
112 return m_data[i];
113 return T ();
114}
115
116template <typename T, typename Alloc>
// Shrink storage to the actual number of nonzeros.  When remove_zeros is
// true, first squeeze out explicitly stored zero values, compacting
// m_data/m_ridx in place and rewriting the column pointers.
// NOTE(review): the signature line (upstream 119, presumably
// "Sparse<T, Alloc>::SparseRep::maybe_compress (bool remove_zeros)") was
// dropped by this extraction -- confirm upstream.
118void
120{
121 if (remove_zeros)
122 {
123 octave_idx_type i = 0;
124 octave_idx_type k = 0;
125 for (octave_idx_type j = 1; j <= m_ncols; j++)
126 {
127 octave_idx_type u = m_cidx[j];
128 for (; i < u; i++)
129 if (m_data[i] != T ())
130 {
131 m_data[k] = m_data[i];
132 m_ridx[k++] = m_ridx[i];
133 }
134 m_cidx[j] = k;
135 }
136 }
137
138 change_length (m_cidx[m_ncols]);
139}
140
141template <typename T, typename Alloc>
// Change the allocated capacity (m_nzmax) to nz, truncating column pointers
// that exceed the new size and copying the surviving entries into freshly
// allocated buffers.  Small shrinks (less than 1/FRAC slack) skip the
// reallocation entirely.
// NOTE(review): several lines were dropped by this extraction: the signature
// (upstream 144), the opening brace of the if-body (155), and the function's
// closing brace (173) -- confirm against upstream Sparse.cc.
143void
145{
146 for (octave_idx_type j = m_ncols; j > 0 && m_cidx[j] > nz; j--)
147 m_cidx[j] = nz;
148
149 // Always preserve space for 1 element.
150 nz = (nz > 0 ? nz : 1);
151
152 // Skip reallocation if we have less than 1/FRAC extra elements to discard.
153 static const int FRAC = 5;
154 if (nz > m_nzmax || nz < m_nzmax - m_nzmax/FRAC)
156 // Reallocate.
157 octave_idx_type min_nzmax = std::min (nz, m_nzmax);
158
159 octave_idx_type *new_ridx = idx_type_allocate (nz);
160 std::copy_n (m_ridx, min_nzmax, new_ridx);
161
162 idx_type_deallocate (m_ridx, m_nzmax);
163 m_ridx = new_ridx;
164
165 T *new_data = T_allocate (nz);
166 std::copy_n (m_data, min_nzmax, new_data);
167
168 T_deallocate (m_data, m_nzmax);
169 m_data = new_data;
170
171 m_nzmax = nz;
172 }
174
175template <typename T, typename Alloc>
// Validate the internal CSC index arrays via sparse-util's checker.
// NOTE(review): the signature and opening-brace lines (upstream 178-179)
// were dropped by this extraction -- confirm upstream.
177bool
180 return sparse_indices_ok (m_ridx, m_cidx, m_nrows, m_ncols, nnz ());
181}
182
183template <typename T, typename Alloc>
// Return true if any stored value is NaN.  Only stored (nonzero) entries
// need checking; implicit zeros can never be NaN.
// NOTE(review): the signature line (upstream 186) was dropped by this
// extraction -- confirm upstream.
185bool
187{
188 octave_idx_type nz = nnz ();
189
190 for (octave_idx_type i = 0; i < nz; i++)
191 if (octave::math::isnan (m_data[i]))
192 return true;
193
194 return false;
195}
196
197template <typename T, typename Alloc>
// Return true if any stored value is Inf or NaN.
// NOTE(review): the signature line (upstream 200) was dropped by this
// extraction -- confirm upstream.
199bool
201{
202 octave_idx_type nz = nnz ();
203
204 for (octave_idx_type i = 0; i < nz; i++)
205 if (octave::math::isinf (m_data[i]) || octave::math::isnan (m_data[i]))
206 return true;
207
208 return false;
209}
210
211template <typename T, typename Alloc>
// Construct an nr-by-nc matrix filled with val.  A nonzero val produces a
// fully dense sparse matrix (every element stored); a zero val produces an
// empty representation with all column pointers at 0.
// NOTE(review): the constructor signature lines (upstream 212-213) were
// dropped by this extraction -- confirm upstream.
214 : m_rep (nullptr), m_dimensions (dim_vector (nr, nc))
215{
216 if (val != T ())
217 {
218 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, m_dimensions.safe_numel ());
219
220 octave_idx_type ii = 0;
221 xcidx (0) = 0;
222 for (octave_idx_type j = 0; j < nc; j++)
223 {
224 for (octave_idx_type i = 0; i < nr; i++)
225 {
226 xdata (ii) = val;
227 xridx (ii++) = i;
228 }
229 xcidx (j+1) = ii;
230 }
231 }
232 else
233 {
234 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, 0);
235 for (octave_idx_type j = 0; j < nc+1; j++)
236 xcidx (j) = 0;
237 }
238}
239
240template <typename T, typename Alloc>
// Construct a sparse matrix from a permutation matrix: one stored 1.0 per
// column, with the row index taken from the column permutation vector.
// NOTE(review): the constructor signature lines (upstream 241-242) were
// dropped by this extraction -- confirm upstream.
243 : m_rep (new typename Sparse<T, Alloc>::SparseRep (a.rows (), a.cols (), a.rows ())),
244 m_dimensions (dim_vector (a.rows (), a.cols ()))
245{
246 octave_idx_type n = a.rows ();
247 for (octave_idx_type i = 0; i <= n; i++)
248 cidx (i) = i;
249
250 const Array<octave_idx_type> pv = a.col_perm_vec ();
251
252 for (octave_idx_type i = 0; i < n; i++)
253 ridx (i) = pv(i);
254
255 for (octave_idx_type i = 0; i < n; i++)
256 data (i) = 1.0;
257}
258
259template <typename T, typename Alloc>
// Construct an all-zero sparse matrix with the given (2-D only) dimensions.
// NOTE(review): the constructor signature lines (upstream 260-261) were
// dropped by this extraction -- confirm upstream.
262 : m_rep (nullptr), m_dimensions (dv)
263{
264 if (dv.ndims () != 2)
265 (*current_liboctave_error_handler)
266 ("Sparse::Sparse (const dim_vector&): dimension mismatch");
267
268 m_rep = new typename Sparse<T, Alloc>::SparseRep (dv(0), dv(1), 0);
269}
270
271template <typename T, typename Alloc>
// Construct a copy of sparse matrix a reshaped to dimensions dv (same total
// element count required).  Each stored entry's linear index is recomputed
// into the new row/column coordinates; column pointers are rebuilt as the
// entries are walked in order.
// NOTE(review): the constructor signature lines (upstream 272-273) were
// dropped by this extraction -- confirm upstream.
274 : m_rep (nullptr), m_dimensions (dv)
275{
276
277 // Work in unsigned long long to avoid overflow issues with numel
278 unsigned long long a_nel = static_cast<unsigned long long> (a.rows ()) *
279 static_cast<unsigned long long> (a.cols ());
280 unsigned long long dv_nel = static_cast<unsigned long long> (dv(0)) *
281 static_cast<unsigned long long> (dv(1));
282
283 if (a_nel != dv_nel)
284 (*current_liboctave_error_handler)
285 ("Sparse::Sparse (const Sparse&, const dim_vector&): dimension mismatch");
286
287 const dim_vector& old_dims = a.dims ();
288 octave_idx_type new_nzmax = a.nnz ();
289 octave_idx_type new_nr = dv(0);
290 octave_idx_type new_nc = dv(1);
291 octave_idx_type old_nr = old_dims(0);
292 octave_idx_type old_nc = old_dims(1);
293
294 m_rep = new typename Sparse<T, Alloc>::SparseRep (new_nr, new_nc, new_nzmax);
295
296 octave_idx_type kk = 0;
297 xcidx (0) = 0;
298 for (octave_idx_type i = 0; i < old_nc; i++)
299 for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++)
300 {
// tmp is the linear (column-major) index of the entry in the old shape.
301 octave_idx_type tmp = i * old_nr + a.ridx (j);
302 octave_idx_type ii = tmp % new_nr;
303 octave_idx_type jj = (tmp - ii) / new_nr;
304 for (octave_idx_type k = kk; k < jj; k++)
305 xcidx (k+1) = j;
306 kk = jj;
307 xdata (j) = a.data (j);
308 xridx (j) = ii;
309 }
// Close out any trailing empty columns.
310 for (octave_idx_type k = kk; k < new_nc; k++)
311 xcidx (k+1) = new_nzmax;
312}
313
314template <typename T, typename Alloc>
// Construct a sparse matrix from triplet form: values a with row indices r
// and column indices c (the sparse(i,j,v,...) builder).  Negative nr/nc are
// inferred from the index extents.  sum_terms selects whether repeated
// (r,c) pairs are summed or the last value wins; nzm is a minimum capacity.
// The body is specialized four ways: scalar build, scalar value with many
// indices (column-vector and general bucket-sort paths), vector value with
// column-vector indices, and the general pair-sorting path.
// NOTE(review): this extraction dropped the first signature line (upstream
// 315-316), several brace lines, and the OCTAVE_LOCAL_BUFFER declarations
// of "ci", "sidx", "rsi" and some loop-local declarations (upstream lines
// 322, 329, 333, 436, 452, 525, 549, 551, ...) -- confirm upstream before
// compiling.
317 const octave::idx_vector& r,
318 const octave::idx_vector& c,
320 bool sum_terms, octave_idx_type nzm)
321 : m_rep (nullptr), m_dimensions ()
323 if (nr < 0)
324 nr = r.extent (0);
325 else if (r.extent (nr) > nr)
326 (*current_liboctave_error_handler)
// NOTE(review): missing space after the closing quote of the format macro
// below -- this message prints e.g. "row index 5out of bound"; the column
// version a few lines down has the space.  Fix upstream.
327 ("sparse: row index %" OCTAVE_IDX_TYPE_FORMAT "out of bound "
328 "%" OCTAVE_IDX_TYPE_FORMAT, r.extent (nr), nr);
330 if (nc < 0)
331 nc = c.extent (0);
332 else if (c.extent (nc) > nc)
334 ("sparse: column index %" OCTAVE_IDX_TYPE_FORMAT " out of bound "
335 "%" OCTAVE_IDX_TYPE_FORMAT, c.extent (nc), nc);
336
337 m_dimensions = dim_vector (nr, nc);
338
339 octave_idx_type n = a.numel ();
340 octave_idx_type rl = r.length (nr);
341 octave_idx_type cl = c.length (nc);
342 bool a_scalar = n == 1;
343 if (a_scalar)
344 {
345 if (rl != 1)
346 n = rl;
347 else if (cl != 1)
348 n = cl;
349 }
350
351 if ((rl != 1 && rl != n) || (cl != 1 && cl != n))
352 (*current_liboctave_error_handler) ("sparse: dimension mismatch");
353
354 // Only create m_rep after input validation to avoid memory leak.
355 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, std::max<octave_idx_type> (nzm, 0));
356
// Case 1: a single (r,c,v) triple -- at most one stored element.
357 if (rl <= 1 && cl <= 1)
358 {
359 if (n == 1 && a(0) != T ())
360 {
361 change_capacity (std::max<octave_idx_type> (nzm, 1));
362 xridx (0) = r(0);
363 xdata (0) = a(0);
364 std::fill_n (xcidx () + c(0) + 1, nc - c(0), 1);
365 }
366 }
// Case 2: scalar value broadcast over many indices.
367 else if (a_scalar)
368 {
369 // This is completely specialized, because the sorts can be simplified.
370 T a0 = a(0);
371 if (a0 == T ())
372 {
373 // Do nothing, it's an empty matrix.
374 }
375 else if (cl == 1)
376 {
377 // Sparse column vector. Sort row indices.
378 octave::idx_vector rs = r.sorted ();
379
380 octave_quit ();
381
382 const octave_idx_type *rd = rs.raw ();
383 // Count unique indices.
384 octave_idx_type new_nz = 1;
385 for (octave_idx_type i = 1; i < n; i++)
386 new_nz += rd[i-1] != rd[i];
387
388 // Allocate result.
389 change_capacity (std::max (nzm, new_nz));
390 std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
391
392 octave_idx_type *rri = ridx ();
393 T *rrd = data ();
394
395 octave_quit ();
396
397 octave_idx_type k = -1;
398 octave_idx_type l = -1;
399
400 if (sum_terms)
401 {
402 // Sum repeated indices.
403 for (octave_idx_type i = 0; i < n; i++)
404 {
405 if (rd[i] != l)
406 {
407 l = rd[i];
408 rri[++k] = rd[i];
409 rrd[k] = a0;
410 }
411 else
412 rrd[k] += a0;
413 }
414 }
415 else
416 {
417 // Pick the last one.
418 for (octave_idx_type i = 0; i < n; i++)
419 {
420 if (rd[i] != l)
421 {
422 l = rd[i];
423 rri[++k] = rd[i];
424 rrd[k] = a0;
425 }
426 }
427 }
428
429 }
430 else
431 {
// Scalar value, general (r,c) indices: bucket sort rows by column.
432 octave::idx_vector rr = r;
433 octave::idx_vector cc = c;
434 const octave_idx_type *rd = rr.raw ();
435 const octave_idx_type *cd = cc.raw ();
// NOTE(review): the OCTAVE_LOCAL_BUFFER declaration of "ci" (upstream
// line 436) was dropped by this extraction.
437 ci[0] = 0;
438 // Bin counts of column indices.
439 for (octave_idx_type i = 0; i < n; i++)
440 ci[cd[i]+1]++;
441 // Make them cumulative, shifted one to right.
442 for (octave_idx_type i = 1, s = 0; i <= nc; i++)
443 {
444 octave_idx_type s1 = s + ci[i];
445 ci[i] = s;
446 s = s1;
447 }
448
449 octave_quit ();
450
451 // Bucket sort.
453 for (octave_idx_type i = 0; i < n; i++)
454 if (rl == 1)
455 sidx[ci[cd[i]+1]++] = rd[0];
456 else
457 sidx[ci[cd[i]+1]++] = rd[i];
458
459 // Subsorts. We don't need a stable sort, all values are equal.
460 xcidx (0) = 0;
461 for (octave_idx_type j = 0; j < nc; j++)
462 {
463 std::sort (sidx + ci[j], sidx + ci[j+1]);
464 octave_idx_type l = -1;
465 octave_idx_type nzj = 0;
466 // Count.
467 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
468 {
469 octave_idx_type k = sidx[i];
470 if (k != l)
471 {
472 l = k;
473 nzj++;
474 }
475 }
476 // Set column pointer.
477 xcidx (j+1) = xcidx (j) + nzj;
478 }
479
480 change_capacity (std::max (nzm, xcidx (nc)));
481 octave_idx_type *rri = ridx ();
482 T *rrd = data ();
483
484 // Fill-in data.
485 for (octave_idx_type j = 0, jj = -1; j < nc; j++)
486 {
487 octave_quit ();
488 octave_idx_type l = -1;
489 if (sum_terms)
490 {
491 // Sum adjacent terms.
492 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
493 {
494 octave_idx_type k = sidx[i];
495 if (k != l)
496 {
497 l = k;
498 rrd[++jj] = a0;
499 rri[jj] = k;
500 }
501 else
502 rrd[jj] += a0;
503 }
504 }
505 else
506 {
507 // Use the last one.
508 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
509 {
510 octave_idx_type k = sidx[i];
511 if (k != l)
512 {
513 l = k;
514 rrd[++jj] = a0;
515 rri[jj] = k;
516 }
517 }
518 }
519 }
520 }
521 }
// Case 3: vector of values, single column -- sort rows keeping the
// permutation so values follow their indices.
522 else if (cl == 1)
523 {
524 // Sparse column vector. Sort row indices.
526 octave::idx_vector rs = r.sorted (rsi);
527
528 octave_quit ();
529
530 const octave_idx_type *rd = rs.raw ();
531 const octave_idx_type *rdi = rsi.data ();
532 // Count unique indices.
533 octave_idx_type new_nz = 1;
534 for (octave_idx_type i = 1; i < n; i++)
535 new_nz += rd[i-1] != rd[i];
536
537 // Allocate result.
538 change_capacity (std::max (nzm, new_nz));
539 std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
540
541 octave_idx_type *rri = ridx ();
542 T *rrd = data ();
543
544 octave_quit ();
545
546 octave_idx_type k = 0;
547 rri[k] = rd[0];
548 rrd[k] = a(rdi[0]);
550 if (sum_terms)
552 // Sum repeated indices.
553 for (octave_idx_type i = 1; i < n; i++)
554 {
555 if (rd[i] != rd[i-1])
556 {
557 rri[++k] = rd[i];
558 rrd[k] = a(rdi[i]);
559 }
560 else
561 rrd[k] += a(rdi[i]);
562 }
564 else
566 // Pick the last one.
567 for (octave_idx_type i = 1; i < n; i++)
568 {
569 if (rd[i] != rd[i-1])
570 rri[++k] = rd[i];
571 rrd[k] = a(rdi[i]);
572 }
573 }
574
575 maybe_compress (true);
576 }
// Case 4: vector of values, general indices -- sort (row, position)
// pairs per column so values can be placed stably.
577 else
578 {
579 octave::idx_vector rr = r;
580 octave::idx_vector cc = c;
581 const octave_idx_type *rd = rr.raw ();
582 const octave_idx_type *cd = cc.raw ();
// NOTE(review): the OCTAVE_LOCAL_BUFFER declaration of "ci" (upstream
// line 583) was dropped by this extraction.
584 ci[0] = 0;
585 // Bin counts of column indices.
586 for (octave_idx_type i = 0; i < n; i++)
587 ci[cd[i]+1]++;
588 // Make them cumulative, shifted one to right.
589 for (octave_idx_type i = 1, s = 0; i <= nc; i++)
590 {
591 octave_idx_type s1 = s + ci[i];
592 ci[i] = s;
593 s = s1;
594 }
595
596 octave_quit ();
597
598 typedef std::pair<octave_idx_type, octave_idx_type> idx_pair;
599 // Bucket sort.
600 OCTAVE_LOCAL_BUFFER (idx_pair, spairs, n);
601 for (octave_idx_type i = 0; i < n; i++)
602 {
603 idx_pair& p = spairs[ci[cd[i]+1]++];
604 if (rl == 1)
605 p.first = rd[0];
606 else
607 p.first = rd[i];
608 p.second = i;
609 }
611 // Subsorts. We don't need a stable sort, the second index stabilizes it.
612 xcidx (0) = 0;
613 for (octave_idx_type j = 0; j < nc; j++)
615 std::sort (spairs + ci[j], spairs + ci[j+1]);
616 octave_idx_type l = -1;
618 // Count.
619 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
621 octave_idx_type k = spairs[i].first;
622 if (k != l)
624 l = k;
625 nzj++;
627 }
628 // Set column pointer.
629 xcidx (j+1) = xcidx (j) + nzj;
630 }
631
632 change_capacity (std::max (nzm, xcidx (nc)));
633 octave_idx_type *rri = ridx ();
634 T *rrd = data ();
635
636 // Fill-in data.
637 for (octave_idx_type j = 0, jj = -1; j < nc; j++)
638 {
639 octave_quit ();
641 if (sum_terms)
643 // Sum adjacent terms.
644 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
646 octave_idx_type k = spairs[i].first;
647 if (k != l)
648 {
649 l = k;
650 rrd[++jj] = a(spairs[i].second);
651 rri[jj] = k;
652 }
653 else
654 rrd[jj] += a(spairs[i].second);
655 }
656 }
657 else
658 {
659 // Use the last one.
660 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
661 {
662 octave_idx_type k = spairs[i].first;
663 if (k != l)
664 {
665 l = k;
666 rri[++jj] = k;
667 }
668 rrd[jj] = a(spairs[i].second);
669 }
670 }
671 }
672
673 maybe_compress (true);
674 }
675}
676
677/*
678%!assert <*51880> (sparse (1:2, 2, 1:2, 2, 2), sparse ([0, 1; 0, 2]))
679%!assert <*51880> (sparse (1:2, 1, 1:2, 2, 2), sparse ([1, 0; 2, 0]))
680%!assert <*51880> (sparse (1:2, 2, 1:2, 2, 3), sparse ([0, 1, 0; 0, 2, 0]))
681*/
682
683template <typename T, typename Alloc>
// Construct a sparse matrix from a dense 2-D Array: count the nonzeros,
// allocate exactly that capacity, then copy the nonzero entries column by
// column in CSC order.
// NOTE(review): this extraction dropped the constructor signature lines
// (upstream 684-685) and the declaration of "len" (upstream 694,
// presumably "octave_idx_type len = a.numel ();") -- confirm upstream.
686 : m_rep (nullptr), m_dimensions (a.dims ())
687{
688 if (m_dimensions.ndims () > 2)
689 (*current_liboctave_error_handler)
690 ("Sparse::Sparse (const Array<T>&): dimension mismatch");
691
692 octave_idx_type nr = rows ();
693 octave_idx_type nc = cols ();
695 octave_idx_type new_nzmax = 0;
696
697 // First count the number of nonzero terms
698 for (octave_idx_type i = 0; i < len; i++)
699 if (a(i) != T ())
700 new_nzmax++;
701
702 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, new_nzmax);
703
704 octave_idx_type ii = 0;
705 xcidx (0) = 0;
706 for (octave_idx_type j = 0; j < nc; j++)
707 {
708 for (octave_idx_type i = 0; i < nr; i++)
709 if (a.elem (i, j) != T ())
710 {
711 xdata (ii) = a.elem (i, j);
712 xridx (ii++) = i;
713 }
714 xcidx (j+1) = ii;
715 }
716}
717
718template <typename T, typename Alloc>
// Destructor: drop one reference to the shared representation and delete
// it when this was the last owner.
// NOTE(review): the signature lines (upstream 719-720, presumably
// "Sparse<T, Alloc>::~Sparse ()") were dropped by this extraction.
721{
722 if (--m_rep->m_count == 0)
723 delete m_rep;
724}
725
726template <typename T, typename Alloc>
// Copy assignment: reference-counted shallow copy.  Releases the current
// representation (deleting it when last), then shares a's representation.
// NOTE(review): the signature lines (upstream 727-728) were dropped by
// this extraction -- confirm upstream.
729{
730 if (this != &a)
731 {
732 if (--m_rep->m_count == 0)
733 delete m_rep;
734
735 m_rep = a.m_rep;
736 m_rep->m_count++;
737
738 m_dimensions = a.m_dimensions;
739 }
740
741 return *this;
742}
743
744template <typename T, typename Alloc>
// Fold a multi-dimensional index vector into a single linear (column-major)
// index: retval = (...(idx[n-1]*dim[n-2] + idx[n-2])*... ) + idx[0].
// NOTE(review): the signature lines (upstream 745-747) were dropped by
// this extraction -- confirm upstream.
748{
749 octave_idx_type n = m_dimensions.ndims ();
750
751 if (n <= 0 || n != ra_idx.numel ())
752 (*current_liboctave_error_handler)
753 ("Sparse<T, Alloc>::compute_index: invalid ra_idxing operation");
754
755 octave_idx_type retval = -1;
756
757 retval = ra_idx(--n);
758
759 while (--n >= 0)
760 {
761 retval *= m_dimensions(n);
762 retval += ra_idx(n);
763 }
764
765 return retval;
766}
767
768template <typename T, typename Alloc>
// Report an out-of-range single-index access (const flavor).  Never returns;
// the error handler throws/aborts.
// NOTE(review): the signature line (upstream 771) was dropped by this
// extraction -- confirm upstream.
770T
772{
773 (*current_liboctave_error_handler) ("%s (%" OCTAVE_IDX_TYPE_FORMAT "): "
774 "range error", fcn, n);
775}
776
777template <typename T, typename Alloc>
// Report an out-of-range single-index access (non-const flavor).
// NOTE(review): the signature line (upstream 780) was dropped by this
// extraction -- confirm upstream.
779T&
781{
782 (*current_liboctave_error_handler) ("%s (%" OCTAVE_IDX_TYPE_FORMAT "): "
783 "range error", fcn, n);
784}
785
786template <typename T, typename Alloc>
// Report an out-of-range (i, j) access (const flavor).
// NOTE(review): the signature lines (upstream 789-790) were dropped by
// this extraction -- confirm upstream.
788T
791{
792 (*current_liboctave_error_handler)
793 ("%s (%" OCTAVE_IDX_TYPE_FORMAT ", %" OCTAVE_IDX_TYPE_FORMAT "): "
794 "range error", fcn, i, j);
795}
796
797template <typename T, typename Alloc>
// Report an out-of-range (i, j) access (non-const flavor).
// NOTE(review): the signature lines (upstream 800-801) were dropped by
// this extraction -- confirm upstream.
799T&
802{
803 (*current_liboctave_error_handler)
804 ("%s (%" OCTAVE_IDX_TYPE_FORMAT ", %" OCTAVE_IDX_TYPE_FORMAT "): "
805 "range error", fcn, i, j);
806}
807
808template <typename T, typename Alloc>
// Report an out-of-range N-D index access (const flavor): formats the whole
// index list into the message.
// NOTE(review): this extraction dropped the first signature line (upstream
// 811) and the declaration of "n" (upstream 818, presumably
// "octave_idx_type n = ra_idx.numel ();") -- confirm upstream.
810T
812 const Array<octave_idx_type>& ra_idx) const
813{
814 std::ostringstream buf;
815
816 buf << fcn << " (";
817
819
820 if (n > 0)
821 buf << ra_idx(0);
822
823 for (octave_idx_type i = 1; i < n; i++)
824 buf << ", " << ra_idx(i);
825
826 buf << "): range error";
827
828 std::string buf_str = buf.str ();
829
830 (*current_liboctave_error_handler) ("%s", buf_str.c_str ());
831}
832
833template <typename T, typename Alloc>
// Report an out-of-range N-D index access (non-const flavor); mirrors the
// const overload above.
// NOTE(review): this extraction dropped the signature lines (upstream
// 836-837) and the declaration of "n" (upstream 843) -- confirm upstream.
835T&
838{
839 std::ostringstream buf;
840
841 buf << fcn << " (";
842
844
845 if (n > 0)
846 buf << ra_idx(0);
847
848 for (octave_idx_type i = 1; i < n; i++)
849 buf << ", " << ra_idx(i);
850
851 buf << "): range error";
852
853 std::string buf_str = buf.str ();
854
855 (*current_liboctave_error_handler) ("%s", buf_str.c_str ());
856}
857
858template <typename T, typename Alloc>
// Return a reshaped copy of this matrix.  N-D target shapes are smashed to
// 2-D (with a warning); total element count must be preserved.  Stored
// entries are remapped using incrementally tracked quotient/remainder to
// avoid overflow of i*old_nr (bug #42850).
// NOTE(review): the signature lines (upstream 859-861) were dropped by
// this extraction -- confirm upstream.
862{
863 Sparse<T, Alloc> retval;
864 dim_vector dims2 = new_dims;
865
866 if (dims2.ndims () > 2)
867 {
868 (*current_liboctave_warning_with_id_handler)
869 ("Octave:reshape-smashes-dims",
870 "reshape: sparse reshape to N-D array smashes dims");
871
872 for (octave_idx_type i = 2; i < dims2.ndims (); i++)
873 dims2(1) *= dims2(i);
874
875 dims2.resize (2);
876 }
877
878 if (m_dimensions != dims2)
879 {
880 if (m_dimensions.numel () == dims2.numel ())
881 {
882 octave_idx_type new_nnz = nnz ();
883 octave_idx_type new_nr = dims2 (0);
884 octave_idx_type new_nc = dims2 (1);
885 octave_idx_type old_nr = rows ();
886 octave_idx_type old_nc = cols ();
887 retval = Sparse<T, Alloc> (new_nr, new_nc, new_nnz);
888 // Special case for empty matrices (bug #64080)
889 if (new_nr == 0 || new_nc == 0)
890 return retval;
891
892 octave_idx_type kk = 0;
893 retval.xcidx (0) = 0;
894 // Quotient and remainder of i * old_nr divided by new_nr.
895 // Track them individually to avoid overflow (bug #42850).
896 octave_idx_type i_old_qu = 0;
897 octave_idx_type i_old_rm = static_cast<octave_idx_type> (-old_nr);
898 for (octave_idx_type i = 0; i < old_nc; i++)
899 {
900 i_old_rm += old_nr;
901 if (i_old_rm >= new_nr)
902 {
903 i_old_qu += i_old_rm / new_nr;
904 i_old_rm = i_old_rm % new_nr;
905 }
906 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
907 {
908 octave_idx_type ii, jj;
909 ii = (i_old_rm + ridx (j)) % new_nr;
910 jj = i_old_qu + (i_old_rm + ridx (j)) / new_nr;
911
912 // Original calculation subject to overflow
913 // ii = (i*old_nr + ridx (j)) % new_nr
914 // jj = (i*old_nr + ridx (j)) / new_nr
915 for (octave_idx_type k = kk; k < jj; k++)
916 retval.xcidx (k+1) = j;
917 kk = jj;
918 retval.xdata (j) = data (j);
919 retval.xridx (j) = ii;
920 }
921 }
// Close out any trailing empty columns.
922 for (octave_idx_type k = kk; k < new_nc; k++)
923 retval.xcidx (k+1) = new_nnz;
924 }
925 else
926 {
927 std::string dimensions_str = m_dimensions.str ();
928 std::string new_dims_str = new_dims.str ();
929
930 (*current_liboctave_error_handler)
931 ("reshape: can't reshape %s array to %s array",
932 dimensions_str.c_str (), new_dims_str.c_str ());
933 }
934 }
935 else
936 retval = *this;
937
938 return retval;
939}
940
941template <typename T, typename Alloc>
// Dimension permutation for a 2-D sparse matrix: [0,1] is the identity,
// [1,0] is a transpose; anything else is an error.
// NOTE(review): the signature lines (upstream 942-944) were dropped by
// this extraction -- confirm upstream.
945{
946 // The only valid permutations of a sparse array are [1, 2] and [2, 1].
947
948 bool fail = false;
949 bool trans = false;
950
951 if (perm_vec.numel () == 2)
952 {
953 if (perm_vec(0) == 0 && perm_vec(1) == 1)
954 /* do nothing */;
955 else if (perm_vec(0) == 1 && perm_vec(1) == 0)
956 trans = true;
957 else
958 fail = true;
959 }
960 else
961 fail = true;
962
963 if (fail)
964 (*current_liboctave_error_handler)
965 ("permutation vector contains an invalid element");
966
967 return trans ? this->transpose () : *this;
968}
969
970template <typename T, typename Alloc>
// Resize by a single dimension n, only valid for vector-shaped (or empty)
// matrices: the vector orientation is preserved, and an empty matrix grows
// along its zero dimension.
// NOTE(review): the signature line (upstream 973) was dropped by this
// extraction -- confirm upstream.
972void
974{
975 octave_idx_type nr = rows ();
976 octave_idx_type nc = cols ();
977
978 if (nr == 0)
979 resize (1, std::max (nc, n));
980 else if (nc == 0)
981 resize (nr, (n + nr - 1) / nr); // Ain't it wicked?
982 else if (nr == 1)
983 resize (1, n);
984 else if (nc == 1)
985 resize (n, 1);
986 else
987 octave::err_invalid_resize ();
988}
989
990template <typename T, typename Alloc>
// Resize to the dimensions in dv; sparse matrices must stay 2-D.
// NOTE(review): the signature line (upstream 993) was dropped by this
// extraction -- confirm upstream.
992void
994{
995 octave_idx_type n = dv.ndims ();
996
997 if (n != 2)
998 (*current_liboctave_error_handler) ("sparse array must be 2-D");
999
1000 resize (dv(0), dv(1));
1001}
1002
1003template <typename T, typename Alloc>
// Resize in place to r-by-c.  Shrinking rows drops entries with row index
// >= r; changing the column count reallocates the column-pointer array,
// padding new columns with the final pointer (i.e. empty columns).
// NOTE(review): the signature line (upstream 1006) was dropped by this
// extraction -- confirm upstream.
1005void
1007{
1008 if (r < 0 || c < 0)
1009 (*current_liboctave_error_handler) ("can't resize to negative dimension");
1010
1011 if (r == dim1 () && c == dim2 ())
1012 return;
1013
1014 // This wouldn't be necessary for r >= rows () if m_nrows wasn't part of the
1015 // Sparse rep. It is not good for anything in there.
1016 make_unique ();
1017
1018 if (r < rows ())
1019 {
1020 octave_idx_type i = 0;
1021 octave_idx_type k = 0;
1022 for (octave_idx_type j = 1; j <= m_rep->m_ncols; j++)
1023 {
1024 octave_idx_type u = xcidx (j);
1025 for (; i < u; i++)
1026 if (xridx (i) < r)
1027 {
1028 xdata (k) = xdata (i);
1029 xridx (k++) = xridx (i);
1030 }
1031 xcidx (j) = k;
1032 }
1033 }
1034
1035 m_rep->m_nrows = m_dimensions(0) = r;
1036
1037 if (c != m_rep->m_ncols)
1038 {
1039 octave_idx_type *new_cidx = m_rep->idx_type_allocate (c+1);
1040 std::copy_n (m_rep->m_cidx, std::min (c, m_rep->m_ncols) + 1, new_cidx);
1041 m_rep->idx_type_deallocate (m_rep->m_cidx, m_rep->m_ncols + 1);
1042 m_rep->m_cidx = new_cidx;
1043
1044 if (c > m_rep->m_ncols)
1045 std::fill_n (m_rep->m_cidx + m_rep->m_ncols + 1, c - m_rep->m_ncols,
1046 m_rep->m_cidx[m_rep->m_ncols]);
1047 }
1048
1049 m_rep->m_ncols = m_dimensions(1) = c;
1050
1051 m_rep->change_length (m_rep->nnz ());
1052}
1053
1054template <typename T, typename Alloc>
// Insert sparse matrix a into *this with its upper-left corner at (r, c),
// overwriting the covered region.  A fresh representation of exactly the
// right size is built: the columns before c are copied verbatim, the
// covered columns are merged (rows above, then a's rows, then rows below),
// and the trailing columns are copied.
// NOTE(review): the signature lines (upstream 1055-1058) were dropped by
// this extraction -- confirm upstream.
1059{
1060 octave_idx_type a_rows = a.rows ();
1061 octave_idx_type a_cols = a.cols ();
1062 octave_idx_type nr = rows ();
1063 octave_idx_type nc = cols ();
1064
1065 if (r < 0 || r + a_rows > rows () || c < 0 || c + a_cols > cols ())
1066 (*current_liboctave_error_handler) ("range error for insert");
1067
1068 // First count the number of elements in the final array
1069 octave_idx_type nel = cidx (c) + a.nnz ();
1070
1071 if (c + a_cols < nc)
1072 nel += cidx (nc) - cidx (c + a_cols);
1073
// Entries in the covered columns that lie outside a's row span survive.
1074 for (octave_idx_type i = c; i < c + a_cols; i++)
1075 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1076 if (ridx (j) < r || ridx (j) >= r + a_rows)
1077 nel++;
1078
// Keep the old representation alive in tmp while rebuilding this one.
1079 Sparse<T, Alloc> tmp (*this);
1080 --m_rep->m_count;
1081 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, nel);
1082
1083 for (octave_idx_type i = 0; i < tmp.cidx (c); i++)
1084 {
1085 data (i) = tmp.data (i);
1086 ridx (i) = tmp.ridx (i);
1087 }
1088 for (octave_idx_type i = 0; i < c + 1; i++)
1089 cidx (i) = tmp.cidx (i);
1090
1091 octave_idx_type ii = cidx (c);
1092
1093 for (octave_idx_type i = c; i < c + a_cols; i++)
1094 {
1095 octave_quit ();
1096
1097 for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1098 if (tmp.ridx (j) < r)
1099 {
1100 data (ii) = tmp.data (j);
1101 ridx (ii++) = tmp.ridx (j);
1102 }
1103
1104 octave_quit ();
1105
1106 for (octave_idx_type j = a.cidx (i-c); j < a.cidx (i-c+1); j++)
1107 {
1108 data (ii) = a.data (j);
1109 ridx (ii++) = r + a.ridx (j);
1110 }
1111
1112 octave_quit ();
1113
1114 for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1115 if (tmp.ridx (j) >= r + a_rows)
1116 {
1117 data (ii) = tmp.data (j);
1118 ridx (ii++) = tmp.ridx (j);
1119 }
1120
1121 cidx (i+1) = ii;
1122 }
1123
1124 for (octave_idx_type i = c + a_cols; i < nc; i++)
1125 {
1126 for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1127 {
1128 data (ii) = tmp.data (j);
1129 ridx (ii++) = tmp.ridx (j);
1130 }
1131 cidx (i+1) = ii;
1132 }
1133
1134 return *this;
1135}
1136
1137template <typename T, typename Alloc>
// Convenience overload: insert a at the position given by a 2-element
// index vector, delegating to insert (a, r, c).
// NOTE(review): the signature lines (upstream 1138-1141) were dropped by
// this extraction -- confirm upstream.
1142{
1143
1144 if (ra_idx.numel () != 2)
1145 (*current_liboctave_error_handler) ("range error for insert");
1146
1147 return insert (a, ra_idx(0), ra_idx(1));
1148}
1149
1150template <typename T, typename Alloc>
// Return the transpose, built with the classic two-pass CSC algorithm:
// first count entries per row (which become the result's columns), turn
// the counts into start offsets, then scatter each entry into its slot.
// NOTE(review): the signature lines (upstream 1151-1153) were dropped by
// this extraction -- confirm upstream.
1154{
1155 liboctave_panic_unless (ndims () == 2);
1156
1157 octave_idx_type nr = rows ();
1158 octave_idx_type nc = cols ();
1159 octave_idx_type nz = nnz ();
1160 Sparse<T, Alloc> retval (nc, nr, nz);
1161
1162 for (octave_idx_type i = 0; i < nz; i++)
1163 retval.xcidx (ridx (i) + 1)++;
1164 // retval.xcidx[1:nr] holds the row degrees for rows 0:(nr-1)
1165 nz = 0;
1166 for (octave_idx_type i = 1; i <= nr; i++)
1167 {
1168 const octave_idx_type tmp = retval.xcidx (i);
1169 retval.xcidx (i) = nz;
1170 nz += tmp;
1171 }
1172 // retval.xcidx[1:nr] holds row entry *start* offsets for rows 0:(nr-1)
1173
1174 for (octave_idx_type j = 0; j < nc; j++)
1175 for (octave_idx_type k = cidx (j); k < cidx (j+1); k++)
1176 {
1177 octave_idx_type q = retval.xcidx (ridx (k) + 1)++;
1178 retval.xridx (q) = j;
1179 retval.xdata (q) = data (k);
1180 }
1181 liboctave_panic_unless (nnz () == retval.xcidx (nr));
1182 // retval.xcidx[1:nr] holds row entry *end* offsets for rows 0:(nr-1)
1183 // and retval.xcidx[0:(nr-1)] holds their row entry *start* offsets
1184
1185 return retval;
1186}
1187
1188// Lower bound lookup. Could also use octave_sort, but that has upper bound
1189// semantics, so requires some manipulation to set right. Uses a plain loop
1190// for small columns.
// Returns the index of the first element of ridx[0..nr) that is >= ri
// (i.e. std::lower_bound semantics).
// NOTE(review): this extraction dropped the return-type line (upstream
// 1192, presumably "octave_idx_type") and the declaration of "l"
// (upstream 1198, presumably "octave_idx_type l;") -- confirm upstream.
1191static
1193lblookup (const octave_idx_type *ridx, octave_idx_type nr,
1194 octave_idx_type ri)
1195{
1196 if (nr <= 8)
1197 {
1199 for (l = 0; l < nr; l++)
1200 if (ridx[l] >= ri)
1201 break;
1202 return l;
1203 }
1204 else
1205 return std::lower_bound (ridx, ridx + nr, ri) - ridx;
1206}
1207
1208template <typename T, typename Alloc>
// Delete the elements selected by idx from a matrix treated as a vector.
// Three shapes are handled: a column vector (fast contiguous-range path or
// a general merge against the sorted index list), a row vector (contiguous
// range path or complement-index fallback), and a general matrix, which is
// only valid for a colon-equivalent index (delete everything) or is
// flattened to a vector first.  The code is kept byte-identical here: the
// pointer arithmetic over tmp.data()/ridx()/cidx() is too order-sensitive
// to restyle safely from this extraction.
1210void
1211Sparse<T, Alloc>::delete_elements (const octave::idx_vector& idx)
1212{
1213 Sparse<T, Alloc> retval;
1214
1215 liboctave_panic_unless (ndims () == 2);
1216
1217 octave_idx_type nr = dim1 ();
1218 octave_idx_type nc = dim2 ();
1219 octave_idx_type nz = nnz ();
1220
1221 octave_idx_type nel = numel (); // Can throw.
1222
1223 const dim_vector idx_dims = idx.orig_dimensions ();
1224
1225 if (idx.extent (nel) > nel)
1226 octave::err_del_index_out_of_range (true, idx.extent (nel), nel);
1227
1228 if (nc == 1)
1229 {
1230 // Sparse column vector.
1231 const Sparse<T, Alloc> tmp = *this; // constant copy to prevent COW.
1232
1233 octave_idx_type lb, ub;
1234
1235 if (idx.is_cont_range (nel, lb, ub))
1236 {
1237 // Special-case a contiguous range.
1238 // Look-up indices first.
1239 octave_idx_type li = lblookup (tmp.ridx (), nz, lb);
1240 octave_idx_type ui = lblookup (tmp.ridx (), nz, ub);
1241 // Copy data and adjust indices.
1242 octave_idx_type nz_new = nz - (ui - li);
1243 *this = Sparse<T, Alloc> (nr - (ub - lb), 1, nz_new);
1244 std::copy_n (tmp.data (), li, data ());
1245 std::copy_n (tmp.ridx (), li, xridx ());
1246 std::copy (tmp.data () + ui, tmp.data () + nz, xdata () + li);
// Entries past the deleted range shift up by the range length.
1247 mx_inline_sub (nz - ui, xridx () + li, tmp.ridx () + ui, ub - lb);
1248 xcidx (1) = nz_new;
1249 }
1250 else
1251 {
1252 OCTAVE_LOCAL_BUFFER (octave_idx_type, ridx_new, nz);
1253 OCTAVE_LOCAL_BUFFER (T, data_new, nz);
1254 octave::idx_vector sidx = idx.sorted (true);
1255 const octave_idx_type *sj = sidx.raw ();
1256 octave_idx_type sl = sidx.length (nel);
1257 octave_idx_type nz_new = 0;
1258 octave_idx_type j = 0;
// Merge walk: keep each stored entry not named in the sorted delete
// list, shifting its row index down by the number of deletions (j)
// that precede it.
1259 for (octave_idx_type i = 0; i < nz; i++)
1260 {
1261 octave_idx_type r = tmp.ridx (i);
1262 for (; j < sl && sj[j] < r; j++) ;
1263 if (j == sl || sj[j] > r)
1264 {
1265 data_new[nz_new] = tmp.data (i);
1266 ridx_new[nz_new++] = r - j;
1267 }
1268 }
1269
1270 *this = Sparse<T, Alloc> (nr - sl, 1, nz_new);
1271 std::copy_n (ridx_new, nz_new, ridx ());
1272 std::copy_n (data_new, nz_new, xdata ());
1273 xcidx (1) = nz_new;
1274 }
1275 }
1276 else if (nr == 1)
1277 {
1278 // Sparse row vector.
1279 octave_idx_type lb, ub;
1280 if (idx.is_cont_range (nc, lb, ub))
1281 {
1282 const Sparse<T, Alloc> tmp = *this;
1283 octave_idx_type lbi = tmp.cidx (lb);
1284 octave_idx_type ubi = tmp.cidx (ub);
1285 octave_idx_type new_nz = nz - (ubi - lbi);
1286 *this = Sparse<T, Alloc> (1, nc - (ub - lb), new_nz);
1287 std::copy_n (tmp.data (), lbi, data ());
1288 std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1289 std::fill_n (ridx (), new_nz, static_cast<octave_idx_type> (0));
1290 std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
1291 mx_inline_sub (nc - ub, xcidx () + 1, tmp.cidx () + ub + 1,
1292 ubi - lbi);
1293 }
1294 else
1295 *this = index (idx.complement (nc));
1296 }
1297 else if (idx.length (nel) != 0)
1298 {
1299 if (idx.is_colon_equiv (nel))
1300 *this = Sparse<T, Alloc> ();
1301 else
1302 {
1303 *this = index (octave::idx_vector::colon);
1304 delete_elements (idx);
1305 *this = transpose (); // We want a row vector.
1306 }
1307 }
1308}
1309
1310template <typename T, typename Alloc>
1312void
1313Sparse<T, Alloc>::delete_elements (const octave::idx_vector& idx_i,
1314 const octave::idx_vector& idx_j)
1315{
1316 liboctave_panic_unless (ndims () == 2);
1317
1318 octave_idx_type nr = dim1 ();
1319 octave_idx_type nc = dim2 ();
1320 octave_idx_type nz = nnz ();
1321
1322 if (idx_i.is_colon ())
1323 {
1324 // Deleting columns.
1325 octave_idx_type lb, ub;
1326 if (idx_j.extent (nc) > nc)
1327 octave::err_del_index_out_of_range (false, idx_j.extent (nc), nc);
1328 else if (idx_j.is_cont_range (nc, lb, ub))
1329 {
1330 if (lb == 0 && ub == nc)
1331 {
1332 // Delete all rows and columns.
1333 *this = Sparse<T, Alloc> (nr, 0);
1334 }
1335 else if (nz == 0)
1336 {
1337 // No elements to preserve; adjust dimensions.
1338 *this = Sparse<T, Alloc> (nr, nc - (ub - lb));
1339 }
1340 else
1341 {
1342 const Sparse<T, Alloc> tmp = *this;
1343 octave_idx_type lbi = tmp.cidx (lb);
1344 octave_idx_type ubi = tmp.cidx (ub);
1345 octave_idx_type new_nz = nz - (ubi - lbi);
1346
1347 *this = Sparse<T, Alloc> (nr, nc - (ub - lb), new_nz);
1348 std::copy_n (tmp.data (), lbi, data ());
1349 std::copy_n (tmp.ridx (), lbi, ridx ());
1350 std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1351 std::copy (tmp.ridx () + ubi, tmp.ridx () + nz, xridx () + lbi);
1352 std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
1353 mx_inline_sub (nc - ub, xcidx () + lb + 1,
1354 tmp.cidx () + ub + 1, ubi - lbi);
1355 }
1356 }
1357 else
1358 *this = index (idx_i, idx_j.complement (nc));
1359 }
1360 else if (idx_j.is_colon ())
1361 {
1362 // Deleting rows.
1363 octave_idx_type lb, ub;
1364 if (idx_i.extent (nr) > nr)
1365 octave::err_del_index_out_of_range (false, idx_i.extent (nr), nr);
1366 else if (idx_i.is_cont_range (nr, lb, ub))
1367 {
1368 if (lb == 0 && ub == nr)
1369 {
1370 // Delete all rows and columns.
1371 *this = Sparse<T, Alloc> (0, nc);
1372 }
1373 else if (nz == 0)
1374 {
1375 // No elements to preserve; adjust dimensions.
1376 *this = Sparse<T, Alloc> (nr - (ub - lb), nc);
1377 }
1378 else
1379 {
1380 // This is more memory-efficient than the approach below.
1381 const Sparse<T, Alloc> tmpl = index (octave::idx_vector (0, lb), idx_j);
1382 const Sparse<T, Alloc> tmpu = index (octave::idx_vector (ub, nr), idx_j);
1383 *this = Sparse<T, Alloc> (nr - (ub - lb), nc,
1384 tmpl.nnz () + tmpu.nnz ());
1385 for (octave_idx_type j = 0, k = 0; j < nc; j++)
1386 {
1387 for (octave_idx_type i = tmpl.cidx (j); i < tmpl.cidx (j+1);
1388 i++)
1389 {
1390 xdata (k) = tmpl.data (i);
1391 xridx (k++) = tmpl.ridx (i);
1392 }
1393 for (octave_idx_type i = tmpu.cidx (j); i < tmpu.cidx (j+1);
1394 i++)
1395 {
1396 xdata (k) = tmpu.data (i);
1397 xridx (k++) = tmpu.ridx (i) + lb;
1398 }
1399
1400 xcidx (j+1) = k;
1401 }
1402 }
1403 }
1404 else
1405 {
1406 // This is done by transposing, deleting columns, then transposing
1407 // again.
1408 Sparse<T, Alloc> tmp = transpose ();
1409 tmp.delete_elements (idx_j, idx_i);
1410 *this = tmp.transpose ();
1411 }
1412 }
1413 else
1414 {
1415 // Empty assignment (no elements to delete) is OK if there is at
1416 // least one zero-length index and at most one other index that is
1417 // non-colon (or equivalent) index. Since we only have two
1418 // indices, we just need to check that we have at least one zero
1419 // length index. Matlab considers "[]" to be an empty index but
1420 // not "false". We accept both.
1421
1422 bool empty_assignment
1423 = (idx_i.length (nr) == 0 || idx_j.length (nc) == 0);
1424
1425 if (! empty_assignment)
1426 (*current_liboctave_error_handler)
1427 ("a null assignment can only have one non-colon index");
1428 }
1429}
1430
1431template <typename T, typename Alloc>
1433void
1434Sparse<T, Alloc>::delete_elements (int dim, const octave::idx_vector& idx)
1435{
1436 if (dim == 0)
1437 delete_elements (idx, octave::idx_vector::colon);
1438 else if (dim == 1)
1439 delete_elements (octave::idx_vector::colon, idx);
1440 else
1441 (*current_liboctave_error_handler) ("invalid dimension in delete_elements");
1442}
1443
1444template <typename T, typename Alloc>
1447Sparse<T, Alloc>::index (const octave::idx_vector& idx, bool resize_ok) const
1448{
1449 Sparse<T, Alloc> retval;
1450
1451 liboctave_panic_unless (ndims () == 2);
1452
1453 octave_idx_type nr = dim1 ();
1454 octave_idx_type nc = dim2 ();
1455 octave_idx_type nz = nnz ();
1456
1457 octave_idx_type nel = numel (); // Can throw.
1458
1459 const dim_vector idx_dims = idx.orig_dimensions ().redim (2);
1460
1461 if (idx.is_colon ())
1462 {
1463 if (nc == 1)
1464 retval = *this;
1465 else
1466 {
1467 // Fast magic colon processing.
1468 retval = Sparse<T, Alloc> (nel, 1, nz);
1469
1470 for (octave_idx_type i = 0; i < nc; i++)
1471 {
1472 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1473 {
1474 retval.xdata (j) = data (j);
1475 retval.xridx (j) = ridx (j) + i * nr;
1476 }
1477 }
1478
1479 retval.xcidx (0) = 0;
1480 retval.xcidx (1) = nz;
1481 }
1482 }
1483 else if (idx.extent (nel) > nel)
1484 {
1485 if (! resize_ok)
1486 octave::err_index_out_of_range (1, 1, idx.extent (nel), nel, dims ());
1487
1488 // resize_ok is completely handled here.
1489 octave_idx_type ext = idx.extent (nel);
1490 Sparse<T, Alloc> tmp = *this;
1491 tmp.resize1 (ext);
1492 retval = tmp.index (idx);
1493 }
1494 else if (nr == 1 && nc == 1)
1495 {
1496 // You have to be pretty sick to get to this bit of code,
1497 // since you have a scalar stored as a sparse matrix, and
1498 // then want to make a dense matrix with sparse representation.
1499 // Ok, we'll do it, but you deserve what you get!!
1500 retval = (Sparse<T, Alloc> (idx_dims(0), idx_dims(1), nz ? data (0) : T ()));
1501 }
1502 else if (nc == 1)
1503 {
1504 // Sparse column vector.
1505 octave_idx_type lb, ub;
1506
1507 if (idx.is_scalar ())
1508 {
1509 // Scalar index - just a binary lookup.
1510 octave_idx_type i = lblookup (ridx (), nz, idx(0));
1511 if (i < nz && ridx (i) == idx(0))
1512 retval = Sparse (1, 1, data (i));
1513 else
1514 retval = Sparse (1, 1);
1515 }
1516 else if (idx.is_cont_range (nel, lb, ub))
1517 {
1518 // Special-case a contiguous range.
1519 // Look-up indices first.
1520 octave_idx_type li = lblookup (ridx (), nz, lb);
1521 octave_idx_type ui = lblookup (ridx (), nz, ub);
1522 // Copy data and adjust indices.
1523 octave_idx_type nz_new = ui - li;
1524 retval = Sparse<T, Alloc> (ub - lb, 1, nz_new);
1525 std::copy_n (data () + li, nz_new, retval.data ());
1526 mx_inline_sub (nz_new, retval.xridx (), ridx () + li, lb);
1527 retval.xcidx (1) = nz_new;
1528 }
1529 else if (idx.is_permutation (nel) && idx.isvector ())
1530 {
1531 if (idx.is_range () && idx.increment () == -1)
1532 {
1533 retval = Sparse<T, Alloc> (nr, 1, nz);
1534
1535 for (octave_idx_type j = 0; j < nz; j++)
1536 retval.ridx (j) = nr - ridx (nz - j - 1) - 1;
1537
1538 std::copy_n (cidx (), 2, retval.cidx ());
1539 std::reverse_copy (data (), data () + nz, retval.data ());
1540 }
1541 else
1542 {
1543 Array<T> tmp = array_value ();
1544 tmp = tmp.index (idx);
1545 retval = Sparse<T, Alloc> (tmp);
1546 }
1547 }
1548 else
1549 {
1550 // If indexing a sparse column vector by a vector, the result is a
1551 // sparse column vector, otherwise it inherits the shape of index.
1552 // Vector transpose is cheap, so do it right here.
1553
1554 Array<octave_idx_type> tmp_idx = idx.as_array ().as_matrix ();
1555
1556 const Array<octave_idx_type> idxa = (idx_dims(0) == 1
1557 ? tmp_idx.transpose ()
1558 : tmp_idx);
1559
1560 octave_idx_type new_nr = idxa.rows ();
1561 octave_idx_type new_nc = idxa.cols ();
1562
1563 // Lookup.
1564 // FIXME: Could specialize for sorted idx?
1565 Array<octave_idx_type> lidx (dim_vector (new_nr, new_nc));
1566 for (octave_idx_type i = 0; i < new_nr*new_nc; i++)
1567 lidx.xelem (i) = lblookup (ridx (), nz, idxa(i));
1568
1569 // Count matches.
1570 retval = Sparse<T, Alloc> (idxa.rows (), idxa.cols ());
1571 for (octave_idx_type j = 0; j < new_nc; j++)
1572 {
1573 octave_idx_type nzj = 0;
1574 for (octave_idx_type i = 0; i < new_nr; i++)
1575 {
1576 octave_idx_type l = lidx.xelem (i, j);
1577 if (l < nz && ridx (l) == idxa(i, j))
1578 nzj++;
1579 else
1580 lidx.xelem (i, j) = nz;
1581 }
1582 retval.xcidx (j+1) = retval.xcidx (j) + nzj;
1583 }
1584
1585 retval.change_capacity (retval.xcidx (new_nc));
1586
1587 // Copy data and set row indices.
1588 octave_idx_type k = 0;
1589 for (octave_idx_type j = 0; j < new_nc; j++)
1590 for (octave_idx_type i = 0; i < new_nr; i++)
1591 {
1592 octave_idx_type l = lidx.xelem (i, j);
1593 if (l < nz)
1594 {
1595 retval.data (k) = data (l);
1596 retval.xridx (k++) = i;
1597 }
1598 }
1599 }
1600 }
1601 else if (nr == 1)
1602 {
1603 octave_idx_type lb, ub;
1604 if (idx.is_scalar ())
1605 retval = Sparse<T, Alloc> (1, 1, elem (0, idx(0)));
1606 else if (idx.is_cont_range (nel, lb, ub))
1607 {
1608 // Special-case a contiguous range.
1609 octave_idx_type lbi = cidx (lb);
1610 octave_idx_type ubi = cidx (ub);
1611 octave_idx_type new_nz = ubi - lbi;
1612 retval = Sparse<T, Alloc> (1, ub - lb, new_nz);
1613 std::copy_n (data () + lbi, new_nz, retval.data ());
1614 std::fill_n (retval.ridx (), new_nz, static_cast<octave_idx_type> (0));
1615 mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1616 }
1617 else
1618 {
1619 // Sparse row vectors occupy O(nr) storage anyway, so let's just
1620 // convert the matrix to full, index, and sparsify the result.
1621 retval = Sparse<T, Alloc> (array_value ().index (idx));
1622 }
1623 }
1624 else
1625 {
1626 if (nr != 0 && idx.is_scalar ())
1627 retval = Sparse<T, Alloc> (1, 1, elem (idx(0) % nr, idx(0) / nr));
1628 else
1629 {
1630 // Indexing a non-vector sparse matrix by linear indexing.
1631 // I suppose this is rare (and it may easily overflow), so let's take
1632 // the easy way, and reshape first to column vector, which is already
1633 // handled above.
1634 retval = index (octave::idx_vector::colon).index (idx);
1635 // In this case we're supposed to always inherit the shape, but
1636 // column(row) doesn't do it, so we'll do it instead.
1637 if (idx_dims(0) == 1 && idx_dims(1) != 1)
1638 retval = retval.transpose ();
1639 }
1640 }
1641
1642 return retval;
1643}
1644
1645template <typename T, typename Alloc>
1648Sparse<T, Alloc>::index (const octave::idx_vector& idx_i,
1649 const octave::idx_vector& idx_j,
1650 bool resize_ok) const
1651{
1652 Sparse<T, Alloc> retval;
1653
1654 liboctave_panic_unless (ndims () == 2);
1655
1656 octave_idx_type nr = dim1 ();
1657 octave_idx_type nc = dim2 ();
1658
1659 octave_idx_type n = idx_i.length (nr);
1660 octave_idx_type m = idx_j.length (nc);
1661
1662 octave_idx_type lb, ub;
1663
1664 if (idx_i.extent (nr) > nr || idx_j.extent (nc) > nc)
1665 {
1666 // resize_ok is completely handled here.
1667 if (resize_ok)
1668 {
1669 octave_idx_type ext_i = idx_i.extent (nr);
1670 octave_idx_type ext_j = idx_j.extent (nc);
1671 Sparse<T, Alloc> tmp = *this;
1672 tmp.resize (ext_i, ext_j);
1673 retval = tmp.index (idx_i, idx_j);
1674 }
1675 else if (idx_i.extent (nr) > nr)
1676 octave::err_index_out_of_range (2, 1, idx_i.extent (nr), nr, dims ());
1677 else
1678 octave::err_index_out_of_range (2, 2, idx_j.extent (nc), nc, dims ());
1679 }
1680 else if (nr == 1 && nc == 1)
1681 {
1682 // Scalars stored as sparse matrices occupy more memory than
1683 // a scalar, so let's just convert the matrix to full, index,
1684 // and sparsify the result.
1685
1686 retval = Sparse<T, Alloc> (array_value ().index (idx_i, idx_j));
1687 }
1688 else if (idx_i.is_colon ())
1689 {
1690 // Great, we're just manipulating columns. This is going to be quite
1691 // efficient, because the columns can stay compressed as they are.
1692 if (idx_j.is_colon ())
1693 retval = *this; // Shallow copy.
1694 else if (idx_j.is_cont_range (nc, lb, ub))
1695 {
1696 // Special-case a contiguous range.
1697 octave_idx_type lbi = cidx (lb);
1698 octave_idx_type ubi = cidx (ub);
1699 octave_idx_type new_nz = ubi - lbi;
1700 retval = Sparse<T, Alloc> (nr, ub - lb, new_nz);
1701 std::copy_n (data () + lbi, new_nz, retval.data ());
1702 std::copy_n (ridx () + lbi, new_nz, retval.ridx ());
1703 mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1704 }
1705 else
1706 {
1707 // Count new nonzero elements.
1708 retval = Sparse<T, Alloc> (nr, m);
1709 for (octave_idx_type j = 0; j < m; j++)
1710 {
1711 octave_idx_type jj = idx_j(j);
1712 retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1713 }
1714
1715 retval.change_capacity (retval.xcidx (m));
1716
1717 // Copy data & indices.
1718 for (octave_idx_type j = 0; j < m; j++)
1719 {
1720 octave_idx_type ljj = cidx (idx_j(j));
1721 octave_idx_type lj = retval.xcidx (j);
1722 octave_idx_type nzj = retval.xcidx (j+1) - lj;
1723
1724 std::copy_n (data () + ljj, nzj, retval.data () + lj);
1725 std::copy_n (ridx () + ljj, nzj, retval.ridx () + lj);
1726 }
1727 }
1728 }
1729 else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
1730 {
1731 // It's actually vector indexing. The 1-D index is specialized for that.
1732 retval = index (idx_i);
1733
1734 // If nr == 1 then the vector indexing will return a column vector!!
1735 if (nr == 1)
1736 retval.transpose ();
1737 }
1738 else if (idx_i.is_scalar ())
1739 {
1740 octave_idx_type ii = idx_i(0);
1741 retval = Sparse<T, Alloc> (1, m);
1743 for (octave_idx_type j = 0; j < m; j++)
1744 {
1745 octave_quit ();
1746 octave_idx_type jj = idx_j(j);
1747 octave_idx_type lj = cidx (jj);
1748 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1749
1750 // Scalar index - just a binary lookup.
1751 octave_idx_type i = lblookup (ridx () + lj, nzj, ii);
1752 if (i < nzj && ridx (i+lj) == ii)
1753 {
1754 ij[j] = i + lj;
1755 retval.xcidx (j+1) = retval.xcidx (j) + 1;
1756 }
1757 else
1758 retval.xcidx (j+1) = retval.xcidx (j);
1759 }
1760
1761 retval.change_capacity (retval.xcidx (m));
1762
1763 // Copy data, adjust row indices.
1764 for (octave_idx_type j = 0; j < m; j++)
1765 {
1766 octave_idx_type i = retval.xcidx (j);
1767 if (retval.xcidx (j+1) > i)
1768 {
1769 retval.xridx (i) = 0;
1770 retval.xdata (i) = data (ij[j]);
1771 }
1772 }
1773 }
1774 else if (idx_i.is_cont_range (nr, lb, ub))
1775 {
1776 retval = Sparse<T, Alloc> (n, m);
1779 for (octave_idx_type j = 0; j < m; j++)
1780 {
1781 octave_quit ();
1782 octave_idx_type jj = idx_j(j);
1783 octave_idx_type lj = cidx (jj);
1784 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1785 octave_idx_type lij, uij;
1786
1787 // Lookup indices.
1788 li[j] = lij = lblookup (ridx () + lj, nzj, lb) + lj;
1789 ui[j] = uij = lblookup (ridx () + lj, nzj, ub) + lj;
1790 retval.xcidx (j+1) = retval.xcidx (j) + ui[j] - li[j];
1791 }
1792
1793 retval.change_capacity (retval.xcidx (m));
1794
1795 // Copy data, adjust row indices.
1796 for (octave_idx_type j = 0, k = 0; j < m; j++)
1797 {
1798 octave_quit ();
1799 for (octave_idx_type i = li[j]; i < ui[j]; i++)
1800 {
1801 retval.xdata (k) = data (i);
1802 retval.xridx (k++) = ridx (i) - lb;
1803 }
1804 }
1805 }
1806 else if (idx_i.is_permutation (nr))
1807 {
1808 // The columns preserve their length, just need to renumber and sort them.
1809 // Count new nonzero elements.
1810 retval = Sparse<T, Alloc> (nr, m);
1811 for (octave_idx_type j = 0; j < m; j++)
1812 {
1813 octave_idx_type jj = idx_j(j);
1814 retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1815 }
1816
1817 retval.change_capacity (retval.xcidx (m));
1818
1819 octave_quit ();
1820
1821 if (idx_i.is_range () && idx_i.increment () == -1)
1822 {
1823 // It's nr:-1:1. Just flip all columns.
1824 for (octave_idx_type j = 0; j < m; j++)
1825 {
1826 octave_quit ();
1827 octave_idx_type jj = idx_j(j);
1828 octave_idx_type lj = cidx (jj);
1829 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1830 octave_idx_type li = retval.xcidx (j);
1831 octave_idx_type uj = lj + nzj - 1;
1832 for (octave_idx_type i = 0; i < nzj; i++)
1833 {
1834 retval.xdata (li + i) = data (uj - i); // Copy in reverse order.
1835 retval.xridx (li + i) = nr - 1 - ridx (uj - i); // Ditto with transform.
1836 }
1837 }
1838 }
1839 else
1840 {
1841 // Get inverse permutation.
1842 octave::idx_vector idx_iinv = idx_i.inverse_permutation (nr);
1843 const octave_idx_type *iinv = idx_iinv.raw ();
1844
1845 // Scatter buffer.
1846 OCTAVE_LOCAL_BUFFER (T, scb, nr);
1847 octave_idx_type *rri = retval.ridx ();
1848
1849 for (octave_idx_type j = 0; j < m; j++)
1850 {
1851 octave_quit ();
1852 octave_idx_type jj = idx_j(j);
1853 octave_idx_type lj = cidx (jj);
1854 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1855 octave_idx_type li = retval.xcidx (j);
1856 // Scatter the column, transform indices.
1857 for (octave_idx_type i = 0; i < nzj; i++)
1858 scb[rri[li + i] = iinv[ridx (lj + i)]] = data (lj + i);
1859
1860 octave_quit ();
1861
1862 // Sort them.
1863 std::sort (rri + li, rri + li + nzj);
1864
1865 // Gather.
1866 for (octave_idx_type i = 0; i < nzj; i++)
1867 retval.xdata (li + i) = scb[rri[li + i]];
1868 }
1869 }
1870
1871 }
1872 else if (idx_j.is_colon ())
1873 {
1874 // This requires uncompressing columns, which is generally costly,
1875 // so we rely on the efficient transpose to handle this.
1876 // It may still make sense to optimize some cases here.
1877 retval = transpose ();
1878 retval = retval.index (octave::idx_vector::colon, idx_i);
1879 retval = retval.transpose ();
1880 }
1881 else
1882 {
1883 // A(I, J) is decomposed into A(:, J)(I, :).
1884 retval = index (octave::idx_vector::colon, idx_j);
1885 retval = retval.index (idx_i, octave::idx_vector::colon);
1886 }
1887
1888 return retval;
1889}
1890
1891template <typename T, typename Alloc>
1893void
1894Sparse<T, Alloc>::assign (const octave::idx_vector& idx,
1895 const Sparse<T, Alloc>& rhs)
1896{
1897 Sparse<T, Alloc> retval;
1898
1899 liboctave_panic_unless (ndims () == 2);
1900
1901 octave_idx_type nr = dim1 ();
1902 octave_idx_type nc = dim2 ();
1903 octave_idx_type nz = nnz ();
1904
1905 octave_idx_type n = numel (); // Can throw.
1906
1907 octave_idx_type rhl = rhs.numel ();
1908
1909 if (idx.length (n) == rhl)
1910 {
1911 if (rhl == 0)
1912 return;
1913
1914 octave_idx_type nx = idx.extent (n);
1915 // Try to resize first if necessary.
1916 if (nx != n)
1917 {
1918 resize1 (nx);
1919 n = numel ();
1920 nr = rows ();
1921 nc = cols ();
1922 // nz is preserved.
1923 }
1924
1925 if (idx.is_colon ())
1926 {
1927 *this = rhs.reshape (m_dimensions);
1928 }
1929 else if (nc == 1 && rhs.cols () == 1)
1930 {
1931 // Sparse column vector to sparse column vector assignment.
1932
1933 octave_idx_type lb, ub;
1934 if (idx.is_cont_range (nr, lb, ub))
1935 {
1936 // Special-case a contiguous range.
1937 // Look-up indices first.
1938 octave_idx_type li = lblookup (ridx (), nz, lb);
1939 octave_idx_type ui = lblookup (ridx (), nz, ub);
1940 octave_idx_type rnz = rhs.nnz ();
1941 octave_idx_type new_nz = nz - (ui - li) + rnz;
1942
1943 if (new_nz >= nz && new_nz <= nzmax ())
1944 {
1945 // Adding/overwriting elements, enough capacity allocated.
1946
1947 if (new_nz > nz)
1948 {
1949 // Make room first.
1950 std::copy_backward (data () + ui, data () + nz,
1951 data () + nz + rnz);
1952 std::copy_backward (ridx () + ui, ridx () + nz,
1953 ridx () + nz + rnz);
1954 }
1955
1956 // Copy data and adjust indices from rhs.
1957 std::copy_n (rhs.data (), rnz, data () + li);
1958 mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1959 }
1960 else
1961 {
1962 // Clearing elements or exceeding capacity, allocate afresh
1963 // and paste pieces.
1964 const Sparse<T, Alloc> tmp = *this;
1965 *this = Sparse<T, Alloc> (nr, 1, new_nz);
1966
1967 // Head ...
1968 std::copy_n (tmp.data (), li, data ());
1969 std::copy_n (tmp.ridx (), li, ridx ());
1970
1971 // new stuff ...
1972 std::copy_n (rhs.data (), rnz, data () + li);
1973 mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1974
1975 // ...tail
1976 std::copy (tmp.data () + ui, tmp.data () + nz,
1977 data () + li + rnz);
1978 std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
1979 ridx () + li + rnz);
1980 }
1981
1982 cidx (1) = new_nz;
1983 }
1984 else if (idx.is_range () && idx.increment () == -1)
1985 {
1986 // It's s(u:-1:l) = r. Reverse the assignment.
1987 assign (idx.sorted (), rhs.index (octave::idx_vector (rhl - 1, -1, -1)));
1988 }
1989 else if (idx.is_permutation (n))
1990 {
1991 *this = rhs.index (idx.inverse_permutation (n));
1992 }
1993 else if (rhs.nnz () == 0)
1994 {
1995 // Elements are being zeroed.
1996 octave_idx_type *ri = ridx ();
1997 for (octave_idx_type i = 0; i < rhl; i++)
1998 {
1999 octave_idx_type iidx = idx(i);
2000 octave_idx_type li = lblookup (ri, nz, iidx);
2001 if (li != nz && ri[li] == iidx)
2002 xdata (li) = T ();
2003 }
2004
2005 maybe_compress (true);
2006 }
2007 else
2008 {
2009 const Sparse<T, Alloc> tmp = *this;
2010 octave_idx_type new_nz = nz + rhl;
2011 // Disassembly our matrix...
2012 Array<octave_idx_type> new_ri (dim_vector (new_nz, 1));
2013 Array<T> new_data (dim_vector (new_nz, 1));
2014 std::copy_n (tmp.ridx (), nz, new_ri.rwdata ());
2015 std::copy_n (tmp.data (), nz, new_data.rwdata ());
2016 // ... insert new data (densified) ...
2017 idx.copy_data (new_ri.rwdata () + nz);
2018 new_data.assign (octave::idx_vector (nz, new_nz), rhs.array_value ());
2019 // ... reassembly.
2020 *this = Sparse<T, Alloc> (new_data, new_ri, 0, nr, nc, false);
2021 }
2022 }
2023 else
2024 {
2025 dim_vector save_dims = m_dimensions;
2026 *this = index (octave::idx_vector::colon);
2027 assign (idx, rhs.index (octave::idx_vector::colon));
2028 *this = reshape (save_dims);
2029 }
2030 }
2031 else if (rhl == 1)
2032 {
2033 rhl = idx.length (n);
2034 if (rhs.nnz () != 0)
2035 assign (idx, Sparse<T, Alloc> (rhl, 1, rhs.data (0)));
2036 else
2037 assign (idx, Sparse<T, Alloc> (rhl, 1));
2038 }
2039 else
2040 octave::err_nonconformant ("=", dim_vector(idx.length (n), 1), rhs.dims());
2041}
2042
2043template <typename T, typename Alloc>
2045void
2046Sparse<T, Alloc>::assign (const octave::idx_vector& idx, const T& rhs)
2047{
2048 // FIXME: Converting the RHS and forwarding to the sparse matrix
2049 // assignment function is simpler, but it might be good to have a
2050 // specialization...
2051
2052 assign (idx, Sparse<T, Alloc> (1, 1, rhs));
2053}
2054
2055template <typename T, typename Alloc>
2057void
2058Sparse<T, Alloc>::assign (const octave::idx_vector& idx_i,
2059 const octave::idx_vector& idx_j,
2060 const Sparse<T, Alloc>& rhs)
2061{
2062 Sparse<T, Alloc> retval;
2063
2064 liboctave_panic_unless (ndims () == 2);
2065
2066 octave_idx_type nr = dim1 ();
2067 octave_idx_type nc = dim2 ();
2068 octave_idx_type nz = nnz ();
2069
2070 octave_idx_type n = rhs.rows ();
2071 octave_idx_type m = rhs.columns ();
2072
2073 // FIXME: this should probably be written more like the
2074 // Array<T>::assign function...
2075
2076 bool orig_zero_by_zero = (nr == 0 && nc == 0);
2077
2078 if (orig_zero_by_zero || (idx_i.length (nr) == n && idx_j.length (nc) == m))
2079 {
2080 octave_idx_type nrx;
2081 octave_idx_type ncx;
2082
2083 if (orig_zero_by_zero)
2084 {
2085 if (idx_i.is_colon ())
2086 {
2087 nrx = n;
2088
2089 if (idx_j.is_colon ())
2090 ncx = m;
2091 else
2092 ncx = idx_j.extent (nc);
2093 }
2094 else if (idx_j.is_colon ())
2095 {
2096 nrx = idx_i.extent (nr);
2097 ncx = m;
2098 }
2099 else
2100 {
2101 nrx = idx_i.extent (nr);
2102 ncx = idx_j.extent (nc);
2103 }
2104 }
2105 else
2106 {
2107 nrx = idx_i.extent (nr);
2108 ncx = idx_j.extent (nc);
2109 }
2110
2111 // Try to resize first if necessary.
2112 if (nrx != nr || ncx != nc)
2113 {
2114 resize (nrx, ncx);
2115 nr = rows ();
2116 nc = cols ();
2117 // nz is preserved.
2118 }
2119
2120 if (n == 0 || m == 0)
2121 return;
2122
2123 if (idx_i.is_colon ())
2124 {
2125 octave_idx_type lb, ub;
2126 // Great, we're just manipulating columns. This is going to be quite
2127 // efficient, because the columns can stay compressed as they are.
2128 if (idx_j.is_colon ())
2129 *this = rhs; // Shallow copy.
2130 else if (idx_j.is_cont_range (nc, lb, ub))
2131 {
2132 // Special-case a contiguous range.
2133 octave_idx_type li = cidx (lb);
2134 octave_idx_type ui = cidx (ub);
2135 octave_idx_type rnz = rhs.nnz ();
2136 octave_idx_type new_nz = nz - (ui - li) + rnz;
2137
2138 if (new_nz >= nz && new_nz <= nzmax ())
2139 {
2140 // Adding/overwriting elements, enough capacity allocated.
2141
2142 if (new_nz > nz)
2143 {
2144 // Make room first.
2145 std::copy_backward (data () + ui, data () + nz,
2146 data () + new_nz);
2147 std::copy_backward (ridx () + ui, ridx () + nz,
2148 ridx () + new_nz);
2149 mx_inline_add2 (nc - ub, cidx () + ub + 1, new_nz - nz);
2150 }
2151
2152 // Copy data and indices from rhs.
2153 std::copy_n (rhs.data (), rnz, data () + li);
2154 std::copy_n (rhs.ridx (), rnz, ridx () + li);
2155 mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2156 li);
2157
2158 liboctave_panic_unless (nnz () == new_nz);
2159 }
2160 else
2161 {
2162 // Clearing elements or exceeding capacity, allocate afresh
2163 // and paste pieces.
2164 const Sparse<T, Alloc> tmp = *this;
2165 *this = Sparse<T, Alloc> (nr, nc, new_nz);
2166
2167 // Head...
2168 std::copy_n (tmp.data (), li, data ());
2169 std::copy_n (tmp.ridx (), li, ridx ());
2170 std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
2171
2172 // new stuff...
2173 std::copy_n (rhs.data (), rnz, data () + li);
2174 std::copy_n (rhs.ridx (), rnz, ridx () + li);
2175 mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2176 li);
2177
2178 // ...tail.
2179 std::copy (tmp.data () + ui, tmp.data () + nz,
2180 data () + li + rnz);
2181 std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
2182 ridx () + li + rnz);
2183 mx_inline_add (nc - ub, cidx () + ub + 1,
2184 tmp.cidx () + ub + 1, new_nz - nz);
2185
2186 liboctave_panic_unless (nnz () == new_nz);
2187 }
2188 }
2189 else if (idx_j.is_range () && idx_j.increment () == -1)
2190 {
2191 // It's s(:,u:-1:l) = r. Reverse the assignment.
2192 assign (idx_i, idx_j.sorted (),
2193 rhs.index (idx_i, octave::idx_vector (m - 1, -1, -1)));
2194 }
2195 else if (idx_j.is_permutation (nc))
2196 {
2197 *this = rhs.index (idx_i, idx_j.inverse_permutation (nc));
2198 }
2199 else
2200 {
2201 const Sparse<T, Alloc> tmp = *this;
2202 *this = Sparse<T, Alloc> (nr, nc);
2204
2205 // Assemble column lengths.
2206 for (octave_idx_type i = 0; i < nc; i++)
2207 xcidx (i+1) = tmp.cidx (i+1) - tmp.cidx (i);
2208
2209 for (octave_idx_type i = 0; i < m; i++)
2210 {
2211 octave_idx_type j =idx_j(i);
2212 jsav[j] = i;
2213 xcidx (j+1) = rhs.cidx (i+1) - rhs.cidx (i);
2214 }
2215
2216 // Make cumulative.
2217 for (octave_idx_type i = 0; i < nc; i++)
2218 xcidx (i+1) += xcidx (i);
2219
2220 change_capacity (nnz ());
2221
2222 // Merge columns.
2223 for (octave_idx_type i = 0; i < nc; i++)
2224 {
2225 octave_idx_type l = xcidx (i);
2226 octave_idx_type u = xcidx (i+1);
2227 octave_idx_type j = jsav[i];
2228 if (j >= 0)
2229 {
2230 // from rhs
2231 octave_idx_type k = rhs.cidx (j);
2232 std::copy_n (rhs.data () + k, u - l, xdata () + l);
2233 std::copy_n (rhs.ridx () + k, u - l, xridx () + l);
2234 }
2235 else
2236 {
2237 // original
2238 octave_idx_type k = tmp.cidx (i);
2239 std::copy_n (tmp.data () + k, u - l, xdata () + l);
2240 std::copy_n (tmp.ridx () + k, u - l, xridx () + l);
2241 }
2242 }
2243
2244 }
2245 }
2246 else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
2247 {
2248 // It's just vector indexing. The 1-D assign is specialized for that.
2249 assign (idx_i, rhs);
2250 }
2251 else if (idx_j.is_colon ())
2252 {
2253 if (idx_i.is_permutation (nr))
2254 {
2255 *this = rhs.index (idx_i.inverse_permutation (nr), idx_j);
2256 }
2257 else
2258 {
2259 // FIXME: optimize more special cases?
2260 // In general this requires unpacking the columns, which is slow,
2261 // especially for many small columns. OTOH, transpose is an
2262 // efficient O(nr+nc+nnz) operation.
2263 *this = transpose ();
2264 assign (octave::idx_vector::colon, idx_i, rhs.transpose ());
2265 *this = transpose ();
2266 }
2267 }
2268 else
2269 {
2270 // Split it into 2 assignments and one indexing.
2271 Sparse<T, Alloc> tmp = index (octave::idx_vector::colon, idx_j);
2272 tmp.assign (idx_i, octave::idx_vector::colon, rhs);
2273 assign (octave::idx_vector::colon, idx_j, tmp);
2274 }
2275 }
2276 else if (m == 1 && n == 1)
2277 {
2278 n = idx_i.length (nr);
2279 m = idx_j.length (nc);
2280 if (rhs.nnz () != 0)
2281 assign (idx_i, idx_j, Sparse<T, Alloc> (n, m, rhs.data (0)));
2282 else
2283 assign (idx_i, idx_j, Sparse<T, Alloc> (n, m));
2284 }
2285 else if (idx_i.length (nr) == m && idx_j.length (nc) == n
2286 && (n == 1 || m == 1))
2287 {
2288 assign (idx_i, idx_j, rhs.transpose ());
2289 }
2290 else
2291 octave::err_nonconformant ("=", idx_i.length (nr), idx_j.length (nc), n, m);
2292}
2293
2294template <typename T, typename Alloc>
2296void
2297Sparse<T, Alloc>::assign (const octave::idx_vector& idx_i,
2298 const octave::idx_vector& idx_j,
2299 const T& rhs)
2300{
2301 // FIXME: Converting the RHS and forwarding to the sparse matrix
2302 // assignment function is simpler, but it might be good to have a
2303 // specialization...
2304
2305 assign (idx_i, idx_j, Sparse<T, Alloc> (1, 1, rhs));
2306}
2307
// Can't use versions of these in Array.cc due to duplication of the
// instantiations for Array<double> and Sparse<double>, etc.
2310template <typename T>
2312bool
2314 typename ref_param<T>::type b)
2315{
2316 return (a < b);
2317}
2318
2319template <typename T>
2321bool
2323 typename ref_param<T>::type b)
2324{
2325 return (a > b);
2326}
2327
2328template <typename T, typename Alloc>
2332{
2333 Sparse<T, Alloc> m = *this;
2334
2335 octave_idx_type nr = m.rows ();
2336 octave_idx_type nc = m.columns ();
2337
2338 if (m.numel () < 1 || dim > 1)
2339 return m;
2340
2341 bool sort_by_column = (dim > 0);
2342 if (sort_by_column)
2343 {
2344 m = m.transpose ();
2345 std::swap (nr, nc);
2346 }
2347
2348 octave_sort<T> lsort;
2349
2350 if (mode == ASCENDING)
2351 lsort.set_compare (sparse_ascending_compare<T>);
2352 else if (mode == DESCENDING)
2353 lsort.set_compare (sparse_descending_compare<T>);
2354 else
2356 ("Sparse<T, Alloc>::sort: invalid MODE");
2357
2358 T *v = m.data ();
2359 octave_idx_type *mcidx = m.cidx ();
2360 octave_idx_type *mridx = m.ridx ();
2361
2362 for (octave_idx_type j = 0; j < nc; j++)
2363 {
2364 octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2365 lsort.sort (v, ns);
2366
2368 if (mode == ASCENDING)
2369 {
2370 for (i = 0; i < ns; i++)
2371 if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2372 break;
2373 }
2374 else
2375 {
2376 for (i = 0; i < ns; i++)
2377 if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2378 break;
2379 }
2380 for (octave_idx_type k = 0; k < i; k++)
2381 mridx[k] = k;
2382 for (octave_idx_type k = i; k < ns; k++)
2383 mridx[k] = k - ns + nr;
2384
2385 v += ns;
2386 mridx += ns;
2387 }
2388
2389 if (sort_by_column)
2390 m = m.transpose ();
2391
2392 return m;
2393}
2394
2395template <typename T, typename Alloc>
2399 octave_idx_type dim, sortmode mode) const
2400{
2401 Sparse<T, Alloc> m = *this;
2402
2403 octave_idx_type nr = m.rows ();
2404 octave_idx_type nc = m.columns ();
2405
2406 if (m.numel () < 1 || dim > 1)
2407 {
2408 sidx = Array<octave_idx_type> (dim_vector (nr, nc), 1);
2409 return m;
2410 }
2411
2412 bool sort_by_column = (dim > 0);
2413 if (sort_by_column)
2414 {
2415 m = m.transpose ();
2416 std::swap (nr, nc);
2417 }
2418
2419 octave_sort<T> indexed_sort;
2420
2421 if (mode == ASCENDING)
2422 indexed_sort.set_compare (sparse_ascending_compare<T>);
2423 else if (mode == DESCENDING)
2424 indexed_sort.set_compare (sparse_descending_compare<T>);
2425 else
2427 ("Sparse<T, Alloc>::sort: invalid MODE");
2428
2429 T *v = m.data ();
2430 octave_idx_type *mcidx = m.cidx ();
2431 octave_idx_type *mridx = m.ridx ();
2432
2433 sidx = Array<octave_idx_type> (dim_vector (nr, nc));
2435
2436 for (octave_idx_type j = 0; j < nc; j++)
2437 {
2438 octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2439 octave_idx_type offset = j * nr;
2440
2441 if (ns == 0)
2442 {
2443 for (octave_idx_type k = 0; k < nr; k++)
2444 sidx(offset + k) = k;
2445 }
2446 else
2447 {
2448 for (octave_idx_type i = 0; i < ns; i++)
2449 vi[i] = mridx[i];
2450
2451 indexed_sort.sort (v, vi, ns);
2452
2454 if (mode == ASCENDING)
2455 {
2456 for (i = 0; i < ns; i++)
2457 if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2458 break;
2459 }
2460 else
2461 {
2462 for (i = 0; i < ns; i++)
2463 if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2464 break;
2465 }
2466
2467 octave_idx_type ii = 0;
2468 octave_idx_type jj = i;
2469 for (octave_idx_type k = 0; k < nr; k++)
2470 {
2471 if (ii < ns && mridx[ii] == k)
2472 ii++;
2473 else
2474 sidx(offset + jj++) = k;
2475 }
2476
2477 for (octave_idx_type k = 0; k < i; k++)
2478 {
2479 sidx(k + offset) = vi[k];
2480 mridx[k] = k;
2481 }
2482
2483 for (octave_idx_type k = i; k < ns; k++)
2484 {
2485 sidx(k - ns + nr + offset) = vi[k];
2486 mridx[k] = k - ns + nr;
2487 }
2488
2489 v += ns;
2490 mridx += ns;
2491 }
2492 }
2493
2494 if (sort_by_column)
2495 {
2496 m = m.transpose ();
2497 sidx = sidx.transpose ();
2498 }
2499
2500 return m;
2501}
2502
// Sparse<T, Alloc>::diag (k): Matlab-style diag() for sparse objects.
// For a matrix input, extract the k-th diagonal (k > 0 above the main
// diagonal, k < 0 below) as a sparse column vector; for a row/column
// vector input, build a square sparse matrix with the vector placed on
// the k-th diagonal.
// NOTE(review): this Doxygen listing omits source lines 2504-2506 and
// 2510 -- presumably the export macro, the signature
// "Sparse<T, Alloc> Sparse<T, Alloc>::diag (octave_idx_type k) const",
// and the declaration of the result "d".  Confirm against the repo.
2503template <typename T, typename Alloc>
2507{
2508 octave_idx_type nnr = rows ();
2509 octave_idx_type nnc = cols ();
2511
// Empty input: fall through and return the (default) result unchanged.
2512 if (nnr == 0 || nnc == 0)
2513 ; // do nothing
// Matrix case (neither dimension is 1): extract the k-th diagonal.
2514 else if (nnr != 1 && nnc != 1)
2515 {
// Shrink the effective dimensions so ndiag below is the length of the
// requested diagonal rather than of the main diagonal.
2516 if (k > 0)
2517 nnc -= k;
2518 else if (k < 0)
2519 nnr += k;
2520
2521 if (nnr > 0 && nnc > 0)
2522 {
2523 octave_idx_type ndiag = std::min (nnr, nnc);
2524
// Pass 1: count nonzeros on the diagonal so the result can be
// allocated with exactly the right capacity.
2525 // Count the number of nonzero elements
2526 octave_idx_type nel = 0;
2527 if (k > 0)
2528 {
2529 for (octave_idx_type i = 0; i < ndiag; i++)
2530 if (elem (i, i+k) != 0.)
2531 nel++;
2532 }
2533 else if (k < 0)
2534 {
2535 for (octave_idx_type i = 0; i < ndiag; i++)
2536 if (elem (i-k, i) != 0.)
2537 nel++;
2538 }
2539 else
2540 {
2541 for (octave_idx_type i = 0; i < ndiag; i++)
2542 if (elem (i, i) != 0.)
2543 nel++;
2544 }
2545
// Result is an ndiag-by-1 sparse column vector; a single cidx entry
// pair [0, nel] describes its one column.
2546 d = Sparse<T, Alloc> (ndiag, 1, nel);
2547 d.xcidx (0) = 0;
2548 d.xcidx (1) = nel;
2549
// Pass 2: copy the nonzero diagonal values and their positions.
2550 octave_idx_type ii = 0;
2551 if (k > 0)
2552 {
2553 for (octave_idx_type i = 0; i < ndiag; i++)
2554 {
2555 T tmp = elem (i, i+k);
2556 if (tmp != 0.)
2557 {
2558 d.xdata (ii) = tmp;
2559 d.xridx (ii++) = i;
2560 }
2561 }
2562 }
2563 else if (k < 0)
2564 {
2565 for (octave_idx_type i = 0; i < ndiag; i++)
2566 {
2567 T tmp = elem (i-k, i);
2568 if (tmp != 0.)
2569 {
2570 d.xdata (ii) = tmp;
2571 d.xridx (ii++) = i;
2572 }
2573 }
2574 }
2575 else
2576 {
2577 for (octave_idx_type i = 0; i < ndiag; i++)
2578 {
2579 T tmp = elem (i, i);
2580 if (tmp != 0.)
2581 {
2582 d.xdata (ii) = tmp;
2583 d.xridx (ii++) = i;
2584 }
2585 }
2586 }
2587 }
2588 else
2589 {
2590 // Matlab returns [] 0x1 for out-of-range diagonal
2591
2592 octave_idx_type nr = 0;
2593 octave_idx_type nc = 1;
2594 octave_idx_type nz = 0;
2595
2596 d = Sparse<T, Alloc> (nr, nc, nz);
2597 }
2598 }
2599 else // one of dimensions == 1 (vector)
2600 {
// Vector case: build an n-by-n diagonal matrix with n = length + |k|.
// roff/coff shift the vector's entries to the k-th diagonal.
2601 octave_idx_type roff = 0;
2602 octave_idx_type coff = 0;
2603 if (k > 0)
2604 {
2605 roff = 0;
2606 coff = k;
2607 }
2608 else if (k < 0)
2609 {
2610 roff = -k;
2611 coff = 0;
2612 }
2613
// Row-vector input: its nonzeros are already stored one per column, so
// the existing column structure can be reused, shifted by coff.
2614 if (nnr == 1)
2615 {
2616 octave_idx_type n = nnc + std::abs (k);
2617 octave_idx_type nz = nnz ();
2618
2619 d = Sparse<T, Alloc> (n, n, nz);
2620
2621 if (nnz () > 0)
2622 {
// Columns before the diagonal's first column are empty.
2623 for (octave_idx_type i = 0; i < coff+1; i++)
2624 d.xcidx (i) = 0;
2625
2626 for (octave_idx_type j = 0; j < nnc; j++)
2627 {
2628 for (octave_idx_type i = cidx (j); i < cidx (j+1); i++)
2629 {
2630 d.xdata (i) = data (i);
2631 d.xridx (i) = j + roff;
2632 }
2633 d.xcidx (j + coff + 1) = cidx (j+1);
2634 }
2635
// Trailing columns after the diagonal are empty; pad cidx with nz.
2636 for (octave_idx_type i = nnc + coff + 1; i < n + 1; i++)
2637 d.xcidx (i) = nz;
2638 }
2639 }
2640 else
2641 {
// Column-vector input: walk its single column's row indices and emit
// one result column per input row, holding at most one nonzero.
2642 octave_idx_type n = nnr + std::abs (k);
2643 octave_idx_type nz = nnz ();
2644
2645 d = Sparse<T, Alloc> (n, n, nz);
2646
2647 if (nnz () > 0)
2648 {
2649 octave_idx_type ii = 0;
2650 octave_idx_type ir = ridx (0);
2651
2652 for (octave_idx_type i = 0; i < coff+1; i++)
2653 d.xcidx (i) = 0;
2654
2655 for (octave_idx_type i = 0; i < nnr; i++)
2656 {
2657 if (ir == i)
2658 {
2659 d.xdata (ii) = data (ii);
2660 d.xridx (ii++) = ir + roff;
2661
// Advance to the next stored row index unless all nonzeros are done.
2662 if (ii != nz)
2663 ir = ridx (ii);
2664 }
2665 d.xcidx (i + coff + 1) = ii;
2666 }
2667
2668 for (octave_idx_type i = nnr + coff + 1; i < n+1; i++)
2669 d.xcidx (i) = nz;
2670 }
2671 }
2672 }
2673
2674 return d;
2675}
2676
// Sparse<T, Alloc>::cat: concatenate N sparse matrices along DIM.
// DIM is 0 (vertical) or 1 (horizontal); -1/-2 request [,]/[;]-style
// hvcat dimension resolution.  Errors (via the liboctave error
// handler) on any other dimension or on mismatched operand sizes.
// NOTE(review): the Doxygen listing omits source lines 2678-2680 --
// the export macro and the opening of the signature
// "Sparse<T, Alloc> Sparse<T, Alloc>::cat (int dim, octave_idx_type n,".
// Confirm against the repository.
2677template <typename T, typename Alloc>
2681 const Sparse<T, Alloc> *sparse_list)
2682{
2683 // Default concatenation.
2684 bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
2685
// dim == -1/-2 encode horzcat/vertcat requests; map them to 0/1 and
// switch to the hvcat dimension-combination rule.
2686 if (dim == -1 || dim == -2)
2687 {
2688 concat_rule = &dim_vector::hvcat;
2689 dim = -dim - 1;
2690 }
2691 else if (dim < 0)
2692 (*current_liboctave_error_handler) ("cat: invalid dimension");
2693
2694 dim_vector dv;
2695 octave_idx_type total_nz = 0;
2696 if (dim != 0 && dim != 1)
2697 (*current_liboctave_error_handler)
2698 ("cat: invalid dimension for sparse concatenation");
2699
// Single operand: nothing to concatenate.
2700 if (n == 1)
2701 return sparse_list[0];
2702
// Accumulate the result dimensions (validating each operand against
// the chosen rule) and the total nonzero count for preallocation.
2703 for (octave_idx_type i = 0; i < n; i++)
2704 {
2705 if (! (dv.*concat_rule) (sparse_list[i].dims (), dim))
2706 (*current_liboctave_error_handler) ("cat: dimension mismatch");
2707
2708 total_nz += sparse_list[i].nnz ();
2709 }
2710
2711 Sparse<T, Alloc> retval (dv, total_nz);
2712
2713 if (retval.isempty ())
2714 return retval;
2715
2716 switch (dim)
2717 {
2718 case 0:
2719 {
2720 // sparse vertcat. This is not efficiently handled by assignment,
2721 // so we'll do it directly.
// For each result column, append each operand's entries for that
// column in order, offsetting row indices by the rows already placed.
2722 octave_idx_type l = 0;
2723 for (octave_idx_type j = 0; j < dv(1); j++)
2724 {
2725 octave_quit ();
2726
2727 octave_idx_type rcum = 0;
2728 for (octave_idx_type i = 0; i < n; i++)
2729 {
2730 const Sparse<T, Alloc>& spi = sparse_list[i];
2731 // Skipping empty matrices. See the comment in Array.cc.
2732 if (spi.isempty ())
2733 continue;
2734
2735 octave_idx_type kl = spi.cidx (j);
2736 octave_idx_type ku = spi.cidx (j+1);
2737 for (octave_idx_type k = kl; k < ku; k++, l++)
2738 {
2739 retval.xridx (l) = spi.ridx (k) + rcum;
2740 retval.xdata (l) = spi.data (k);
2741 }
2742
2743 rcum += spi.rows ();
2744 }
2745
2746 retval.xcidx (j+1) = l;
2747 }
2748
2749 break;
2750 }
2751 case 1:
2752 {
// Horizontal concatenation: assign each operand into its column slice.
2753 octave_idx_type l = 0;
2754 for (octave_idx_type i = 0; i < n; i++)
2755 {
2756 octave_quit ();
2757
2758 // Skipping empty matrices. See the comment in Array.cc.
2759 if (sparse_list[i].isempty ())
2760 continue;
2761
2762 octave_idx_type u = l + sparse_list[i].columns ();
2763 retval.assign (octave::idx_vector::colon, octave::idx_vector (l, u),
2764 sparse_list[i]);
2765 l = u;
2766 }
2767
2768 break;
2769 }
2770 default:
// Unreachable given the dim validation above; kept as a safety net.
2771 (*current_liboctave_error_handler) ("Sparse<T, Alloc>::cat: invalid dimension = %d - please report this bug", dim);
2772 }
2773
2774 return retval;
2775}
2776
// Sparse<T, Alloc>::array_value: convert to a dense Array<T>, filling
// unstored positions with T ().
// NOTE(review): the Doxygen listing omits source lines 2778-2780 --
// the export macro and the signature
// "Array<T> Sparse<T, Alloc>::array_value () const".  Confirm.
2777template <typename T, typename Alloc>
2781{
2782 Array<T> retval (dims (), T ());
// Row-vector fast path: each column holds at most one nonzero, so a
// column j has an entry exactly when cidx (j+1) advances past i.
2783 if (rows () == 1)
2784 {
2785 octave_idx_type i = 0;
2786 for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2787 {
2788 if (cidx (j+1) > i)
2789 retval.xelem (j) = data (i++);
2790 }
2791 }
2792 else
2793 {
// General case: scatter each stored CSC entry to its dense position.
2794 for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2795 for (octave_idx_type i = cidx (j), iu = cidx (j+1); i < iu; i++)
2796 retval.xelem (ridx (i), j) = data (i);
2797 }
2798
2799 return retval;
2800}
2801
// read_sparse_matrix: fill the preallocated sparse matrix A from IS.
// Input is one "row col value" triplet per entry (1-based indices),
// values read via READ_FCN.  Triplets must arrive in column-major
// order: column indices non-decreasing, row indices ascending within a
// column.  A's cidx/ridx/data are populated in place; on bad input the
// liboctave error handler is invoked (and IS's failbit set first for
// the range/order checks).  Returns IS so callers can test the stream
// state.
2802template <typename T>
2804std::istream&
2805read_sparse_matrix (std::istream& is, Sparse<T>& a,
2806 T (*read_fcn) (std::istream&))
2807{
2808 octave_idx_type nr = a.rows ();
2809 octave_idx_type nc = a.cols ();
2810 octave_idx_type nz = a.nzmax ();
2811
2812 if (nr > 0 && nc > 0)
2813 {
// iold/jold track the previous triplet for the ordering checks; ii is
// the number of entries stored so far (the running cidx value).
2814 octave_idx_type itmp;
2815 octave_idx_type jtmp;
2816 octave_idx_type iold = 0;
2817 octave_idx_type jold = 0;
2818 octave_idx_type ii = 0;
2819 T tmp;
2820
2821 a.cidx (0) = 0;
2822 for (octave_idx_type i = 0; i < nz; i++)
2823 {
// Read the 1-based row/column pair and convert to 0-based.
2824 itmp = 0; jtmp = 0;
2825 is >> itmp;
2826 itmp--;
2827
2828 is >> jtmp;
2829 jtmp--;
2830
// Non-integer index token: clear the stream, consume the offending
// word so it can be reported, then error out.
2831 if (is.fail ())
2832 {
2833 is.clear();
2834 std::string err_field;
2835 is >> err_field;
2836 (*current_liboctave_error_handler)
2837 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2838 "Symbols '%s' is not an integer format",
2839 i+1, err_field.c_str ());
2840 }
2841
2842 if (itmp < 0 || itmp >= nr)
2843 {
2844 is.setstate (std::ios::failbit);
2845
2846 (*current_liboctave_error_handler)
2847 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2848 "row index = %" OCTAVE_IDX_TYPE_FORMAT " out of range",
2849 i+1, itmp + 1);
2850 }
2851
2852 if (jtmp < 0 || jtmp >= nc)
2853 {
2854 is.setstate (std::ios::failbit);
2855
2856 (*current_liboctave_error_handler)
2857 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2858 "column index = %" OCTAVE_IDX_TYPE_FORMAT " out of range",
2859 i+1, jtmp + 1);
2860 }
2861
// Column ordering: indices may not go backwards; a forward jump closes
// all intervening columns by writing the current entry count to cidx.
2862 if (jtmp < jold)
2863 {
2864 is.setstate (std::ios::failbit);
2865
2866 (*current_liboctave_error_handler)
2867 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ":"
2868 "column indices must appear in ascending order "
2869 "(%" OCTAVE_IDX_TYPE_FORMAT " < %" OCTAVE_IDX_TYPE_FORMAT ")",
2870 i+1, jtmp, jold)
2871 }
2872 else if (jtmp > jold)
2873 {
2874 for (octave_idx_type j = jold; j < jtmp; j++)
2875 a.cidx (j+1) = ii;
2876 }
// Same column: row indices must be strictly within-column ascending.
2877 else if (itmp < iold)
2878 {
2879 is.setstate (std::ios::failbit);
2880
2881 (*current_liboctave_error_handler)
2882 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2883 "row indices must appear in ascending order in each column "
2884 "(%" OCTAVE_IDX_TYPE_FORMAT " < %" OCTAVE_IDX_TYPE_FORMAT ")",
2885 i+1, iold, itmp);
2886 }
2887
2888 iold = itmp;
2889 jold = jtmp;
2890
2891 tmp = read_fcn (is);
2892
2893 if (! is)
2894 return is; // Problem, return is in error state
2895
2896 a.data (ii) = tmp;
2897 a.ridx (ii++) = itmp;
2898 }
2899
// Close the last column read and any trailing empty columns.
2900 for (octave_idx_type j = jold; j < nc; j++)
2901 a.cidx (j+1) = ii;
2902 }
2903
2904 return is;
2905}
2906
2907/*
2908 * Tests
2909 *
2910
2911%!function x = set_slice (x, dim, slice, arg)
2912%! switch (dim)
2913%! case 11
2914%! x(slice) = 2;
2915%! case 21
2916%! x(slice, :) = 2;
2917%! case 22
2918%! x(:, slice) = 2;
2919%! otherwise
2920%! error ("invalid dim, '%d'", dim);
2921%! endswitch
2922%!endfunction
2923
2924%!function x = set_slice2 (x, dim, slice)
2925%! switch (dim)
2926%! case 11
2927%! x(slice) = 2 * ones (size (slice));
2928%! case 21
2929%! x(slice, :) = 2 * ones (length (slice), columns (x));
2930%! case 22
2931%! x(:, slice) = 2 * ones (rows (x), length (slice));
2932%! otherwise
2933%! error ("invalid dim, '%d'", dim);
2934%! endswitch
2935%!endfunction
2936
2937%!function test_sparse_slice (size, dim, slice)
2938%! x = ones (size);
2939%! s = set_slice (sparse (x), dim, slice);
2940%! f = set_slice (x, dim, slice);
2941%! assert (nnz (s), nnz (f));
2942%! assert (full (s), f);
2943%! s = set_slice2 (sparse (x), dim, slice);
2944%! f = set_slice2 (x, dim, slice);
2945%! assert (nnz (s), nnz (f));
2946%! assert (full (s), f);
2947%!endfunction
2948
2949#### 1-D indexing
2950
2951## size = [2 0]
2952%!test test_sparse_slice ([2 0], 11, []);
2953%!assert (set_slice (sparse (ones ([2 0])), 11, 1), sparse ([2 0]')) # sparse different from full
2954%!assert (set_slice (sparse (ones ([2 0])), 11, 2), sparse ([0 2]')) # sparse different from full
2955%!assert (set_slice (sparse (ones ([2 0])), 11, 3), sparse ([0 0; 2 0]')) # sparse different from full
2956%!assert (set_slice (sparse (ones ([2 0])), 11, 4), sparse ([0 0; 0 2]')) # sparse different from full
2957
2958## size = [0 2]
2959%!test test_sparse_slice ([0 2], 11, []);
2960%!assert (set_slice (sparse (ones ([0 2])), 11, 1), sparse ([2 0])) # sparse different from full
2961%!test test_sparse_slice ([0 2], 11, 2);
2962%!test test_sparse_slice ([0 2], 11, 3);
2963%!test test_sparse_slice ([0 2], 11, 4);
2964%!test test_sparse_slice ([0 2], 11, [4, 4]);
2965
2966## size = [2 1]
2967%!test test_sparse_slice ([2 1], 11, []);
2968%!test test_sparse_slice ([2 1], 11, 1);
2969%!test test_sparse_slice ([2 1], 11, 2);
2970%!test test_sparse_slice ([2 1], 11, 3);
2971%!test test_sparse_slice ([2 1], 11, 4);
2972%!test test_sparse_slice ([2 1], 11, [4, 4]);
2973
2974## size = [1 2]
2975%!test test_sparse_slice ([1 2], 11, []);
2976%!test test_sparse_slice ([1 2], 11, 1);
2977%!test test_sparse_slice ([1 2], 11, 2);
2978%!test test_sparse_slice ([1 2], 11, 3);
2979%!test test_sparse_slice ([1 2], 11, 4);
2980%!test test_sparse_slice ([1 2], 11, [4, 4]);
2981
2982## size = [2 2]
2983%!test test_sparse_slice ([2 2], 11, []);
2984%!test test_sparse_slice ([2 2], 11, 1);
2985%!test test_sparse_slice ([2 2], 11, 2);
2986%!test test_sparse_slice ([2 2], 11, 3);
2987%!test test_sparse_slice ([2 2], 11, 4);
2988%!test test_sparse_slice ([2 2], 11, [4, 4]);
2989# These 2 errors are the same as in the full case
2990%!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 5)
2991%!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 6)
2992
2993#### 2-D indexing
2994
2995## size = [2 0]
2996%!test test_sparse_slice ([2 0], 21, []);
2997%!test test_sparse_slice ([2 0], 21, 1);
2998%!test test_sparse_slice ([2 0], 21, 2);
2999%!test test_sparse_slice ([2 0], 21, [2,2]);
3000%!assert (set_slice (sparse (ones ([2 0])), 21, 3), sparse (3,0))
3001%!assert (set_slice (sparse (ones ([2 0])), 21, 4), sparse (4,0))
3002%!test test_sparse_slice ([2 0], 22, []);
3003%!test test_sparse_slice ([2 0], 22, 1);
3004%!test test_sparse_slice ([2 0], 22, 2);
3005%!test test_sparse_slice ([2 0], 22, [2,2]);
3006%!assert (set_slice (sparse (ones ([2 0])), 22, 3), sparse ([0 0 2;0 0 2])) # sparse different from full
3007%!assert (set_slice (sparse (ones ([2 0])), 22, 4), sparse ([0 0 0 2;0 0 0 2])) # sparse different from full
3008
3009## size = [0 2]
3010%!test test_sparse_slice ([0 2], 21, []);
3011%!test test_sparse_slice ([0 2], 21, 1);
3012%!test test_sparse_slice ([0 2], 21, 2);
3013%!test test_sparse_slice ([0 2], 21, [2,2]);
3014%!assert (set_slice (sparse (ones ([0 2])), 21, 3), sparse ([0 0;0 0;2 2])) # sparse different from full
3015%!assert (set_slice (sparse (ones ([0 2])), 21, 4), sparse ([0 0;0 0;0 0;2 2])) # sparse different from full
3016%!test test_sparse_slice ([0 2], 22, []);
3017%!test test_sparse_slice ([0 2], 22, 1);
3018%!test test_sparse_slice ([0 2], 22, 2);
3019%!test test_sparse_slice ([0 2], 22, [2,2]);
3020%!assert (set_slice (sparse (ones ([0 2])), 22, 3), sparse (0,3))
3021%!assert (set_slice (sparse (ones ([0 2])), 22, 4), sparse (0,4))
3022
3023## size = [2 1]
3024%!test test_sparse_slice ([2 1], 21, []);
3025%!test test_sparse_slice ([2 1], 21, 1);
3026%!test test_sparse_slice ([2 1], 21, 2);
3027%!test test_sparse_slice ([2 1], 21, [2,2]);
3028%!test test_sparse_slice ([2 1], 21, 3);
3029%!test test_sparse_slice ([2 1], 21, 4);
3030%!test test_sparse_slice ([2 1], 22, []);
3031%!test test_sparse_slice ([2 1], 22, 1);
3032%!test test_sparse_slice ([2 1], 22, 2);
3033%!test test_sparse_slice ([2 1], 22, [2,2]);
3034%!test test_sparse_slice ([2 1], 22, 3);
3035%!test test_sparse_slice ([2 1], 22, 4);
3036
3037## size = [1 2]
3038%!test test_sparse_slice ([1 2], 21, []);
3039%!test test_sparse_slice ([1 2], 21, 1);
3040%!test test_sparse_slice ([1 2], 21, 2);
3041%!test test_sparse_slice ([1 2], 21, [2,2]);
3042%!test test_sparse_slice ([1 2], 21, 3);
3043%!test test_sparse_slice ([1 2], 21, 4);
3044%!test test_sparse_slice ([1 2], 22, []);
3045%!test test_sparse_slice ([1 2], 22, 1);
3046%!test test_sparse_slice ([1 2], 22, 2);
3047%!test test_sparse_slice ([1 2], 22, [2,2]);
3048%!test test_sparse_slice ([1 2], 22, 3);
3049%!test test_sparse_slice ([1 2], 22, 4);
3050
3051## size = [2 2]
3052%!test test_sparse_slice ([2 2], 21, []);
3053%!test test_sparse_slice ([2 2], 21, 1);
3054%!test test_sparse_slice ([2 2], 21, 2);
3055%!test test_sparse_slice ([2 2], 21, [2,2]);
3056%!test test_sparse_slice ([2 2], 21, 3);
3057%!test test_sparse_slice ([2 2], 21, 4);
3058%!test test_sparse_slice ([2 2], 22, []);
3059%!test test_sparse_slice ([2 2], 22, 1);
3060%!test test_sparse_slice ([2 2], 22, 2);
3061%!test test_sparse_slice ([2 2], 22, [2,2]);
3062%!test test_sparse_slice ([2 2], 22, 3);
3063%!test test_sparse_slice ([2 2], 22, 4);
3064
3065%!assert <*35570> (speye (3,1)(3:-1:1), sparse ([0; 0; 1]))
3066
3067## Test removing columns
3068%!test <*36656>
3069%! s = sparse (magic (5));
3070%! s(:,2:4) = [];
3071%! assert (s, sparse (magic (5)(:, [1,5])));
3072
3073%!test
3074%! s = sparse ([], [], [], 1, 1);
3075%! s(1,:) = [];
3076%! assert (s, sparse ([], [], [], 0, 1));
3077
3078## Test (bug #37321)
3079%!test <*37321> a=sparse (0,0); assert (all (a) == sparse ([1]));
3080%!test <*37321> a=sparse (0,1); assert (all (a) == sparse ([1]));
3081%!test <*37321> a=sparse (1,0); assert (all (a) == sparse ([1]));
3082%!test <*37321> a=sparse (1,0); assert (all (a,2) == sparse ([1]));
3083%!test <*37321> a=sparse (1,0); assert (size (all (a,1)), [1 0]);
3084%!test <*37321> a=sparse (1,1);
3085%! assert (all (a) == sparse ([0]));
3086%! assert (size (all (a)), [1 1]);
3087%!test <*37321> a=sparse (2,1);
3088%! assert (all (a) == sparse ([0]));
3089%! assert (size (all (a)), [1 1]);
3090%!test <*37321> a=sparse (1,2);
3091%! assert (all (a) == sparse ([0]));
3092%! assert (size (all (a)), [1 1]);
3093%!test <*37321> a=sparse (2,2); assert (isequal (all (a), sparse ([0 0])));
3094
3095## Test assigning row to a column slice
3096%!test <45589>
3097%! a = sparse (magic (3));
3098%! b = a;
3099%! a(1,:) = 1:3;
3100%! b(1,:) = (1:3)';
3101%! assert (a, b);
3102
3103*/
3104
// Sparse<T, Alloc>::print_info: debugging aid that dumps the internal
// representation to OS -- the rep's address, its nzmax/nrows/ncols
// bookkeeping, the raw data/ridx/cidx pointers, and the reference
// count -- each line prefixed with PREFIX.  No mutation; output only.
// NOTE(review): the listing omits source line 3106 (presumably an
// export/attribute macro before the return type) -- confirm.
3105template <typename T, typename Alloc>
3107void
3108Sparse<T, Alloc>::print_info (std::ostream& os, const std::string& prefix) const
3109{
3110 os << prefix << "m_rep address: " << m_rep << "\n"
3111 << prefix << "m_rep->m_nzmax: " << m_rep->m_nzmax << "\n"
3112 << prefix << "m_rep->m_nrows: " << m_rep->m_nrows << "\n"
3113 << prefix << "m_rep->m_ncols: " << m_rep->m_ncols << "\n"
3114 << prefix << "m_rep->m_data: " << m_rep->m_data << "\n"
3115 << prefix << "m_rep->m_ridx: " << m_rep->m_ridx << "\n"
3116 << prefix << "m_rep->m_cidx: " << m_rep->m_cidx << "\n"
3117 << prefix << "m_rep->m_count: " << m_rep->m_count << "\n";
3118}
3119
// INSTANTIATE_SPARSE(T): explicit instantiation of Sparse<T> and its
// read_sparse_matrix<T> helper.  The clang branch differs only in
// placing OCTAVE_API on the class instantiation as well; comments
// cannot go inside the backslash-continued macro bodies below.
3120#if defined (__clang__)
3121# define INSTANTIATE_SPARSE(T) \
3122 template class OCTAVE_API Sparse<T>; \
3123 template OCTAVE_API std::istream& \
3124 read_sparse_matrix<T> (std::istream& is, Sparse<T>& a, \
3125 T (*read_fcn) (std::istream&));
3126#else
// Non-clang compilers: OCTAVE_API only on the function instantiation.
3127# define INSTANTIATE_SPARSE(T) \
3128 template class Sparse<T>; \
3129 template OCTAVE_API std::istream& \
3130 read_sparse_matrix<T> (std::istream& is, Sparse<T>& a, \
3131 T (*read_fcn) (std::istream&));
3132#endif
std::istream & read_sparse_matrix(std::istream &is, Sparse< T > &a, T(*read_fcn)(std::istream &))
Definition Sparse.cc:2805
bool sparse_descending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition Sparse.cc:2322
bool sparse_ascending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition Sparse.cc:2313
N Dimensional Array with copy-on-write semantics.
Definition Array-base.h:130
T & xelem(octave_idx_type n)
Size of the specified dimension.
Definition Array-base.h:547
Array< T, Alloc > index(const octave::idx_vector &i) const
Indexing without resizing.
void assign(const octave::idx_vector &i, const Array< T, Alloc > &rhs, const T &rfv)
Indexed assignment (always with resize & fill).
Array< T, Alloc > transpose() const
Size of the specified dimension.
T & elem(octave_idx_type n)
Size of the specified dimension.
Definition Array-base.h:585
octave_idx_type rows() const
Definition Array-base.h:485
octave_idx_type cols() const
Definition Array-base.h:495
Array< T, Alloc > as_matrix() const
Return the array as a matrix.
Definition Array-base.h:468
const T * data() const
Size of the specified dimension.
Definition Array-base.h:687
T * rwdata()
Size of the specified dimension.
octave_idx_type numel() const
Number of elements in the array.
Definition Array-base.h:440
octave_idx_type rows() const
Definition PermMatrix.h:62
const Array< octave_idx_type > & col_perm_vec() const
Definition PermMatrix.h:83
bool indices_ok() const
Definition Sparse.cc:178
bool any_element_is_nan() const
Definition Sparse.cc:186
void change_length(octave_idx_type nz)
Definition Sparse.cc:144
octave_idx_type m_ncols
Definition Sparse.h:73
octave::refcount< octave_idx_type > m_count
Definition Sparse.h:74
idx_type_pointer m_cidx
Definition Sparse.h:70
T_pointer m_data
Definition Sparse.h:68
void maybe_compress(bool remove_zeros)
Definition Sparse.cc:119
T & elem(octave_idx_type r, octave_idx_type c)
Definition Sparse.cc:65
idx_type_pointer m_ridx
Definition Sparse.h:69
octave_idx_type m_nzmax
Definition Sparse.h:71
T celem(octave_idx_type r, octave_idx_type c) const
Definition Sparse.cc:107
bool any_element_is_inf_or_nan() const
Definition Sparse.cc:200
octave_idx_type cols() const
Definition Sparse.h:351
Sparse< T, Alloc > diag(octave_idx_type k=0) const
Definition Sparse.cc:2506
Sparse< T, Alloc > permute(const Array< octave_idx_type > &vec, bool inv=false) const
Definition Sparse.cc:944
octave_idx_type nzmax() const
Amount of storage for nonzero elements.
Definition Sparse.h:335
Array< T > array_value() const
Definition Sparse.cc:2780
void delete_elements(const octave::idx_vector &i)
Definition Sparse.cc:1211
Sparse< T, Alloc > index(const octave::idx_vector &i, bool resize_ok=false) const
Definition Sparse.cc:1447
void print_info(std::ostream &os, const std::string &prefix) const
Definition Sparse.cc:3108
octave_idx_type * cidx()
Definition Sparse.h:595
octave_idx_type columns() const
Definition Sparse.h:352
void assign(const octave::idx_vector &i, const Sparse< T, Alloc > &rhs)
Definition Sparse.cc:1894
virtual ~Sparse()
Definition Sparse.cc:720
T * data()
Definition Sparse.h:573
void resize(octave_idx_type r, octave_idx_type c)
Definition Sparse.cc:1006
Sparse< T, Alloc > sort(octave_idx_type dim=0, sortmode mode=ASCENDING) const
Definition Sparse.cc:2331
Sparse< T, Alloc > & insert(const Sparse< T, Alloc > &a, octave_idx_type r, octave_idx_type c)
Definition Sparse.cc:1057
T * xdata()
Definition Sparse.h:575
octave_idx_type * ridx()
Definition Sparse.h:582
void resize1(octave_idx_type n)
Definition Sparse.cc:973
OCTAVE_NORETURN T range_error(const char *fcn, octave_idx_type n) const
Definition Sparse.cc:771
octave_idx_type numel() const
Definition Sparse.h:342
Sparse()
Definition Sparse.h:258
static Sparse< T, Alloc > cat(int dim, octave_idx_type n, const Sparse< T, Alloc > *sparse_list)
Definition Sparse.cc:2680
Sparse< T, Alloc > & operator=(const Sparse< T, Alloc > &a)
Definition Sparse.cc:728
octave_idx_type nnz() const
Actual number of nonzero terms.
Definition Sparse.h:338
octave_idx_type rows() const
Definition Sparse.h:350
void change_capacity(octave_idx_type nz)
Definition Sparse.h:555
Sparse< T, Alloc > transpose() const
Definition Sparse.cc:1153
dim_vector m_dimensions
Definition Sparse.h:250
octave_idx_type compute_index(const Array< octave_idx_type > &ra_idx) const
Definition Sparse.cc:747
Sparse< T, Alloc >::SparseRep * m_rep
Definition Sparse.h:248
Sparse< T, Alloc > reshape(const dim_vector &new_dims) const
Definition Sparse.cc:861
dim_vector dims() const
Definition Sparse.h:370
octave_idx_type * xcidx()
Definition Sparse.h:601
octave_idx_type * xridx()
Definition Sparse.h:588
bool isempty() const
Definition Sparse.h:569
Vector representing the dimensions (size) of an Array.
Definition dim-vector.h:92
bool concat(const dim_vector &dvb, int dim)
This corresponds to cat().
octave_idx_type safe_numel() const
The following function will throw a std::bad_alloc () exception if the requested size is larger than ...
Definition dim-vector.h:364
std::string str(char sep='x') const
Definition dim-vector.cc:58
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition dim-vector.h:341
void resize(int n, int fill_value=0)
Definition dim-vector.h:278
octave_idx_type ndims() const
Number of dimensions.
Definition dim-vector.h:263
bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
dim_vector redim(int n) const
Force certain dimensionality, preserving numel ().
virtual octave_idx_type numel() const
Definition ov-base.h:401
void set_compare(const compare_fcn_type &comp)
Definition oct-sort.h:115
void sort(T *data, octave_idx_type nel)
Definition oct-sort.cc:1521
if_then_else< is_class_type< T >::no, T, Tconst & >::result type
Definition oct-traits.h:121
OCTAVE_NORETURN liboctave_error_handler current_liboctave_error_handler
Definition lo-error.c:41
#define OCTAVE_API
Definition main.in.cc:55
void mx_inline_add2(std::size_t n, R *r, const X *x)
void mx_inline_sub(std::size_t n, R *r, const X *x, const Y *y)
void mx_inline_add(std::size_t n, R *r, const X *x, const Y *y)
#define liboctave_panic_unless(cond)
Definition oct-error.h:102
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition oct-locbuf.h:44
#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value)
Definition oct-locbuf.h:50
sortmode
Definition oct-sort.h:97
@ ASCENDING
Definition oct-sort.h:97
@ DESCENDING
Definition oct-sort.h:97
T::size_type numel(const T &str)
Definition oct-string.cc:81
const octave_base_value const Array< octave_idx_type > & ra_idx
F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE * d
bool sparse_indices_ok(octave_idx_type *r, octave_idx_type *c, octave_idx_type nrows, octave_idx_type ncols, octave_idx_type nnz)
F77_RET_T len
Definition xerbla.cc:61