GNU Octave 10.1.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
 
Loading...
Searching...
No Matches
Sparse.cc
Go to the documentation of this file.
1////////////////////////////////////////////////////////////////////////
2//
3// Copyright (C) 1998-2025 The Octave Project Developers
4//
5// See the file COPYRIGHT.md in the top-level directory of this
6// distribution or <https://octave.org/copyright/>.
7//
8// This file is part of Octave.
9//
10// Octave is free software: you can redistribute it and/or modify it
11// under the terms of the GNU General Public License as published by
12// the Free Software Foundation, either version 3 of the License, or
13// (at your option) any later version.
14//
15// Octave is distributed in the hope that it will be useful, but
16// WITHOUT ANY WARRANTY; without even the implied warranty of
17// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18// GNU General Public License for more details.
19//
20// You should have received a copy of the GNU General Public License
21// along with Octave; see the file COPYING. If not, see
22// <https://www.gnu.org/licenses/>.
23//
24////////////////////////////////////////////////////////////////////////
25
26// This file should not include config.h. It is only included in other
27// C++ source files that should have included config.h before including
28// this file.
29
30#include <cinttypes>
31
32#include <algorithm>
33#include <istream>
34#include <ostream>
35#include <sstream>
36#include <vector>
37
38#include "Array.h"
39#include "MArray.h"
40#include "Array-util.h"
41#include "Range.h"
42#include "idx-vector.h"
43#include "lo-error.h"
44#include "quit.h"
45#include "oct-locbuf.h"
46
47#include "Sparse.h"
48#include "sparse-util.h"
49#include "oct-spparms.h"
50#include "mx-inlines.cc"
51
52#include "PermMatrix.h"
53
54template <typename T, typename Alloc>
57{
58 static typename Sparse<T, Alloc>::SparseRep nr;
59 return &nr;
60}
61
62template <typename T, typename Alloc>
64T&
66{
68
69 if (m_nzmax <= 0)
70 (*current_liboctave_error_handler)
71 ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
72
73 for (i = m_cidx[c]; i < m_cidx[c + 1]; i++)
74 if (m_ridx[i] == r)
75 return m_data[i];
76 else if (m_ridx[i] > r)
77 break;
78
79 // Ok, If we've gotten here, we're in trouble. Have to create a
80 // new element in the sparse array. This' gonna be slow!!!
81 if (m_cidx[m_ncols] == m_nzmax)
82 (*current_liboctave_error_handler)
83 ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
84
85 octave_idx_type to_move = m_cidx[m_ncols] - i;
86 if (to_move != 0)
87 {
88 for (octave_idx_type j = m_cidx[m_ncols]; j > i; j--)
89 {
90 m_data[j] = m_data[j-1];
91 m_ridx[j] = m_ridx[j-1];
92 }
93 }
94
95 for (octave_idx_type j = c + 1; j < m_ncols + 1; j++)
96 m_cidx[j] = m_cidx[j] + 1;
97
98 m_data[i] = 0.;
99 m_ridx[i] = r;
100
101 return m_data[i];
102}
103
104template <typename T, typename Alloc>
106T
108{
109 if (m_nzmax > 0)
110 for (octave_idx_type i = m_cidx[c]; i < m_cidx[c + 1]; i++)
111 if (m_ridx[i] == r)
112 return m_data[i];
113 return T ();
114}
115
116template <typename T, typename Alloc>
118void
120{
121 if (remove_zeros)
122 {
123 octave_idx_type i = 0;
124 octave_idx_type k = 0;
125 for (octave_idx_type j = 1; j <= m_ncols; j++)
126 {
127 octave_idx_type u = m_cidx[j];
128 for (; i < u; i++)
129 if (m_data[i] != T ())
130 {
131 m_data[k] = m_data[i];
132 m_ridx[k++] = m_ridx[i];
133 }
134 m_cidx[j] = k;
135 }
136 }
137
138 change_length (m_cidx[m_ncols]);
139}
140
141template <typename T, typename Alloc>
143void
145{
146 for (octave_idx_type j = m_ncols; j > 0 && m_cidx[j] > nz; j--)
147 m_cidx[j] = nz;
148
149 // Always preserve space for 1 element.
150 nz = (nz > 0 ? nz : 1);
151
152 // Skip reallocation if we have less than 1/FRAC extra elements to discard.
153 static const int FRAC = 5;
154 if (nz > m_nzmax || nz < m_nzmax - m_nzmax/FRAC)
156 // Reallocate.
157 octave_idx_type min_nzmax = std::min (nz, m_nzmax);
158
159 octave_idx_type *new_ridx = idx_type_allocate (nz);
160 std::copy_n (m_ridx, min_nzmax, new_ridx);
161
162 idx_type_deallocate (m_ridx, m_nzmax);
163 m_ridx = new_ridx;
164
165 T *new_data = T_allocate (nz);
166 std::copy_n (m_data, min_nzmax, new_data);
167
168 T_deallocate (m_data, m_nzmax);
169 m_data = new_data;
170
171 m_nzmax = nz;
172 }
174
175template <typename T, typename Alloc>
177bool
179{
180 return sparse_indices_ok (m_ridx, m_cidx, m_nrows, m_ncols, nnz ());
181}
182
183template <typename T, typename Alloc>
185bool
187{
188 octave_idx_type nz = nnz ();
189
190 for (octave_idx_type i = 0; i < nz; i++)
191 if (octave::math::isnan (m_data[i]))
192 return true;
193
194 return false;
195}
196
197template <typename T, typename Alloc>
200 : m_rep (nullptr), m_dimensions (dim_vector (nr, nc))
201{
202 if (val != T ())
203 {
204 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, m_dimensions.safe_numel ());
205
206 octave_idx_type ii = 0;
207 xcidx (0) = 0;
208 for (octave_idx_type j = 0; j < nc; j++)
209 {
210 for (octave_idx_type i = 0; i < nr; i++)
211 {
212 xdata (ii) = val;
213 xridx (ii++) = i;
214 }
215 xcidx (j+1) = ii;
216 }
217 }
218 else
219 {
220 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, 0);
221 for (octave_idx_type j = 0; j < nc+1; j++)
222 xcidx (j) = 0;
223 }
224}
225
226template <typename T, typename Alloc>
229 : m_rep (new typename Sparse<T, Alloc>::SparseRep (a.rows (), a.cols (), a.rows ())),
230 m_dimensions (dim_vector (a.rows (), a.cols ()))
231{
232 octave_idx_type n = a.rows ();
233 for (octave_idx_type i = 0; i <= n; i++)
234 cidx (i) = i;
235
236 const Array<octave_idx_type> pv = a.col_perm_vec ();
237
238 for (octave_idx_type i = 0; i < n; i++)
239 ridx (i) = pv(i);
240
241 for (octave_idx_type i = 0; i < n; i++)
242 data (i) = 1.0;
243}
244
245template <typename T, typename Alloc>
248 : m_rep (nullptr), m_dimensions (dv)
249{
250 if (dv.ndims () != 2)
251 (*current_liboctave_error_handler)
252 ("Sparse::Sparse (const dim_vector&): dimension mismatch");
253
254 m_rep = new typename Sparse<T, Alloc>::SparseRep (dv(0), dv(1), 0);
255}
256
257template <typename T, typename Alloc>
260 : m_rep (nullptr), m_dimensions (dv)
261{
262
263 // Work in unsigned long long to avoid overflow issues with numel
264 unsigned long long a_nel = static_cast<unsigned long long> (a.rows ()) *
265 static_cast<unsigned long long> (a.cols ());
266 unsigned long long dv_nel = static_cast<unsigned long long> (dv(0)) *
267 static_cast<unsigned long long> (dv(1));
268
269 if (a_nel != dv_nel)
270 (*current_liboctave_error_handler)
271 ("Sparse::Sparse (const Sparse&, const dim_vector&): dimension mismatch");
272
273 const dim_vector& old_dims = a.dims ();
274 octave_idx_type new_nzmax = a.nnz ();
275 octave_idx_type new_nr = dv(0);
276 octave_idx_type new_nc = dv(1);
277 octave_idx_type old_nr = old_dims(0);
278 octave_idx_type old_nc = old_dims(1);
279
280 m_rep = new typename Sparse<T, Alloc>::SparseRep (new_nr, new_nc, new_nzmax);
281
282 octave_idx_type kk = 0;
283 xcidx (0) = 0;
284 for (octave_idx_type i = 0; i < old_nc; i++)
285 for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++)
286 {
287 octave_idx_type tmp = i * old_nr + a.ridx (j);
288 octave_idx_type ii = tmp % new_nr;
289 octave_idx_type jj = (tmp - ii) / new_nr;
290 for (octave_idx_type k = kk; k < jj; k++)
291 xcidx (k+1) = j;
292 kk = jj;
293 xdata (j) = a.data (j);
294 xridx (j) = ii;
295 }
296 for (octave_idx_type k = kk; k < new_nc; k++)
297 xcidx (k+1) = new_nzmax;
298}
299
300template <typename T, typename Alloc>
303 const octave::idx_vector& r,
304 const octave::idx_vector& c,
306 bool sum_terms, octave_idx_type nzm)
307 : m_rep (nullptr), m_dimensions ()
308{
309 if (nr < 0)
310 nr = r.extent (0);
311 else if (r.extent (nr) > nr)
312 (*current_liboctave_error_handler)
313 ("sparse: row index %" OCTAVE_IDX_TYPE_FORMAT "out of bound "
314 "%" OCTAVE_IDX_TYPE_FORMAT, r.extent (nr), nr);
316 if (nc < 0)
317 nc = c.extent (0);
318 else if (c.extent (nc) > nc)
320 ("sparse: column index %" OCTAVE_IDX_TYPE_FORMAT " out of bound "
321 "%" OCTAVE_IDX_TYPE_FORMAT, c.extent (nc), nc);
322
323 m_dimensions = dim_vector (nr, nc);
324
326 octave_idx_type rl = r.length (nr);
327 octave_idx_type cl = c.length (nc);
328 bool a_scalar = n == 1;
329 if (a_scalar)
330 {
331 if (rl != 1)
332 n = rl;
333 else if (cl != 1)
334 n = cl;
335 }
336
337 if ((rl != 1 && rl != n) || (cl != 1 && cl != n))
338 (*current_liboctave_error_handler) ("sparse: dimension mismatch");
339
340 // Only create m_rep after input validation to avoid memory leak.
341 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, (nzm > 0 ? nzm : 0));
342
343 if (rl <= 1 && cl <= 1)
344 {
345 if (n == 1 && a(0) != T ())
346 {
347 change_capacity (nzm > 1 ? nzm : 1);
348 xridx (0) = r(0);
349 xdata (0) = a(0);
350 std::fill_n (xcidx () + c(0) + 1, nc - c(0), 1);
351 }
352 }
353 else if (a_scalar)
354 {
355 // This is completely specialized, because the sorts can be simplified.
356 T a0 = a(0);
357 if (a0 == T ())
358 {
359 // Do nothing, it's an empty matrix.
360 }
361 else if (cl == 1)
362 {
363 // Sparse column vector. Sort row indices.
364 octave::idx_vector rs = r.sorted ();
365
366 octave_quit ();
367
368 const octave_idx_type *rd = rs.raw ();
369 // Count unique indices.
370 octave_idx_type new_nz = 1;
371 for (octave_idx_type i = 1; i < n; i++)
372 new_nz += rd[i-1] != rd[i];
374 // Allocate result.
375 change_capacity (nzm > new_nz ? nzm : new_nz);
376 std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
377
379 T *rrd = data ();
380
381 octave_quit ();
382
384 octave_idx_type l = -1;
385
386 if (sum_terms)
387 {
388 // Sum repeated indices.
389 for (octave_idx_type i = 0; i < n; i++)
390 {
391 if (rd[i] != l)
392 {
393 l = rd[i];
394 rri[++k] = rd[i];
395 rrd[k] = a0;
396 }
397 else
398 rrd[k] += a0;
399 }
400 }
401 else
402 {
403 // Pick the last one.
404 for (octave_idx_type i = 0; i < n; i++)
405 {
406 if (rd[i] != l)
407 {
408 l = rd[i];
409 rri[++k] = rd[i];
410 rrd[k] = a0;
411 }
412 }
413 }
414
415 }
416 else
417 {
418 octave::idx_vector rr = r;
419 octave::idx_vector cc = c;
420 const octave_idx_type *rd = rr.raw ();
421 const octave_idx_type *cd = cc.raw ();
423 ci[0] = 0;
424 // Bin counts of column indices.
425 for (octave_idx_type i = 0; i < n; i++)
426 ci[cd[i]+1]++;
427 // Make them cumulative, shifted one to right.
428 for (octave_idx_type i = 1, s = 0; i <= nc; i++)
429 {
430 octave_idx_type s1 = s + ci[i];
431 ci[i] = s;
432 s = s1;
433 }
434
435 octave_quit ();
436
437 // Bucket sort.
439 for (octave_idx_type i = 0; i < n; i++)
440 if (rl == 1)
441 sidx[ci[cd[i]+1]++] = rd[0];
442 else
443 sidx[ci[cd[i]+1]++] = rd[i];
444
445 // Subsorts. We don't need a stable sort, all values are equal.
446 xcidx (0) = 0;
447 for (octave_idx_type j = 0; j < nc; j++)
448 {
449 std::sort (sidx + ci[j], sidx + ci[j+1]);
450 octave_idx_type l = -1;
451 octave_idx_type nzj = 0;
452 // Count.
453 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
454 {
455 octave_idx_type k = sidx[i];
456 if (k != l)
457 {
458 l = k;
459 nzj++;
460 }
461 }
462 // Set column pointer.
463 xcidx (j+1) = xcidx (j) + nzj;
464 }
465
466 change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
467 octave_idx_type *rri = ridx ();
468 T *rrd = data ();
469
470 // Fill-in data.
471 for (octave_idx_type j = 0, jj = -1; j < nc; j++)
472 {
473 octave_quit ();
474 octave_idx_type l = -1;
475 if (sum_terms)
476 {
477 // Sum adjacent terms.
478 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
479 {
480 octave_idx_type k = sidx[i];
481 if (k != l)
482 {
483 l = k;
484 rrd[++jj] = a0;
485 rri[jj] = k;
486 }
487 else
488 rrd[jj] += a0;
489 }
490 }
491 else
492 {
493 // Use the last one.
494 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
495 {
496 octave_idx_type k = sidx[i];
497 if (k != l)
498 {
499 l = k;
500 rrd[++jj] = a0;
501 rri[jj] = k;
502 }
503 }
504 }
505 }
506 }
507 }
508 else if (cl == 1)
509 {
510 // Sparse column vector. Sort row indices.
512 octave::idx_vector rs = r.sorted (rsi);
513
514 octave_quit ();
515
516 const octave_idx_type *rd = rs.raw ();
517 const octave_idx_type *rdi = rsi.data ();
518 // Count unique indices.
519 octave_idx_type new_nz = 1;
520 for (octave_idx_type i = 1; i < n; i++)
521 new_nz += rd[i-1] != rd[i];
522
523 // Allocate result.
524 change_capacity (nzm > new_nz ? nzm : new_nz);
525 std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
526
527 octave_idx_type *rri = ridx ();
528 T *rrd = data ();
529
530 octave_quit ();
531
532 octave_idx_type k = 0;
533 rri[k] = rd[0];
534 rrd[k] = a(rdi[0]);
535
536 if (sum_terms)
538 // Sum repeated indices.
539 for (octave_idx_type i = 1; i < n; i++)
541 if (rd[i] != rd[i-1])
542 {
543 rri[++k] = rd[i];
544 rrd[k] = a(rdi[i]);
545 }
546 else
547 rrd[k] += a(rdi[i]);
548 }
550 else
552 // Pick the last one.
553 for (octave_idx_type i = 1; i < n; i++)
554 {
555 if (rd[i] != rd[i-1])
556 rri[++k] = rd[i];
557 rrd[k] = a(rdi[i]);
558 }
559 }
560
562 }
563 else
564 {
565 octave::idx_vector rr = r;
566 octave::idx_vector cc = c;
567 const octave_idx_type *rd = rr.raw ();
568 const octave_idx_type *cd = cc.raw ();
570 ci[0] = 0;
571 // Bin counts of column indices.
572 for (octave_idx_type i = 0; i < n; i++)
573 ci[cd[i]+1]++;
574 // Make them cumulative, shifted one to right.
575 for (octave_idx_type i = 1, s = 0; i <= nc; i++)
576 {
577 octave_idx_type s1 = s + ci[i];
578 ci[i] = s;
579 s = s1;
580 }
581
582 octave_quit ();
583
584 typedef std::pair<octave_idx_type, octave_idx_type> idx_pair;
585 // Bucket sort.
586 OCTAVE_LOCAL_BUFFER (idx_pair, spairs, n);
587 for (octave_idx_type i = 0; i < n; i++)
588 {
589 idx_pair& p = spairs[ci[cd[i]+1]++];
590 if (rl == 1)
591 p.first = rd[0];
592 else
593 p.first = rd[i];
594 p.second = i;
595 }
596
597 // Subsorts. We don't need a stable sort, the second index stabilizes it.
598 xcidx (0) = 0;
599 for (octave_idx_type j = 0; j < nc; j++)
600 {
601 std::sort (spairs + ci[j], spairs + ci[j+1]);
602 octave_idx_type l = -1;
603 octave_idx_type nzj = 0;
604 // Count.
605 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
606 {
607 octave_idx_type k = spairs[i].first;
608 if (k != l)
609 {
610 l = k;
611 nzj++;
613 }
614 // Set column pointer.
615 xcidx (j+1) = xcidx (j) + nzj;
616 }
617
618 change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
619 octave_idx_type *rri = ridx ();
620 T *rrd = data ();
622 // Fill-in data.
623 for (octave_idx_type j = 0, jj = -1; j < nc; j++)
625 octave_quit ();
626 octave_idx_type l = -1;
627 if (sum_terms)
628 {
629 // Sum adjacent terms.
630 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
632 octave_idx_type k = spairs[i].first;
633 if (k != l)
634 {
635 l = k;
636 rrd[++jj] = a(spairs[i].second);
637 rri[jj] = k;
639 else
640 rrd[jj] += a(spairs[i].second);
641 }
642 }
643 else
644 {
645 // Use the last one.
646 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
648 octave_idx_type k = spairs[i].first;
649 if (k != l)
650 {
651 l = k;
652 rri[++jj] = k;
653 }
654 rrd[jj] = a(spairs[i].second);
655 }
656 }
657 }
658
659 maybe_compress (true);
660 }
661}
662
663/*
664%!assert <*51880> (sparse (1:2, 2, 1:2, 2, 2), sparse ([0, 1; 0, 2]))
665%!assert <*51880> (sparse (1:2, 1, 1:2, 2, 2), sparse ([1, 0; 2, 0]))
666%!assert <*51880> (sparse (1:2, 2, 1:2, 2, 3), sparse ([0, 1, 0; 0, 2, 0]))
667*/
668
669template <typename T, typename Alloc>
672 : m_rep (nullptr), m_dimensions (a.dims ())
673{
674 if (m_dimensions.ndims () > 2)
675 (*current_liboctave_error_handler)
676 ("Sparse::Sparse (const Array<T>&): dimension mismatch");
677
678 octave_idx_type nr = rows ();
679 octave_idx_type nc = cols ();
681 octave_idx_type new_nzmax = 0;
682
683 // First count the number of nonzero terms
684 for (octave_idx_type i = 0; i < len; i++)
685 if (a(i) != T ())
686 new_nzmax++;
687
688 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, new_nzmax);
689
690 octave_idx_type ii = 0;
691 xcidx (0) = 0;
692 for (octave_idx_type j = 0; j < nc; j++)
693 {
694 for (octave_idx_type i = 0; i < nr; i++)
695 if (a.elem (i, j) != T ())
696 {
697 xdata (ii) = a.elem (i, j);
698 xridx (ii++) = i;
699 }
700 xcidx (j+1) = ii;
701 }
702}
703
704template <typename T, typename Alloc>
707{
708 if (--m_rep->m_count == 0)
709 delete m_rep;
710}
711
712template <typename T, typename Alloc>
715{
716 if (this != &a)
717 {
718 if (--m_rep->m_count == 0)
719 delete m_rep;
720
721 m_rep = a.m_rep;
722 m_rep->m_count++;
723
724 m_dimensions = a.m_dimensions;
725 }
726
727 return *this;
728}
729
730template <typename T, typename Alloc>
734{
735 octave_idx_type n = m_dimensions.ndims ();
736
737 if (n <= 0 || n != ra_idx.numel ())
738 (*current_liboctave_error_handler)
739 ("Sparse<T, Alloc>::compute_index: invalid ra_idxing operation");
740
741 octave_idx_type retval = -1;
742
743 retval = ra_idx(--n);
744
745 while (--n >= 0)
746 {
747 retval *= m_dimensions(n);
748 retval += ra_idx(n);
749 }
750
751 return retval;
752}
753
754template <typename T, typename Alloc>
756T
758{
759 (*current_liboctave_error_handler) ("%s (%" OCTAVE_IDX_TYPE_FORMAT "): "
760 "range error", fcn, n);
761}
762
763template <typename T, typename Alloc>
765T&
767{
768 (*current_liboctave_error_handler) ("%s (%" OCTAVE_IDX_TYPE_FORMAT "): "
769 "range error", fcn, n);
770}
771
772template <typename T, typename Alloc>
774T
777{
778 (*current_liboctave_error_handler)
779 ("%s (%" OCTAVE_IDX_TYPE_FORMAT ", %" OCTAVE_IDX_TYPE_FORMAT "): "
780 "range error", fcn, i, j);
781}
782
783template <typename T, typename Alloc>
785T&
788{
789 (*current_liboctave_error_handler)
790 ("%s (%" OCTAVE_IDX_TYPE_FORMAT ", %" OCTAVE_IDX_TYPE_FORMAT "): "
791 "range error", fcn, i, j);
792}
793
794template <typename T, typename Alloc>
796T
798 const Array<octave_idx_type>& ra_idx) const
799{
800 std::ostringstream buf;
801
802 buf << fcn << " (";
803
805
806 if (n > 0)
807 buf << ra_idx(0);
808
809 for (octave_idx_type i = 1; i < n; i++)
810 buf << ", " << ra_idx(i);
811
812 buf << "): range error";
813
814 std::string buf_str = buf.str ();
815
816 (*current_liboctave_error_handler) ("%s", buf_str.c_str ());
817}
818
819template <typename T, typename Alloc>
821T&
824{
825 std::ostringstream buf;
826
827 buf << fcn << " (";
828
830
831 if (n > 0)
832 buf << ra_idx(0);
833
834 for (octave_idx_type i = 1; i < n; i++)
835 buf << ", " << ra_idx(i);
836
837 buf << "): range error";
838
839 std::string buf_str = buf.str ();
840
841 (*current_liboctave_error_handler) ("%s", buf_str.c_str ());
842}
843
844template <typename T, typename Alloc>
848{
849 Sparse<T, Alloc> retval;
850 dim_vector dims2 = new_dims;
851
852 if (dims2.ndims () > 2)
853 {
854 (*current_liboctave_warning_with_id_handler)
855 ("Octave:reshape-smashes-dims",
856 "reshape: sparse reshape to N-D array smashes dims");
857
858 for (octave_idx_type i = 2; i < dims2.ndims (); i++)
859 dims2(1) *= dims2(i);
860
861 dims2.resize (2);
862 }
863
864 if (m_dimensions != dims2)
865 {
866 if (m_dimensions.numel () == dims2.numel ())
867 {
868 octave_idx_type new_nnz = nnz ();
869 octave_idx_type new_nr = dims2 (0);
870 octave_idx_type new_nc = dims2 (1);
871 octave_idx_type old_nr = rows ();
872 octave_idx_type old_nc = cols ();
873 retval = Sparse<T, Alloc> (new_nr, new_nc, new_nnz);
874 // Special case for empty matrices (bug #64080)
875 if (new_nr == 0 || new_nc == 0)
876 return retval;
877
878 octave_idx_type kk = 0;
879 retval.xcidx (0) = 0;
880 // Quotient and remainder of i * old_nr divided by new_nr.
881 // Track them individually to avoid overflow (bug #42850).
882 octave_idx_type i_old_qu = 0;
883 octave_idx_type i_old_rm = static_cast<octave_idx_type> (-old_nr);
884 for (octave_idx_type i = 0; i < old_nc; i++)
885 {
886 i_old_rm += old_nr;
887 if (i_old_rm >= new_nr)
888 {
889 i_old_qu += i_old_rm / new_nr;
890 i_old_rm = i_old_rm % new_nr;
891 }
892 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
893 {
894 octave_idx_type ii, jj;
895 ii = (i_old_rm + ridx (j)) % new_nr;
896 jj = i_old_qu + (i_old_rm + ridx (j)) / new_nr;
897
898 // Original calculation subject to overflow
899 // ii = (i*old_nr + ridx (j)) % new_nr
900 // jj = (i*old_nr + ridx (j)) / new_nr
901 for (octave_idx_type k = kk; k < jj; k++)
902 retval.xcidx (k+1) = j;
903 kk = jj;
904 retval.xdata (j) = data (j);
905 retval.xridx (j) = ii;
906 }
907 }
908 for (octave_idx_type k = kk; k < new_nc; k++)
909 retval.xcidx (k+1) = new_nnz;
910 }
911 else
912 {
913 std::string dimensions_str = m_dimensions.str ();
914 std::string new_dims_str = new_dims.str ();
915
916 (*current_liboctave_error_handler)
917 ("reshape: can't reshape %s array to %s array",
918 dimensions_str.c_str (), new_dims_str.c_str ());
919 }
920 }
921 else
922 retval = *this;
923
924 return retval;
925}
926
927template <typename T, typename Alloc>
931{
932 // The only valid permutations of a sparse array are [1, 2] and [2, 1].
933
934 bool fail = false;
935 bool trans = false;
936
937 if (perm_vec.numel () == 2)
938 {
939 if (perm_vec(0) == 0 && perm_vec(1) == 1)
940 /* do nothing */;
941 else if (perm_vec(0) == 1 && perm_vec(1) == 0)
942 trans = true;
943 else
944 fail = true;
945 }
946 else
947 fail = true;
948
949 if (fail)
950 (*current_liboctave_error_handler)
951 ("permutation vector contains an invalid element");
952
953 return trans ? this->transpose () : *this;
954}
955
956template <typename T, typename Alloc>
958void
960{
961 octave_idx_type nr = rows ();
962 octave_idx_type nc = cols ();
963
964 if (nr == 0)
965 resize (1, std::max (nc, n));
966 else if (nc == 0)
967 resize (nr, (n + nr - 1) / nr); // Ain't it wicked?
968 else if (nr == 1)
969 resize (1, n);
970 else if (nc == 1)
971 resize (n, 1);
972 else
973 octave::err_invalid_resize ();
974}
975
976template <typename T, typename Alloc>
978void
980{
981 octave_idx_type n = dv.ndims ();
982
983 if (n != 2)
984 (*current_liboctave_error_handler) ("sparse array must be 2-D");
985
986 resize (dv(0), dv(1));
987}
988
989template <typename T, typename Alloc>
991void
993{
994 if (r < 0 || c < 0)
995 (*current_liboctave_error_handler) ("can't resize to negative dimension");
996
997 if (r == dim1 () && c == dim2 ())
998 return;
999
1000 // This wouldn't be necessary for r >= rows () if m_nrows wasn't part of the
1001 // Sparse rep. It is not good for anything in there.
1002 make_unique ();
1003
1004 if (r < rows ())
1005 {
1006 octave_idx_type i = 0;
1007 octave_idx_type k = 0;
1008 for (octave_idx_type j = 1; j <= m_rep->m_ncols; j++)
1009 {
1010 octave_idx_type u = xcidx (j);
1011 for (; i < u; i++)
1012 if (xridx (i) < r)
1013 {
1014 xdata (k) = xdata (i);
1015 xridx (k++) = xridx (i);
1016 }
1017 xcidx (j) = k;
1018 }
1019 }
1020
1021 m_rep->m_nrows = m_dimensions(0) = r;
1022
1023 if (c != m_rep->m_ncols)
1024 {
1025 octave_idx_type *new_cidx = m_rep->idx_type_allocate (c+1);
1026 std::copy_n (m_rep->m_cidx, std::min (c, m_rep->m_ncols) + 1, new_cidx);
1027 m_rep->idx_type_deallocate (m_rep->m_cidx, m_rep->m_ncols + 1);
1028 m_rep->m_cidx = new_cidx;
1029
1030 if (c > m_rep->m_ncols)
1031 std::fill_n (m_rep->m_cidx + m_rep->m_ncols + 1, c - m_rep->m_ncols,
1032 m_rep->m_cidx[m_rep->m_ncols]);
1033 }
1034
1035 m_rep->m_ncols = m_dimensions(1) = c;
1036
1037 m_rep->change_length (m_rep->nnz ());
1038}
1039
1040template <typename T, typename Alloc>
1045{
1046 octave_idx_type a_rows = a.rows ();
1047 octave_idx_type a_cols = a.cols ();
1048 octave_idx_type nr = rows ();
1049 octave_idx_type nc = cols ();
1050
1051 if (r < 0 || r + a_rows > rows () || c < 0 || c + a_cols > cols ())
1052 (*current_liboctave_error_handler) ("range error for insert");
1053
1054 // First count the number of elements in the final array
1055 octave_idx_type nel = cidx (c) + a.nnz ();
1056
1057 if (c + a_cols < nc)
1058 nel += cidx (nc) - cidx (c + a_cols);
1059
1060 for (octave_idx_type i = c; i < c + a_cols; i++)
1061 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1062 if (ridx (j) < r || ridx (j) >= r + a_rows)
1063 nel++;
1064
1065 Sparse<T, Alloc> tmp (*this);
1066 --m_rep->m_count;
1067 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, nel);
1068
1069 for (octave_idx_type i = 0; i < tmp.cidx (c); i++)
1070 {
1071 data (i) = tmp.data (i);
1072 ridx (i) = tmp.ridx (i);
1073 }
1074 for (octave_idx_type i = 0; i < c + 1; i++)
1075 cidx (i) = tmp.cidx (i);
1076
1077 octave_idx_type ii = cidx (c);
1078
1079 for (octave_idx_type i = c; i < c + a_cols; i++)
1080 {
1081 octave_quit ();
1082
1083 for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1084 if (tmp.ridx (j) < r)
1085 {
1086 data (ii) = tmp.data (j);
1087 ridx (ii++) = tmp.ridx (j);
1088 }
1089
1090 octave_quit ();
1091
1092 for (octave_idx_type j = a.cidx (i-c); j < a.cidx (i-c+1); j++)
1093 {
1094 data (ii) = a.data (j);
1095 ridx (ii++) = r + a.ridx (j);
1096 }
1097
1098 octave_quit ();
1099
1100 for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1101 if (tmp.ridx (j) >= r + a_rows)
1102 {
1103 data (ii) = tmp.data (j);
1104 ridx (ii++) = tmp.ridx (j);
1105 }
1106
1107 cidx (i+1) = ii;
1108 }
1109
1110 for (octave_idx_type i = c + a_cols; i < nc; i++)
1111 {
1112 for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1113 {
1114 data (ii) = tmp.data (j);
1115 ridx (ii++) = tmp.ridx (j);
1116 }
1117 cidx (i+1) = ii;
1118 }
1119
1120 return *this;
1121}
1122
1123template <typename T, typename Alloc>
1128{
1129
1130 if (ra_idx.numel () != 2)
1131 (*current_liboctave_error_handler) ("range error for insert");
1132
1133 return insert (a, ra_idx(0), ra_idx(1));
1134}
1135
1136template <typename T, typename Alloc>
1140{
1141 liboctave_panic_unless (ndims () == 2);
1142
1143 octave_idx_type nr = rows ();
1144 octave_idx_type nc = cols ();
1145 octave_idx_type nz = nnz ();
1146 Sparse<T, Alloc> retval (nc, nr, nz);
1147
1148 for (octave_idx_type i = 0; i < nz; i++)
1149 retval.xcidx (ridx (i) + 1)++;
1150 // retval.xcidx[1:nr] holds the row degrees for rows 0:(nr-1)
1151 nz = 0;
1152 for (octave_idx_type i = 1; i <= nr; i++)
1153 {
1154 const octave_idx_type tmp = retval.xcidx (i);
1155 retval.xcidx (i) = nz;
1156 nz += tmp;
1157 }
1158 // retval.xcidx[1:nr] holds row entry *start* offsets for rows 0:(nr-1)
1159
1160 for (octave_idx_type j = 0; j < nc; j++)
1161 for (octave_idx_type k = cidx (j); k < cidx (j+1); k++)
1162 {
1163 octave_idx_type q = retval.xcidx (ridx (k) + 1)++;
1164 retval.xridx (q) = j;
1165 retval.xdata (q) = data (k);
1166 }
1167 liboctave_panic_unless (nnz () == retval.xcidx (nr));
1168 // retval.xcidx[1:nr] holds row entry *end* offsets for rows 0:(nr-1)
1169 // and retval.xcidx[0:(nr-1)] holds their row entry *start* offsets
1170
1171 return retval;
1172}
1173
1174// Lower bound lookup. Could also use octave_sort, but that has upper bound
1175// semantics, so requires some manipulation to set right. Uses a plain loop
1176// for small columns.
1177static
1179lblookup (const octave_idx_type *ridx, octave_idx_type nr,
1180 octave_idx_type ri)
1181{
1182 if (nr <= 8)
1183 {
1185 for (l = 0; l < nr; l++)
1186 if (ridx[l] >= ri)
1187 break;
1188 return l;
1189 }
1190 else
1191 return std::lower_bound (ridx, ridx + nr, ri) - ridx;
1192}
1193
1194template <typename T, typename Alloc>
1196void
1197Sparse<T, Alloc>::delete_elements (const octave::idx_vector& idx)
1198{
1199 Sparse<T, Alloc> retval;
1200
1201 liboctave_panic_unless (ndims () == 2);
1202
1203 octave_idx_type nr = dim1 ();
1204 octave_idx_type nc = dim2 ();
1205 octave_idx_type nz = nnz ();
1206
1207 octave_idx_type nel = numel (); // Can throw.
1208
1209 const dim_vector idx_dims = idx.orig_dimensions ();
1210
1211 if (idx.extent (nel) > nel)
1212 octave::err_del_index_out_of_range (true, idx.extent (nel), nel);
1213
1214 if (nc == 1)
1215 {
1216 // Sparse column vector.
1217 const Sparse<T, Alloc> tmp = *this; // constant copy to prevent COW.
1218
1219 octave_idx_type lb, ub;
1220
1221 if (idx.is_cont_range (nel, lb, ub))
1222 {
1223 // Special-case a contiguous range.
1224 // Look-up indices first.
1225 octave_idx_type li = lblookup (tmp.ridx (), nz, lb);
1226 octave_idx_type ui = lblookup (tmp.ridx (), nz, ub);
1227 // Copy data and adjust indices.
1228 octave_idx_type nz_new = nz - (ui - li);
1229 *this = Sparse<T, Alloc> (nr - (ub - lb), 1, nz_new);
1230 std::copy_n (tmp.data (), li, data ());
1231 std::copy_n (tmp.ridx (), li, xridx ());
1232 std::copy (tmp.data () + ui, tmp.data () + nz, xdata () + li);
1233 mx_inline_sub (nz - ui, xridx () + li, tmp.ridx () + ui, ub - lb);
1234 xcidx (1) = nz_new;
1235 }
1236 else
1237 {
1238 OCTAVE_LOCAL_BUFFER (octave_idx_type, ridx_new, nz);
1239 OCTAVE_LOCAL_BUFFER (T, data_new, nz);
1240 octave::idx_vector sidx = idx.sorted (true);
1241 const octave_idx_type *sj = sidx.raw ();
1242 octave_idx_type sl = sidx.length (nel);
1243 octave_idx_type nz_new = 0;
1244 octave_idx_type j = 0;
1245 for (octave_idx_type i = 0; i < nz; i++)
1246 {
1247 octave_idx_type r = tmp.ridx (i);
1248 for (; j < sl && sj[j] < r; j++) ;
1249 if (j == sl || sj[j] > r)
1250 {
1251 data_new[nz_new] = tmp.data (i);
1252 ridx_new[nz_new++] = r - j;
1253 }
1254 }
1255
1256 *this = Sparse<T, Alloc> (nr - sl, 1, nz_new);
1257 std::copy_n (ridx_new, nz_new, ridx ());
1258 std::copy_n (data_new, nz_new, xdata ());
1259 xcidx (1) = nz_new;
1260 }
1261 }
1262 else if (nr == 1)
1263 {
1264 // Sparse row vector.
1265 octave_idx_type lb, ub;
1266 if (idx.is_cont_range (nc, lb, ub))
1267 {
1268 const Sparse<T, Alloc> tmp = *this;
1269 octave_idx_type lbi = tmp.cidx (lb);
1270 octave_idx_type ubi = tmp.cidx (ub);
1271 octave_idx_type new_nz = nz - (ubi - lbi);
1272 *this = Sparse<T, Alloc> (1, nc - (ub - lb), new_nz);
1273 std::copy_n (tmp.data (), lbi, data ());
1274 std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1275 std::fill_n (ridx (), new_nz, static_cast<octave_idx_type> (0));
1276 std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
1277 mx_inline_sub (nc - ub, xcidx () + 1, tmp.cidx () + ub + 1,
1278 ubi - lbi);
1279 }
1280 else
1281 *this = index (idx.complement (nc));
1282 }
1283 else if (idx.length (nel) != 0)
1284 {
1285 if (idx.is_colon_equiv (nel))
1286 *this = Sparse<T, Alloc> ();
1287 else
1288 {
1289 *this = index (octave::idx_vector::colon);
1290 delete_elements (idx);
1291 *this = transpose (); // We want a row vector.
1292 }
1293 }
1294}
1295
// Delete elements from this 2-D sparse matrix, in place, implementing
// Matlab-style null assignment A(I,J) = [].  One of the two indices
// must be a colon: idx_i == colon deletes the columns selected by
// idx_j, idx_j == colon deletes the rows selected by idx_i.  If
// neither is a colon, the call is only valid when at least one index
// has zero length (nothing to delete); otherwise an error is raised.
template <typename T, typename Alloc>
void
Sparse<T, Alloc>::delete_elements (const octave::idx_vector& idx_i,
                                   const octave::idx_vector& idx_j)
{
  liboctave_panic_unless (ndims () == 2);

  octave_idx_type nr = dim1 ();
  octave_idx_type nc = dim2 ();
  octave_idx_type nz = nnz ();

  if (idx_i.is_colon ())
    {
      // Deleting columns.
      octave_idx_type lb, ub;
      if (idx_j.extent (nc) > nc)
        octave::err_del_index_out_of_range (false, idx_j.extent (nc), nc);
      else if (idx_j.is_cont_range (nc, lb, ub))
        {
          if (lb == 0 && ub == nc)
            {
              // Delete all rows and columns.
              *this = Sparse<T, Alloc> (nr, 0);
            }
          else if (nz == 0)
            {
              // No elements to preserve; adjust dimensions.
              *this = Sparse<T, Alloc> (nr, nc - (ub - lb));
            }
          else
            {
              // Splice out the contiguous column range [lb, ub):
              // copy the nonzeros before and after the deleted range.
              const Sparse<T, Alloc> tmp = *this;
              octave_idx_type lbi = tmp.cidx (lb);
              octave_idx_type ubi = tmp.cidx (ub);
              octave_idx_type new_nz = nz - (ubi - lbi);

              *this = Sparse<T, Alloc> (nr, nc - (ub - lb), new_nz);
              std::copy_n (tmp.data (), lbi, data ());
              std::copy_n (tmp.ridx (), lbi, ridx ());
              std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
              std::copy (tmp.ridx () + ubi, tmp.ridx () + nz, xridx () + lbi);
              std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
              // Column pointers past the deleted range shift down by
              // the number of deleted nonzeros (ubi - lbi).
              mx_inline_sub (nc - ub, xcidx () + lb + 1,
                             tmp.cidx () + ub + 1, ubi - lbi);
            }
        }
      else
        // General (non-contiguous) case: keep the complement of the
        // deleted columns.
        *this = index (idx_i, idx_j.complement (nc));
    }
  else if (idx_j.is_colon ())
    {
      // Deleting rows.
      octave_idx_type lb, ub;
      if (idx_i.extent (nr) > nr)
        octave::err_del_index_out_of_range (false, idx_i.extent (nr), nr);
      else if (idx_i.is_cont_range (nr, lb, ub))
        {
          if (lb == 0 && ub == nr)
            {
              // Delete all rows and columns.
              *this = Sparse<T, Alloc> (0, nc);
            }
          else if (nz == 0)
            {
              // No elements to preserve; adjust dimensions.
              *this = Sparse<T, Alloc> (nr - (ub - lb), nc);
            }
          else
            {
              // This is more memory-efficient than the approach below.
              // Index the retained row ranges above and below [lb, ub)
              // and splice the two pieces back together column by column.
              const Sparse<T, Alloc> tmpl = index (octave::idx_vector (0, lb), idx_j);
              const Sparse<T, Alloc> tmpu = index (octave::idx_vector (ub, nr), idx_j);
              *this = Sparse<T, Alloc> (nr - (ub - lb), nc,
                                        tmpl.nnz () + tmpu.nnz ());
              for (octave_idx_type j = 0, k = 0; j < nc; j++)
                {
                  for (octave_idx_type i = tmpl.cidx (j); i < tmpl.cidx (j+1);
                       i++)
                    {
                      xdata (k) = tmpl.data (i);
                      xridx (k++) = tmpl.ridx (i);
                    }
                  for (octave_idx_type i = tmpu.cidx (j); i < tmpu.cidx (j+1);
                       i++)
                    {
                      xdata (k) = tmpu.data (i);
                      // Rows from the upper piece are renumbered to
                      // follow directly after the lower piece.
                      xridx (k++) = tmpu.ridx (i) + lb;
                    }

                  xcidx (j+1) = k;
                }
            }
        }
      else
        {
          // This is done by transposing, deleting columns, then transposing
          // again.
          Sparse<T, Alloc> tmp = transpose ();
          tmp.delete_elements (idx_j, idx_i);
          *this = tmp.transpose ();
        }
    }
  else
    {
      // Empty assignment (no elements to delete) is OK if there is at
      // least one zero-length index and at most one other index that is
      // non-colon (or equivalent) index.  Since we only have two
      // indices, we just need to check that we have at least one zero
      // length index.  Matlab considers "[]" to be an empty index but
      // not "false".  We accept both.

      bool empty_assignment
        = (idx_i.length (nr) == 0 || idx_j.length (nc) == 0);

      if (! empty_assignment)
        (*current_liboctave_error_handler)
          ("a null assignment can only have one non-colon index");
    }
}
1416
1417template <typename T, typename Alloc>
1419void
1420Sparse<T, Alloc>::delete_elements (int dim, const octave::idx_vector& idx)
1421{
1422 if (dim == 0)
1423 delete_elements (idx, octave::idx_vector::colon);
1424 else if (dim == 1)
1425 delete_elements (octave::idx_vector::colon, idx);
1426 else
1427 (*current_liboctave_error_handler) ("invalid dimension in delete_elements");
1428}
1429
1430template <typename T, typename Alloc>
1433Sparse<T, Alloc>::index (const octave::idx_vector& idx, bool resize_ok) const
1434{
1435 Sparse<T, Alloc> retval;
1436
1437 liboctave_panic_unless (ndims () == 2);
1438
1439 octave_idx_type nr = dim1 ();
1440 octave_idx_type nc = dim2 ();
1441 octave_idx_type nz = nnz ();
1442
1443 octave_idx_type nel = numel (); // Can throw.
1444
1445 const dim_vector idx_dims = idx.orig_dimensions ().redim (2);
1446
1447 if (idx.is_colon ())
1448 {
1449 if (nc == 1)
1450 retval = *this;
1451 else
1452 {
1453 // Fast magic colon processing.
1454 retval = Sparse<T, Alloc> (nel, 1, nz);
1455
1456 for (octave_idx_type i = 0; i < nc; i++)
1457 {
1458 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1459 {
1460 retval.xdata (j) = data (j);
1461 retval.xridx (j) = ridx (j) + i * nr;
1462 }
1463 }
1464
1465 retval.xcidx (0) = 0;
1466 retval.xcidx (1) = nz;
1467 }
1468 }
1469 else if (idx.extent (nel) > nel)
1470 {
1471 if (! resize_ok)
1472 octave::err_index_out_of_range (1, 1, idx.extent (nel), nel, dims ());
1473
1474 // resize_ok is completely handled here.
1475 octave_idx_type ext = idx.extent (nel);
1476 Sparse<T, Alloc> tmp = *this;
1477 tmp.resize1 (ext);
1478 retval = tmp.index (idx);
1479 }
1480 else if (nr == 1 && nc == 1)
1481 {
1482 // You have to be pretty sick to get to this bit of code,
1483 // since you have a scalar stored as a sparse matrix, and
1484 // then want to make a dense matrix with sparse representation.
1485 // Ok, we'll do it, but you deserve what you get!!
1486 retval = (Sparse<T, Alloc> (idx_dims(0), idx_dims(1), nz ? data (0) : T ()));
1487 }
1488 else if (nc == 1)
1489 {
1490 // Sparse column vector.
1491 octave_idx_type lb, ub;
1492
1493 if (idx.is_scalar ())
1494 {
1495 // Scalar index - just a binary lookup.
1496 octave_idx_type i = lblookup (ridx (), nz, idx(0));
1497 if (i < nz && ridx (i) == idx(0))
1498 retval = Sparse (1, 1, data (i));
1499 else
1500 retval = Sparse (1, 1);
1501 }
1502 else if (idx.is_cont_range (nel, lb, ub))
1503 {
1504 // Special-case a contiguous range.
1505 // Look-up indices first.
1506 octave_idx_type li = lblookup (ridx (), nz, lb);
1507 octave_idx_type ui = lblookup (ridx (), nz, ub);
1508 // Copy data and adjust indices.
1509 octave_idx_type nz_new = ui - li;
1510 retval = Sparse<T, Alloc> (ub - lb, 1, nz_new);
1511 std::copy_n (data () + li, nz_new, retval.data ());
1512 mx_inline_sub (nz_new, retval.xridx (), ridx () + li, lb);
1513 retval.xcidx (1) = nz_new;
1514 }
1515 else if (idx.is_permutation (nel) && idx.isvector ())
1516 {
1517 if (idx.is_range () && idx.increment () == -1)
1518 {
1519 retval = Sparse<T, Alloc> (nr, 1, nz);
1520
1521 for (octave_idx_type j = 0; j < nz; j++)
1522 retval.ridx (j) = nr - ridx (nz - j - 1) - 1;
1523
1524 std::copy_n (cidx (), 2, retval.cidx ());
1525 std::reverse_copy (data (), data () + nz, retval.data ());
1526 }
1527 else
1528 {
1529 Array<T> tmp = array_value ();
1530 tmp = tmp.index (idx);
1531 retval = Sparse<T, Alloc> (tmp);
1532 }
1533 }
1534 else
1535 {
1536 // If indexing a sparse column vector by a vector, the result is a
1537 // sparse column vector, otherwise it inherits the shape of index.
1538 // Vector transpose is cheap, so do it right here.
1539
1540 Array<octave_idx_type> tmp_idx = idx.as_array ().as_matrix ();
1541
1542 const Array<octave_idx_type> idxa = (idx_dims(0) == 1
1543 ? tmp_idx.transpose ()
1544 : tmp_idx);
1545
1546 octave_idx_type new_nr = idxa.rows ();
1547 octave_idx_type new_nc = idxa.cols ();
1548
1549 // Lookup.
1550 // FIXME: Could specialize for sorted idx?
1551 Array<octave_idx_type> lidx (dim_vector (new_nr, new_nc));
1552 for (octave_idx_type i = 0; i < new_nr*new_nc; i++)
1553 lidx.xelem (i) = lblookup (ridx (), nz, idxa(i));
1554
1555 // Count matches.
1556 retval = Sparse<T, Alloc> (idxa.rows (), idxa.cols ());
1557 for (octave_idx_type j = 0; j < new_nc; j++)
1558 {
1559 octave_idx_type nzj = 0;
1560 for (octave_idx_type i = 0; i < new_nr; i++)
1561 {
1562 octave_idx_type l = lidx.xelem (i, j);
1563 if (l < nz && ridx (l) == idxa(i, j))
1564 nzj++;
1565 else
1566 lidx.xelem (i, j) = nz;
1567 }
1568 retval.xcidx (j+1) = retval.xcidx (j) + nzj;
1569 }
1570
1571 retval.change_capacity (retval.xcidx (new_nc));
1572
1573 // Copy data and set row indices.
1574 octave_idx_type k = 0;
1575 for (octave_idx_type j = 0; j < new_nc; j++)
1576 for (octave_idx_type i = 0; i < new_nr; i++)
1577 {
1578 octave_idx_type l = lidx.xelem (i, j);
1579 if (l < nz)
1580 {
1581 retval.data (k) = data (l);
1582 retval.xridx (k++) = i;
1583 }
1584 }
1585 }
1586 }
1587 else if (nr == 1)
1588 {
1589 octave_idx_type lb, ub;
1590 if (idx.is_scalar ())
1591 retval = Sparse<T, Alloc> (1, 1, elem (0, idx(0)));
1592 else if (idx.is_cont_range (nel, lb, ub))
1593 {
1594 // Special-case a contiguous range.
1595 octave_idx_type lbi = cidx (lb);
1596 octave_idx_type ubi = cidx (ub);
1597 octave_idx_type new_nz = ubi - lbi;
1598 retval = Sparse<T, Alloc> (1, ub - lb, new_nz);
1599 std::copy_n (data () + lbi, new_nz, retval.data ());
1600 std::fill_n (retval.ridx (), new_nz, static_cast<octave_idx_type> (0));
1601 mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1602 }
1603 else
1604 {
1605 // Sparse row vectors occupy O(nr) storage anyway, so let's just
1606 // convert the matrix to full, index, and sparsify the result.
1607 retval = Sparse<T, Alloc> (array_value ().index (idx));
1608 }
1609 }
1610 else
1611 {
1612 if (nr != 0 && idx.is_scalar ())
1613 retval = Sparse<T, Alloc> (1, 1, elem (idx(0) % nr, idx(0) / nr));
1614 else
1615 {
1616 // Indexing a non-vector sparse matrix by linear indexing.
1617 // I suppose this is rare (and it may easily overflow), so let's take
1618 // the easy way, and reshape first to column vector, which is already
1619 // handled above.
1620 retval = index (octave::idx_vector::colon).index (idx);
1621 // In this case we're supposed to always inherit the shape, but
1622 // column(row) doesn't do it, so we'll do it instead.
1623 if (idx_dims(0) == 1 && idx_dims(1) != 1)
1624 retval = retval.transpose ();
1625 }
1626 }
1627
1628 return retval;
1629}
1630
1631template <typename T, typename Alloc>
1634Sparse<T, Alloc>::index (const octave::idx_vector& idx_i,
1635 const octave::idx_vector& idx_j,
1636 bool resize_ok) const
1637{
1638 Sparse<T, Alloc> retval;
1639
1640 liboctave_panic_unless (ndims () == 2);
1641
1642 octave_idx_type nr = dim1 ();
1643 octave_idx_type nc = dim2 ();
1644
1645 octave_idx_type n = idx_i.length (nr);
1646 octave_idx_type m = idx_j.length (nc);
1647
1648 octave_idx_type lb, ub;
1649
1650 if (idx_i.extent (nr) > nr || idx_j.extent (nc) > nc)
1651 {
1652 // resize_ok is completely handled here.
1653 if (resize_ok)
1654 {
1655 octave_idx_type ext_i = idx_i.extent (nr);
1656 octave_idx_type ext_j = idx_j.extent (nc);
1657 Sparse<T, Alloc> tmp = *this;
1658 tmp.resize (ext_i, ext_j);
1659 retval = tmp.index (idx_i, idx_j);
1660 }
1661 else if (idx_i.extent (nr) > nr)
1662 octave::err_index_out_of_range (2, 1, idx_i.extent (nr), nr, dims ());
1663 else
1664 octave::err_index_out_of_range (2, 2, idx_j.extent (nc), nc, dims ());
1665 }
1666 else if (nr == 1 && nc == 1)
1667 {
1668 // Scalars stored as sparse matrices occupy more memory than
1669 // a scalar, so let's just convert the matrix to full, index,
1670 // and sparsify the result.
1671
1672 retval = Sparse<T, Alloc> (array_value ().index (idx_i, idx_j));
1673 }
1674 else if (idx_i.is_colon ())
1675 {
1676 // Great, we're just manipulating columns. This is going to be quite
1677 // efficient, because the columns can stay compressed as they are.
1678 if (idx_j.is_colon ())
1679 retval = *this; // Shallow copy.
1680 else if (idx_j.is_cont_range (nc, lb, ub))
1681 {
1682 // Special-case a contiguous range.
1683 octave_idx_type lbi = cidx (lb);
1684 octave_idx_type ubi = cidx (ub);
1685 octave_idx_type new_nz = ubi - lbi;
1686 retval = Sparse<T, Alloc> (nr, ub - lb, new_nz);
1687 std::copy_n (data () + lbi, new_nz, retval.data ());
1688 std::copy_n (ridx () + lbi, new_nz, retval.ridx ());
1689 mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1690 }
1691 else
1692 {
1693 // Count new nonzero elements.
1694 retval = Sparse<T, Alloc> (nr, m);
1695 for (octave_idx_type j = 0; j < m; j++)
1696 {
1697 octave_idx_type jj = idx_j(j);
1698 retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1699 }
1700
1701 retval.change_capacity (retval.xcidx (m));
1702
1703 // Copy data & indices.
1704 for (octave_idx_type j = 0; j < m; j++)
1705 {
1706 octave_idx_type ljj = cidx (idx_j(j));
1707 octave_idx_type lj = retval.xcidx (j);
1708 octave_idx_type nzj = retval.xcidx (j+1) - lj;
1709
1710 std::copy_n (data () + ljj, nzj, retval.data () + lj);
1711 std::copy_n (ridx () + ljj, nzj, retval.ridx () + lj);
1712 }
1713 }
1714 }
1715 else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
1716 {
1717 // It's actually vector indexing. The 1D index is specialized for that.
1718 retval = index (idx_i);
1719
1720 // If nr == 1 then the vector indexing will return a column vector!!
1721 if (nr == 1)
1722 retval.transpose ();
1723 }
1724 else if (idx_i.is_scalar ())
1725 {
1726 octave_idx_type ii = idx_i(0);
1727 retval = Sparse<T, Alloc> (1, m);
1729 for (octave_idx_type j = 0; j < m; j++)
1730 {
1731 octave_quit ();
1732 octave_idx_type jj = idx_j(j);
1733 octave_idx_type lj = cidx (jj);
1734 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1735
1736 // Scalar index - just a binary lookup.
1737 octave_idx_type i = lblookup (ridx () + lj, nzj, ii);
1738 if (i < nzj && ridx (i+lj) == ii)
1739 {
1740 ij[j] = i + lj;
1741 retval.xcidx (j+1) = retval.xcidx (j) + 1;
1742 }
1743 else
1744 retval.xcidx (j+1) = retval.xcidx (j);
1745 }
1746
1747 retval.change_capacity (retval.xcidx (m));
1748
1749 // Copy data, adjust row indices.
1750 for (octave_idx_type j = 0; j < m; j++)
1751 {
1752 octave_idx_type i = retval.xcidx (j);
1753 if (retval.xcidx (j+1) > i)
1754 {
1755 retval.xridx (i) = 0;
1756 retval.xdata (i) = data (ij[j]);
1757 }
1758 }
1759 }
1760 else if (idx_i.is_cont_range (nr, lb, ub))
1761 {
1762 retval = Sparse<T, Alloc> (n, m);
1765 for (octave_idx_type j = 0; j < m; j++)
1766 {
1767 octave_quit ();
1768 octave_idx_type jj = idx_j(j);
1769 octave_idx_type lj = cidx (jj);
1770 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1771 octave_idx_type lij, uij;
1772
1773 // Lookup indices.
1774 li[j] = lij = lblookup (ridx () + lj, nzj, lb) + lj;
1775 ui[j] = uij = lblookup (ridx () + lj, nzj, ub) + lj;
1776 retval.xcidx (j+1) = retval.xcidx (j) + ui[j] - li[j];
1777 }
1778
1779 retval.change_capacity (retval.xcidx (m));
1780
1781 // Copy data, adjust row indices.
1782 for (octave_idx_type j = 0, k = 0; j < m; j++)
1783 {
1784 octave_quit ();
1785 for (octave_idx_type i = li[j]; i < ui[j]; i++)
1786 {
1787 retval.xdata (k) = data (i);
1788 retval.xridx (k++) = ridx (i) - lb;
1789 }
1790 }
1791 }
1792 else if (idx_i.is_permutation (nr))
1793 {
1794 // The columns preserve their length, just need to renumber and sort them.
1795 // Count new nonzero elements.
1796 retval = Sparse<T, Alloc> (nr, m);
1797 for (octave_idx_type j = 0; j < m; j++)
1798 {
1799 octave_idx_type jj = idx_j(j);
1800 retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1801 }
1802
1803 retval.change_capacity (retval.xcidx (m));
1804
1805 octave_quit ();
1806
1807 if (idx_i.is_range () && idx_i.increment () == -1)
1808 {
1809 // It's nr:-1:1. Just flip all columns.
1810 for (octave_idx_type j = 0; j < m; j++)
1811 {
1812 octave_quit ();
1813 octave_idx_type jj = idx_j(j);
1814 octave_idx_type lj = cidx (jj);
1815 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1816 octave_idx_type li = retval.xcidx (j);
1817 octave_idx_type uj = lj + nzj - 1;
1818 for (octave_idx_type i = 0; i < nzj; i++)
1819 {
1820 retval.xdata (li + i) = data (uj - i); // Copy in reverse order.
1821 retval.xridx (li + i) = nr - 1 - ridx (uj - i); // Ditto with transform.
1822 }
1823 }
1824 }
1825 else
1826 {
1827 // Get inverse permutation.
1828 octave::idx_vector idx_iinv = idx_i.inverse_permutation (nr);
1829 const octave_idx_type *iinv = idx_iinv.raw ();
1830
1831 // Scatter buffer.
1832 OCTAVE_LOCAL_BUFFER (T, scb, nr);
1833 octave_idx_type *rri = retval.ridx ();
1834
1835 for (octave_idx_type j = 0; j < m; j++)
1836 {
1837 octave_quit ();
1838 octave_idx_type jj = idx_j(j);
1839 octave_idx_type lj = cidx (jj);
1840 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1841 octave_idx_type li = retval.xcidx (j);
1842 // Scatter the column, transform indices.
1843 for (octave_idx_type i = 0; i < nzj; i++)
1844 scb[rri[li + i] = iinv[ridx (lj + i)]] = data (lj + i);
1845
1846 octave_quit ();
1847
1848 // Sort them.
1849 std::sort (rri + li, rri + li + nzj);
1850
1851 // Gather.
1852 for (octave_idx_type i = 0; i < nzj; i++)
1853 retval.xdata (li + i) = scb[rri[li + i]];
1854 }
1855 }
1856
1857 }
1858 else if (idx_j.is_colon ())
1859 {
1860 // This requires uncompressing columns, which is generally costly,
1861 // so we rely on the efficient transpose to handle this.
1862 // It may still make sense to optimize some cases here.
1863 retval = transpose ();
1864 retval = retval.index (octave::idx_vector::colon, idx_i);
1865 retval = retval.transpose ();
1866 }
1867 else
1868 {
1869 // A(I, J) is decomposed into A(:, J)(I, :).
1870 retval = index (octave::idx_vector::colon, idx_j);
1871 retval = retval.index (idx_i, octave::idx_vector::colon);
1872 }
1873
1874 return retval;
1875}
1876
// Linear indexed assignment this(idx) = rhs for a sparse RHS, in
// place.  The index length must match rhs.numel (), or rhs must be a
// scalar (which is broadcast).  Handles resizing when idx extends
// past the current size, the colon case, and several fast paths for
// column-vector-to-column-vector assignment.
template <typename T, typename Alloc>
void
Sparse<T, Alloc>::assign (const octave::idx_vector& idx,
                          const Sparse<T, Alloc>& rhs)
{
  Sparse<T, Alloc> retval;

  liboctave_panic_unless (ndims () == 2);

  octave_idx_type nr = dim1 ();
  octave_idx_type nc = dim2 ();
  octave_idx_type nz = nnz ();

  octave_idx_type n = numel (); // Can throw.

  octave_idx_type rhl = rhs.numel ();

  if (idx.length (n) == rhl)
    {
      if (rhl == 0)
        return;

      octave_idx_type nx = idx.extent (n);
      // Try to resize first if necessary.
      if (nx != n)
        {
          resize1 (nx);
          n = numel ();
          nr = rows ();
          nc = cols ();
          // nz is preserved.
        }

      if (idx.is_colon ())
        {
          *this = rhs.reshape (m_dimensions);
        }
      else if (nc == 1 && rhs.cols () == 1)
        {
          // Sparse column vector to sparse column vector assignment.

          octave_idx_type lb, ub;
          if (idx.is_cont_range (nr, lb, ub))
            {
              // Special-case a contiguous range.
              // Look-up indices first.
              octave_idx_type li = lblookup (ridx (), nz, lb);
              octave_idx_type ui = lblookup (ridx (), nz, ub);
              octave_idx_type rnz = rhs.nnz ();
              octave_idx_type new_nz = nz - (ui - li) + rnz;

              if (new_nz >= nz && new_nz <= nzmax ())
                {
                  // Adding/overwriting elements, enough capacity allocated.

                  if (new_nz > nz)
                    {
                      // Make room first.
                      std::copy_backward (data () + ui, data () + nz,
                                          data () + nz + rnz);
                      std::copy_backward (ridx () + ui, ridx () + nz,
                                          ridx () + nz + rnz);
                    }

                  // Copy data and adjust indices from rhs.
                  std::copy_n (rhs.data (), rnz, data () + li);
                  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
                }
              else
                {
                  // Clearing elements or exceeding capacity, allocate afresh
                  // and paste pieces.
                  const Sparse<T, Alloc> tmp = *this;
                  *this = Sparse<T, Alloc> (nr, 1, new_nz);

                  // Head ...
                  std::copy_n (tmp.data (), li, data ());
                  std::copy_n (tmp.ridx (), li, ridx ());

                  // new stuff ...
                  std::copy_n (rhs.data (), rnz, data () + li);
                  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);

                  // ...tail
                  std::copy (tmp.data () + ui, tmp.data () + nz,
                             data () + li + rnz);
                  std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
                             ridx () + li + rnz);
                }

              cidx (1) = new_nz;
            }
          else if (idx.is_range () && idx.increment () == -1)
            {
              // It's s(u:-1:l) = r.  Reverse the assignment.
              assign (idx.sorted (), rhs.index (octave::idx_vector (rhl - 1, 0, -1)));
            }
          else if (idx.is_permutation (n))
            {
              *this = rhs.index (idx.inverse_permutation (n));
            }
          else if (rhs.nnz () == 0)
            {
              // Elements are being zeroed.
              octave_idx_type *ri = ridx ();
              for (octave_idx_type i = 0; i < rhl; i++)
                {
                  octave_idx_type iidx = idx(i);
                  octave_idx_type li = lblookup (ri, nz, iidx);
                  if (li != nz && ri[li] == iidx)
                    xdata (li) = T ();
                }

              // Squeeze the explicitly-stored zeros back out of the
              // representation.
              maybe_compress (true);
            }
          else
            {
              const Sparse<T, Alloc> tmp = *this;
              octave_idx_type new_nz = nz + rhl;
              // Disassembly our matrix...
              Array<octave_idx_type> new_ri (dim_vector (new_nz, 1));
              Array<T> new_data (dim_vector (new_nz, 1));
              std::copy_n (tmp.ridx (), nz, new_ri.rwdata ());
              std::copy_n (tmp.data (), nz, new_data.rwdata ());
              // ... insert new data (densified) ...
              idx.copy_data (new_ri.rwdata () + nz);
              new_data.assign (octave::idx_vector (nz, new_nz), rhs.array_value ());
              // ... reassembly.
              *this = Sparse<T, Alloc> (new_data, new_ri, 0, nr, nc, false);
            }
        }
      else
        {
          // General shape: linearize, assign, and restore the shape.
          dim_vector save_dims = m_dimensions;
          *this = index (octave::idx_vector::colon);
          assign (idx, rhs.index (octave::idx_vector::colon));
          *this = reshape (save_dims);
        }
    }
  else if (rhl == 1)
    {
      // Scalar RHS: broadcast it to the length of the index.
      rhl = idx.length (n);
      if (rhs.nnz () != 0)
        assign (idx, Sparse<T, Alloc> (rhl, 1, rhs.data (0)));
      else
        assign (idx, Sparse<T, Alloc> (rhl, 1));
    }
  else
    octave::err_nonconformant ("=", dim_vector(idx.length (n), 1), rhs.dims());
}
2028
2029template <typename T, typename Alloc>
2031void
2032Sparse<T, Alloc>::assign (const octave::idx_vector& idx, const T& rhs)
2033{
2034 // FIXME: Converting the RHS and forwarding to the sparse matrix
2035 // assignment function is simpler, but it might be good to have a
2036 // specialization...
2037
2038 assign (idx, Sparse<T, Alloc> (1, 1, rhs));
2039}
2040
2041template <typename T, typename Alloc>
2043void
2044Sparse<T, Alloc>::assign (const octave::idx_vector& idx_i,
2045 const octave::idx_vector& idx_j,
2046 const Sparse<T, Alloc>& rhs)
2047{
2048 Sparse<T, Alloc> retval;
2049
2050 liboctave_panic_unless (ndims () == 2);
2051
2052 octave_idx_type nr = dim1 ();
2053 octave_idx_type nc = dim2 ();
2054 octave_idx_type nz = nnz ();
2055
2056 octave_idx_type n = rhs.rows ();
2057 octave_idx_type m = rhs.columns ();
2058
2059 // FIXME: this should probably be written more like the
2060 // Array<T>::assign function...
2061
2062 bool orig_zero_by_zero = (nr == 0 && nc == 0);
2063
2064 if (orig_zero_by_zero || (idx_i.length (nr) == n && idx_j.length (nc) == m))
2065 {
2066 octave_idx_type nrx;
2067 octave_idx_type ncx;
2068
2069 if (orig_zero_by_zero)
2070 {
2071 if (idx_i.is_colon ())
2072 {
2073 nrx = n;
2074
2075 if (idx_j.is_colon ())
2076 ncx = m;
2077 else
2078 ncx = idx_j.extent (nc);
2079 }
2080 else if (idx_j.is_colon ())
2081 {
2082 nrx = idx_i.extent (nr);
2083 ncx = m;
2084 }
2085 else
2086 {
2087 nrx = idx_i.extent (nr);
2088 ncx = idx_j.extent (nc);
2089 }
2090 }
2091 else
2092 {
2093 nrx = idx_i.extent (nr);
2094 ncx = idx_j.extent (nc);
2095 }
2096
2097 // Try to resize first if necessary.
2098 if (nrx != nr || ncx != nc)
2099 {
2100 resize (nrx, ncx);
2101 nr = rows ();
2102 nc = cols ();
2103 // nz is preserved.
2104 }
2105
2106 if (n == 0 || m == 0)
2107 return;
2108
2109 if (idx_i.is_colon ())
2110 {
2111 octave_idx_type lb, ub;
2112 // Great, we're just manipulating columns. This is going to be quite
2113 // efficient, because the columns can stay compressed as they are.
2114 if (idx_j.is_colon ())
2115 *this = rhs; // Shallow copy.
2116 else if (idx_j.is_cont_range (nc, lb, ub))
2117 {
2118 // Special-case a contiguous range.
2119 octave_idx_type li = cidx (lb);
2120 octave_idx_type ui = cidx (ub);
2121 octave_idx_type rnz = rhs.nnz ();
2122 octave_idx_type new_nz = nz - (ui - li) + rnz;
2123
2124 if (new_nz >= nz && new_nz <= nzmax ())
2125 {
2126 // Adding/overwriting elements, enough capacity allocated.
2127
2128 if (new_nz > nz)
2129 {
2130 // Make room first.
2131 std::copy_backward (data () + ui, data () + nz,
2132 data () + new_nz);
2133 std::copy_backward (ridx () + ui, ridx () + nz,
2134 ridx () + new_nz);
2135 mx_inline_add2 (nc - ub, cidx () + ub + 1, new_nz - nz);
2136 }
2137
2138 // Copy data and indices from rhs.
2139 std::copy_n (rhs.data (), rnz, data () + li);
2140 std::copy_n (rhs.ridx (), rnz, ridx () + li);
2141 mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2142 li);
2143
2144 liboctave_panic_unless (nnz () == new_nz);
2145 }
2146 else
2147 {
2148 // Clearing elements or exceeding capacity, allocate afresh
2149 // and paste pieces.
2150 const Sparse<T, Alloc> tmp = *this;
2151 *this = Sparse<T, Alloc> (nr, nc, new_nz);
2152
2153 // Head...
2154 std::copy_n (tmp.data (), li, data ());
2155 std::copy_n (tmp.ridx (), li, ridx ());
2156 std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
2157
2158 // new stuff...
2159 std::copy_n (rhs.data (), rnz, data () + li);
2160 std::copy_n (rhs.ridx (), rnz, ridx () + li);
2161 mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2162 li);
2163
2164 // ...tail.
2165 std::copy (tmp.data () + ui, tmp.data () + nz,
2166 data () + li + rnz);
2167 std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
2168 ridx () + li + rnz);
2169 mx_inline_add (nc - ub, cidx () + ub + 1,
2170 tmp.cidx () + ub + 1, new_nz - nz);
2171
2172 liboctave_panic_unless (nnz () == new_nz);
2173 }
2174 }
2175 else if (idx_j.is_range () && idx_j.increment () == -1)
2176 {
2177 // It's s(:,u:-1:l) = r. Reverse the assignment.
2178 assign (idx_i, idx_j.sorted (),
2179 rhs.index (idx_i, octave::idx_vector (m - 1, 0, -1)));
2180 }
2181 else if (idx_j.is_permutation (nc))
2182 {
2183 *this = rhs.index (idx_i, idx_j.inverse_permutation (nc));
2184 }
2185 else
2186 {
2187 const Sparse<T, Alloc> tmp = *this;
2188 *this = Sparse<T, Alloc> (nr, nc);
2190
2191 // Assemble column lengths.
2192 for (octave_idx_type i = 0; i < nc; i++)
2193 xcidx (i+1) = tmp.cidx (i+1) - tmp.cidx (i);
2194
2195 for (octave_idx_type i = 0; i < m; i++)
2196 {
2197 octave_idx_type j =idx_j(i);
2198 jsav[j] = i;
2199 xcidx (j+1) = rhs.cidx (i+1) - rhs.cidx (i);
2200 }
2201
2202 // Make cumulative.
2203 for (octave_idx_type i = 0; i < nc; i++)
2204 xcidx (i+1) += xcidx (i);
2205
2206 change_capacity (nnz ());
2207
2208 // Merge columns.
2209 for (octave_idx_type i = 0; i < nc; i++)
2210 {
2211 octave_idx_type l = xcidx (i);
2212 octave_idx_type u = xcidx (i+1);
2213 octave_idx_type j = jsav[i];
2214 if (j >= 0)
2215 {
2216 // from rhs
2217 octave_idx_type k = rhs.cidx (j);
2218 std::copy_n (rhs.data () + k, u - l, xdata () + l);
2219 std::copy_n (rhs.ridx () + k, u - l, xridx () + l);
2220 }
2221 else
2222 {
2223 // original
2224 octave_idx_type k = tmp.cidx (i);
2225 std::copy_n (tmp.data () + k, u - l, xdata () + l);
2226 std::copy_n (tmp.ridx () + k, u - l, xridx () + l);
2227 }
2228 }
2229
2230 }
2231 }
2232 else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
2233 {
2234 // It's just vector indexing. The 1D assign is specialized for that.
2235 assign (idx_i, rhs);
2236 }
2237 else if (idx_j.is_colon ())
2238 {
2239 if (idx_i.is_permutation (nr))
2240 {
2241 *this = rhs.index (idx_i.inverse_permutation (nr), idx_j);
2242 }
2243 else
2244 {
2245 // FIXME: optimize more special cases?
2246 // In general this requires unpacking the columns, which is slow,
2247 // especially for many small columns. OTOH, transpose is an
2248 // efficient O(nr+nc+nnz) operation.
2249 *this = transpose ();
2250 assign (octave::idx_vector::colon, idx_i, rhs.transpose ());
2251 *this = transpose ();
2252 }
2253 }
2254 else
2255 {
2256 // Split it into 2 assignments and one indexing.
2257 Sparse<T, Alloc> tmp = index (octave::idx_vector::colon, idx_j);
2258 tmp.assign (idx_i, octave::idx_vector::colon, rhs);
2259 assign (octave::idx_vector::colon, idx_j, tmp);
2260 }
2261 }
2262 else if (m == 1 && n == 1)
2263 {
2264 n = idx_i.length (nr);
2265 m = idx_j.length (nc);
2266 if (rhs.nnz () != 0)
2267 assign (idx_i, idx_j, Sparse<T, Alloc> (n, m, rhs.data (0)));
2268 else
2269 assign (idx_i, idx_j, Sparse<T, Alloc> (n, m));
2270 }
2271 else if (idx_i.length (nr) == m && idx_j.length (nc) == n
2272 && (n == 1 || m == 1))
2273 {
2274 assign (idx_i, idx_j, rhs.transpose ());
2275 }
2276 else
2277 octave::err_nonconformant ("=", idx_i.length (nr), idx_j.length (nc), n, m);
2278}
2279
2280template <typename T, typename Alloc>
2282void
2283Sparse<T, Alloc>::assign (const octave::idx_vector& idx_i,
2284 const octave::idx_vector& idx_j,
2285 const T& rhs)
2286{
2287 // FIXME: Converting the RHS and forwarding to the sparse matrix
2288 // assignment function is simpler, but it might be good to have a
2289 // specialization...
2290
2291 assign (idx_i, idx_j, Sparse<T, Alloc> (1, 1, rhs));
2292}
2293
2294// Can't use versions of these in Array.cc due to duplication of the
2295// instantiations for Array<double and Sparse<double>, etc.
2296template <typename T>
2298bool
2300 typename ref_param<T>::type b)
2301{
2302 return (a < b);
2303}
2304
2305template <typename T>
2307bool
2309 typename ref_param<T>::type b)
2310{
2311 return (a > b);
2312}
2313
2314template <typename T, typename Alloc>
2318{
2319 Sparse<T, Alloc> m = *this;
2320
2321 octave_idx_type nr = m.rows ();
2322 octave_idx_type nc = m.columns ();
2323
2324 if (m.numel () < 1 || dim > 1)
2325 return m;
2326
2327 bool sort_by_column = (dim > 0);
2328 if (sort_by_column)
2329 {
2330 m = m.transpose ();
2331 std::swap (nr, nc);
2332 }
2333
2334 octave_sort<T> lsort;
2335
2336 if (mode == ASCENDING)
2337 lsort.set_compare (sparse_ascending_compare<T>);
2338 else if (mode == DESCENDING)
2339 lsort.set_compare (sparse_descending_compare<T>);
2340 else
2342 ("Sparse<T, Alloc>::sort: invalid MODE");
2343
2344 T *v = m.data ();
2345 octave_idx_type *mcidx = m.cidx ();
2346 octave_idx_type *mridx = m.ridx ();
2347
2348 for (octave_idx_type j = 0; j < nc; j++)
2349 {
2350 octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2351 lsort.sort (v, ns);
2352
2354 if (mode == ASCENDING)
2355 {
2356 for (i = 0; i < ns; i++)
2357 if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2358 break;
2359 }
2360 else
2361 {
2362 for (i = 0; i < ns; i++)
2363 if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2364 break;
2365 }
2366 for (octave_idx_type k = 0; k < i; k++)
2367 mridx[k] = k;
2368 for (octave_idx_type k = i; k < ns; k++)
2369 mridx[k] = k - ns + nr;
2370
2371 v += ns;
2372 mridx += ns;
2373 }
2374
2375 if (sort_by_column)
2376 m = m.transpose ();
2377
2378 return m;
2379}
2380
2381template <typename T, typename Alloc>
2385 octave_idx_type dim, sortmode mode) const
2386{
2387 Sparse<T, Alloc> m = *this;
2388
2389 octave_idx_type nr = m.rows ();
2390 octave_idx_type nc = m.columns ();
2391
2392 if (m.numel () < 1 || dim > 1)
2393 {
2394 sidx = Array<octave_idx_type> (dim_vector (nr, nc), 1);
2395 return m;
2396 }
2397
2398 bool sort_by_column = (dim > 0);
2399 if (sort_by_column)
2400 {
2401 m = m.transpose ();
2402 std::swap (nr, nc);
2403 }
2404
2405 octave_sort<T> indexed_sort;
2406
2407 if (mode == ASCENDING)
2408 indexed_sort.set_compare (sparse_ascending_compare<T>);
2409 else if (mode == DESCENDING)
2410 indexed_sort.set_compare (sparse_descending_compare<T>);
2411 else
2413 ("Sparse<T, Alloc>::sort: invalid MODE");
2414
2415 T *v = m.data ();
2416 octave_idx_type *mcidx = m.cidx ();
2417 octave_idx_type *mridx = m.ridx ();
2418
2419 sidx = Array<octave_idx_type> (dim_vector (nr, nc));
2421
2422 for (octave_idx_type j = 0; j < nc; j++)
2423 {
2424 octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2425 octave_idx_type offset = j * nr;
2426
2427 if (ns == 0)
2428 {
2429 for (octave_idx_type k = 0; k < nr; k++)
2430 sidx(offset + k) = k;
2431 }
2432 else
2433 {
2434 for (octave_idx_type i = 0; i < ns; i++)
2435 vi[i] = mridx[i];
2436
2437 indexed_sort.sort (v, vi, ns);
2438
2440 if (mode == ASCENDING)
2441 {
2442 for (i = 0; i < ns; i++)
2443 if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2444 break;
2445 }
2446 else
2447 {
2448 for (i = 0; i < ns; i++)
2449 if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2450 break;
2451 }
2452
2453 octave_idx_type ii = 0;
2454 octave_idx_type jj = i;
2455 for (octave_idx_type k = 0; k < nr; k++)
2456 {
2457 if (ii < ns && mridx[ii] == k)
2458 ii++;
2459 else
2460 sidx(offset + jj++) = k;
2461 }
2462
2463 for (octave_idx_type k = 0; k < i; k++)
2464 {
2465 sidx(k + offset) = vi[k];
2466 mridx[k] = k;
2467 }
2468
2469 for (octave_idx_type k = i; k < ns; k++)
2470 {
2471 sidx(k - ns + nr + offset) = vi[k];
2472 mridx[k] = k - ns + nr;
2473 }
2474
2475 v += ns;
2476 mridx += ns;
2477 }
2478 }
2479
2480 if (sort_by_column)
2481 {
2482 m = m.transpose ();
2483 sidx = sidx.transpose ();
2484 }
2485
2486 return m;
2487}
2488
// Sparse<T, Alloc>::diag (k): for a two-dimensional matrix, return the
// k-th diagonal as a sparse column vector; for a row or column vector,
// build an n-by-n sparse matrix with the vector's elements placed on
// diagonal k (Matlab's diag semantics).
// NOTE(review): this listing came from a doxygen rendering; the leading
// numbers are the original file's line numbers.  The declaration line
// (2492 -- "diag (octave_idx_type k = 0) const" per the doxygen index)
// and the declaration of the result variable 'd' (line 2496) were lost
// in extraction.
template <typename T, typename Alloc>
2493{
2494 octave_idx_type nnr = rows ();
2495 octave_idx_type nnc = cols ();
2497
2498 if (nnr == 0 || nnc == 0)
2499 ; // do nothing
2500 else if (nnr != 1 && nnc != 1)
2501 {
// Matrix case.  Shrink the bounds so the requested diagonal starts at
// the origin; its length is then min (nnr, nnc).
2502 if (k > 0)
2503 nnc -= k;
2504 else if (k < 0)
2505 nnr += k;
2506
2507 if (nnr > 0 && nnc > 0)
2508 {
2509 octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;
2510
2511 // Count the number of nonzero elements
2512 octave_idx_type nel = 0;
2513 if (k > 0)
2514 {
2515 for (octave_idx_type i = 0; i < ndiag; i++)
2516 if (elem (i, i+k) != 0.)
2517 nel++;
2518 }
2519 else if (k < 0)
2520 {
2521 for (octave_idx_type i = 0; i < ndiag; i++)
2522 if (elem (i-k, i) != 0.)
2523 nel++;
2524 }
2525 else
2526 {
2527 for (octave_idx_type i = 0; i < ndiag; i++)
2528 if (elem (i, i) != 0.)
2529 nel++;
2530 }
2531
// Allocate an ndiag x 1 sparse vector holding exactly nel nonzeros;
// with a single column, cidx is simply [0, nel].
2532 d = Sparse<T, Alloc> (ndiag, 1, nel);
2533 d.xcidx (0) = 0;
2534 d.xcidx (1) = nel;
2535
// Second pass: copy the nonzero diagonal entries into d.
2536 octave_idx_type ii = 0;
2537 if (k > 0)
2538 {
2539 for (octave_idx_type i = 0; i < ndiag; i++)
2540 {
2541 T tmp = elem (i, i+k);
2542 if (tmp != 0.)
2543 {
2544 d.xdata (ii) = tmp;
2545 d.xridx (ii++) = i;
2546 }
2547 }
2548 }
2549 else if (k < 0)
2550 {
2551 for (octave_idx_type i = 0; i < ndiag; i++)
2552 {
2553 T tmp = elem (i-k, i);
2554 if (tmp != 0.)
2555 {
2556 d.xdata (ii) = tmp;
2557 d.xridx (ii++) = i;
2558 }
2559 }
2560 }
2561 else
2562 {
2563 for (octave_idx_type i = 0; i < ndiag; i++)
2564 {
2565 T tmp = elem (i, i);
2566 if (tmp != 0.)
2567 {
2568 d.xdata (ii) = tmp;
2569 d.xridx (ii++) = i;
2570 }
2571 }
2572 }
2573 }
2574 else
2575 {
2576 // Matlab returns [] 0x1 for out-of-range diagonal
2577
2578 octave_idx_type nr = 0;
2579 octave_idx_type nc = 1;
2580 octave_idx_type nz = 0;
2581
2582 d = Sparse<T, Alloc> (nr, nc, nz);
2583 }
2584 }
// Vector case: build a sparse diagonal matrix.  roff/coff give the
// row/column position of the diagonal's first element.
2585 else // one of dimensions == 1 (vector)
2586 {
2587 octave_idx_type roff = 0;
2588 octave_idx_type coff = 0;
2589 if (k > 0)
2590 {
2591 roff = 0;
2592 coff = k;
2593 }
2594 else if (k < 0)
2595 {
2596 roff = -k;
2597 coff = 0;
2598 }
2599
// Row vector: element j occupies column j of the input, so the data
// array can be copied index-for-index while shifting ridx/cidx by the
// diagonal offsets.
2600 if (nnr == 1)
2601 {
2602 octave_idx_type n = nnc + std::abs (k);
2603 octave_idx_type nz = nnz ();
2604
2605 d = Sparse<T, Alloc> (n, n, nz);
2606
2607 if (nnz () > 0)
2608 {
2609 for (octave_idx_type i = 0; i < coff+1; i++)
2610 d.xcidx (i) = 0;
2611
2612 for (octave_idx_type j = 0; j < nnc; j++)
2613 {
2614 for (octave_idx_type i = cidx (j); i < cidx (j+1); i++)
2615 {
2616 d.xdata (i) = data (i);
2617 d.xridx (i) = j + roff;
2618 }
2619 d.xcidx (j + coff + 1) = cidx (j+1);
2620 }
2621
// Columns past the last source column hold no entries; close them out.
2622 for (octave_idx_type i = nnc + coff + 1; i < n + 1; i++)
2623 d.xcidx (i) = nz;
2624 }
2625 }
2626 else
2627 {
// Column vector: walk the single column's row indices (ridx) and emit
// one output entry per occupied row.
2628 octave_idx_type n = nnr + std::abs (k);
2629 octave_idx_type nz = nnz ();
2630
2631 d = Sparse<T, Alloc> (n, n, nz);
2632
2633 if (nnz () > 0)
2634 {
2635 octave_idx_type ii = 0;
2636 octave_idx_type ir = ridx (0);
2637
2638 for (octave_idx_type i = 0; i < coff+1; i++)
2639 d.xcidx (i) = 0;
2640
2641 for (octave_idx_type i = 0; i < nnr; i++)
2642 {
2643 if (ir == i)
2644 {
2645 d.xdata (ii) = data (ii);
2646 d.xridx (ii++) = ir + roff;
2647
2648 if (ii != nz)
2649 ir = ridx (ii);
2650 }
2651 d.xcidx (i + coff + 1) = ii;
2652 }
2653
2654 for (octave_idx_type i = nnr + coff + 1; i < n+1; i++)
2655 d.xcidx (i) = nz;
2656 }
2657 }
2658 }
2659
2660 return d;
2661}
2662
// Static Sparse<T, Alloc>::cat: concatenate the N sparse matrices in
// SPARSE_LIST along dimension DIM (per the case labels below, dim 0 is
// vertical, dim 1 horizontal; dim -1/-2 select the hvcat rule and are
// mapped back to a non-negative dim).  Errors are reported through
// current_liboctave_error_handler (which does not return).
// NOTE(review): doxygen extraction dropped the first lines of the
// signature (2664-2666); per the index it is
// "cat (int dim, octave_idx_type n, const Sparse<T, Alloc> *sparse_list)".
template <typename T, typename Alloc>
2667 const Sparse<T, Alloc> *sparse_list)
2668{
2669 // Default concatenation.
2670 bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
2671
2672 if (dim == -1 || dim == -2)
2673 {
2674 concat_rule = &dim_vector::hvcat;
2675 dim = -dim - 1;
2676 }
2677 else if (dim < 0)
2678 (*current_liboctave_error_handler) ("cat: invalid dimension");
2679
2680 dim_vector dv;
2681 octave_idx_type total_nz = 0;
2682 if (dim != 0 && dim != 1)
2683 (*current_liboctave_error_handler)
2684 ("cat: invalid dimension for sparse concatenation");
2685
2686 if (n == 1)
2687 return sparse_list[0];
2688
// Accumulate the result dimensions via the chosen concat rule and the
// total nonzero count, validating each operand's dims along the way.
2689 for (octave_idx_type i = 0; i < n; i++)
2690 {
2691 if (! (dv.*concat_rule) (sparse_list[i].dims (), dim))
2692 (*current_liboctave_error_handler) ("cat: dimension mismatch");
2693
2694 total_nz += sparse_list[i].nnz ();
2695 }
2696
2697 Sparse<T, Alloc> retval (dv, total_nz);
2698
2699 if (retval.isempty ())
2700 return retval;
2701
2702 switch (dim)
2703 {
2704 case 0:
2705 {
2706 // sparse vertcat. This is not efficiently handled by assignment,
2707 // so we'll do it directly.
// For each output column j, stack each operand's column j in order,
// offsetting row indices by the rows already consumed (rcum) and
// appending entries at position l.
2708 octave_idx_type l = 0;
2709 for (octave_idx_type j = 0; j < dv(1); j++)
2710 {
2711 octave_quit ();
2712
2713 octave_idx_type rcum = 0;
2714 for (octave_idx_type i = 0; i < n; i++)
2715 {
2716 const Sparse<T, Alloc>& spi = sparse_list[i];
2717 // Skipping empty matrices. See the comment in Array.cc.
2718 if (spi.isempty ())
2719 continue;
2720
2721 octave_idx_type kl = spi.cidx (j);
2722 octave_idx_type ku = spi.cidx (j+1);
2723 for (octave_idx_type k = kl; k < ku; k++, l++)
2724 {
2725 retval.xridx (l) = spi.ridx (k) + rcum;
2726 retval.xdata (l) = spi.data (k);
2727 }
2728
2729 rcum += spi.rows ();
2730 }
2731
2732 retval.xcidx (j+1) = l;
2733 }
2734
2735 break;
2736 }
2737 case 1:
2738 {
// Horizontal concatenation: copy each operand into its column range
// [l, u) of the result via indexed assignment.
2739 octave_idx_type l = 0;
2740 for (octave_idx_type i = 0; i < n; i++)
2741 {
2742 octave_quit ();
2743
2744 // Skipping empty matrices. See the comment in Array.cc.
2745 if (sparse_list[i].isempty ())
2746 continue;
2747
2748 octave_idx_type u = l + sparse_list[i].columns ();
2749 retval.assign (octave::idx_vector::colon, octave::idx_vector (l, u),
2750 sparse_list[i]);
2751 l = u;
2752 }
2753
2754 break;
2755 }
// Unreachable after the dim validation above; kept as a guard.
2756 default:
2757 (*current_liboctave_error_handler) ("Sparse<T, Alloc>::cat: invalid dimension = %d - please report this bug", dim);
2758 }
2759
2760 return retval;
2761}
2762
// Sparse<T, Alloc>::array_value: expand the sparse matrix into a dense
// Array<T>, default-initialized (zero-filled), copying only the stored
// nonzero entries into place.
// NOTE(review): the signature line (2766, "Array<T> array_value ()
// const" per the doxygen index) is missing from this extraction.
template <typename T, typename Alloc>
2767{
2768 Array<T> retval (dims (), T ());
2769 if (rows () == 1)
2770 {
// Row vector fast path: each column j holds at most one stored entry,
// present exactly when cidx (j+1) advances past the entries consumed
// so far (i).
2771 octave_idx_type i = 0;
2772 for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2773 {
2774 if (cidx (j+1) > i)
2775 retval.xelem (j) = data (i++);
2776 }
2777 }
2778 else
2779 {
// General case: scatter each column's stored entries to (ridx (i), j).
2780 for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2781 for (octave_idx_type i = cidx (j), iu = cidx (j+1); i < iu; i++)
2782 retval.xelem (ridx (i), j) = data (i);
2783 }
2784
2785 return retval;
2786}
2787
// Read a sparse matrix from IS in triplet form: for each of A's nzmax
// entries, a 1-based row index, a 1-based column index, and a value
// parsed by READ_FCN.  Entries must arrive sorted by column, then by
// row within a column; A's cidx is filled in as the column index
// advances.  A's dimensions and nzmax must already be set by the
// caller.  Malformed input sets failbit and is reported through
// current_liboctave_error_handler (which does not return); a failure
// inside READ_FCN returns IS in its error state.
template <typename T>
2790std::istream&
2791read_sparse_matrix (std::istream& is, Sparse<T>& a,
2792 T (*read_fcn) (std::istream&))
2793{
2794 octave_idx_type nr = a.rows ();
2795 octave_idx_type nc = a.cols ();
2796 octave_idx_type nz = a.nzmax ();
2797
2798 if (nr > 0 && nc > 0)
2799 {
2800 octave_idx_type itmp;
2801 octave_idx_type jtmp;
2802 octave_idx_type iold = 0;
2803 octave_idx_type jold = 0;
2804 octave_idx_type ii = 0;
2805 T tmp;
2806
2807 a.cidx (0) = 0;
2808 for (octave_idx_type i = 0; i < nz; i++)
2809 {
// Read the 1-based (row, column) pair and convert to 0-based.
2810 itmp = 0; jtmp = 0;
2811 is >> itmp;
2812 itmp--;
2813
2814 is >> jtmp;
2815 jtmp--;
2816
// Non-integer text in an index position: re-read the offending token
// as a string so it can appear in the error message.
2817 if (is.fail ())
2818 {
2819 is.clear();
2820 std::string err_field;
2821 is >> err_field;
2822 (*current_liboctave_error_handler)
2823 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2824 "Symbols '%s' is not an integer format",
2825 i+1, err_field.c_str ());
2826 }
2827
2828 if (itmp < 0 || itmp >= nr)
2829 {
2830 is.setstate (std::ios::failbit);
2831
2832 (*current_liboctave_error_handler)
2833 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2834 "row index = %" OCTAVE_IDX_TYPE_FORMAT " out of range",
2835 i+1, itmp + 1);
2836 }
2837
2838 if (jtmp < 0 || jtmp >= nc)
2839 {
2840 is.setstate (std::ios::failbit);
2841
2842 (*current_liboctave_error_handler)
2843 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2844 "column index = %" OCTAVE_IDX_TYPE_FORMAT " out of range",
2845 i+1, jtmp + 1);
2846 }
2847
// Enforce ascending column order; when a new column starts, close out
// cidx for every column skipped since the previous entry.
2848 if (jtmp < jold)
2849 {
2850 is.setstate (std::ios::failbit);
2851
2852 (*current_liboctave_error_handler)
2853 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ":"
2854 "column indices must appear in ascending order "
2855 "(%" OCTAVE_IDX_TYPE_FORMAT " < %" OCTAVE_IDX_TYPE_FORMAT ")",
2856 i+1, jtmp, jold);
2857 }
2858 else if (jtmp > jold)
2859 {
2860 for (octave_idx_type j = jold; j < jtmp; j++)
2861 a.cidx (j+1) = ii;
2862 }
// Same column as before: row indices must be ascending within it.
2863 else if (itmp < iold)
2864 {
2865 is.setstate (std::ios::failbit);
2866
2867 (*current_liboctave_error_handler)
2868 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2869 "row indices must appear in ascending order in each column "
2870 "(%" OCTAVE_IDX_TYPE_FORMAT " < %" OCTAVE_IDX_TYPE_FORMAT ")",
2871 i+1, iold, itmp);
2872 }
2873
2874 iold = itmp;
2875 jold = jtmp;
2876
2877 tmp = read_fcn (is);
2878
2879 if (! is)
2880 return is; // Problem, return is in error state
2881
2882 a.data (ii) = tmp;
2883 a.ridx (ii++) = itmp;
2884 }
2885
// Close out cidx for any trailing columns with no entries.
2886 for (octave_idx_type j = jold; j < nc; j++)
2887 a.cidx (j+1) = ii;
2888 }
2889
2890 return is;
2891}
2892
2893/*
2894 * Tests
2895 *
2896
2897%!function x = set_slice (x, dim, slice, arg)
2898%! switch (dim)
2899%! case 11
2900%! x(slice) = 2;
2901%! case 21
2902%! x(slice, :) = 2;
2903%! case 22
2904%! x(:, slice) = 2;
2905%! otherwise
2906%! error ("invalid dim, '%d'", dim);
2907%! endswitch
2908%!endfunction
2909
2910%!function x = set_slice2 (x, dim, slice)
2911%! switch (dim)
2912%! case 11
2913%! x(slice) = 2 * ones (size (slice));
2914%! case 21
2915%! x(slice, :) = 2 * ones (length (slice), columns (x));
2916%! case 22
2917%! x(:, slice) = 2 * ones (rows (x), length (slice));
2918%! otherwise
2919%! error ("invalid dim, '%d'", dim);
2920%! endswitch
2921%!endfunction
2922
2923%!function test_sparse_slice (size, dim, slice)
2924%! x = ones (size);
2925%! s = set_slice (sparse (x), dim, slice);
2926%! f = set_slice (x, dim, slice);
2927%! assert (nnz (s), nnz (f));
2928%! assert (full (s), f);
2929%! s = set_slice2 (sparse (x), dim, slice);
2930%! f = set_slice2 (x, dim, slice);
2931%! assert (nnz (s), nnz (f));
2932%! assert (full (s), f);
2933%!endfunction
2934
2935#### 1d indexing
2936
2937## size = [2 0]
2938%!test test_sparse_slice ([2 0], 11, []);
2939%!assert (set_slice (sparse (ones ([2 0])), 11, 1), sparse ([2 0]')) # sparse different from full
2940%!assert (set_slice (sparse (ones ([2 0])), 11, 2), sparse ([0 2]')) # sparse different from full
2941%!assert (set_slice (sparse (ones ([2 0])), 11, 3), sparse ([0 0; 2 0]')) # sparse different from full
2942%!assert (set_slice (sparse (ones ([2 0])), 11, 4), sparse ([0 0; 0 2]')) # sparse different from full
2943
2944## size = [0 2]
2945%!test test_sparse_slice ([0 2], 11, []);
2946%!assert (set_slice (sparse (ones ([0 2])), 11, 1), sparse ([2 0])) # sparse different from full
2947%!test test_sparse_slice ([0 2], 11, 2);
2948%!test test_sparse_slice ([0 2], 11, 3);
2949%!test test_sparse_slice ([0 2], 11, 4);
2950%!test test_sparse_slice ([0 2], 11, [4, 4]);
2951
2952## size = [2 1]
2953%!test test_sparse_slice ([2 1], 11, []);
2954%!test test_sparse_slice ([2 1], 11, 1);
2955%!test test_sparse_slice ([2 1], 11, 2);
2956%!test test_sparse_slice ([2 1], 11, 3);
2957%!test test_sparse_slice ([2 1], 11, 4);
2958%!test test_sparse_slice ([2 1], 11, [4, 4]);
2959
2960## size = [1 2]
2961%!test test_sparse_slice ([1 2], 11, []);
2962%!test test_sparse_slice ([1 2], 11, 1);
2963%!test test_sparse_slice ([1 2], 11, 2);
2964%!test test_sparse_slice ([1 2], 11, 3);
2965%!test test_sparse_slice ([1 2], 11, 4);
2966%!test test_sparse_slice ([1 2], 11, [4, 4]);
2967
2968## size = [2 2]
2969%!test test_sparse_slice ([2 2], 11, []);
2970%!test test_sparse_slice ([2 2], 11, 1);
2971%!test test_sparse_slice ([2 2], 11, 2);
2972%!test test_sparse_slice ([2 2], 11, 3);
2973%!test test_sparse_slice ([2 2], 11, 4);
2974%!test test_sparse_slice ([2 2], 11, [4, 4]);
2975# These 2 errors are the same as in the full case
2976%!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 5)
2977%!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 6)
2978
2979#### 2d indexing
2980
2981## size = [2 0]
2982%!test test_sparse_slice ([2 0], 21, []);
2983%!test test_sparse_slice ([2 0], 21, 1);
2984%!test test_sparse_slice ([2 0], 21, 2);
2985%!test test_sparse_slice ([2 0], 21, [2,2]);
2986%!assert (set_slice (sparse (ones ([2 0])), 21, 3), sparse (3,0))
2987%!assert (set_slice (sparse (ones ([2 0])), 21, 4), sparse (4,0))
2988%!test test_sparse_slice ([2 0], 22, []);
2989%!test test_sparse_slice ([2 0], 22, 1);
2990%!test test_sparse_slice ([2 0], 22, 2);
2991%!test test_sparse_slice ([2 0], 22, [2,2]);
2992%!assert (set_slice (sparse (ones ([2 0])), 22, 3), sparse ([0 0 2;0 0 2])) # sparse different from full
2993%!assert (set_slice (sparse (ones ([2 0])), 22, 4), sparse ([0 0 0 2;0 0 0 2])) # sparse different from full
2994
2995## size = [0 2]
2996%!test test_sparse_slice ([0 2], 21, []);
2997%!test test_sparse_slice ([0 2], 21, 1);
2998%!test test_sparse_slice ([0 2], 21, 2);
2999%!test test_sparse_slice ([0 2], 21, [2,2]);
3000%!assert (set_slice (sparse (ones ([0 2])), 21, 3), sparse ([0 0;0 0;2 2])) # sparse different from full
3001%!assert (set_slice (sparse (ones ([0 2])), 21, 4), sparse ([0 0;0 0;0 0;2 2])) # sparse different from full
3002%!test test_sparse_slice ([0 2], 22, []);
3003%!test test_sparse_slice ([0 2], 22, 1);
3004%!test test_sparse_slice ([0 2], 22, 2);
3005%!test test_sparse_slice ([0 2], 22, [2,2]);
3006%!assert (set_slice (sparse (ones ([0 2])), 22, 3), sparse (0,3))
3007%!assert (set_slice (sparse (ones ([0 2])), 22, 4), sparse (0,4))
3008
3009## size = [2 1]
3010%!test test_sparse_slice ([2 1], 21, []);
3011%!test test_sparse_slice ([2 1], 21, 1);
3012%!test test_sparse_slice ([2 1], 21, 2);
3013%!test test_sparse_slice ([2 1], 21, [2,2]);
3014%!test test_sparse_slice ([2 1], 21, 3);
3015%!test test_sparse_slice ([2 1], 21, 4);
3016%!test test_sparse_slice ([2 1], 22, []);
3017%!test test_sparse_slice ([2 1], 22, 1);
3018%!test test_sparse_slice ([2 1], 22, 2);
3019%!test test_sparse_slice ([2 1], 22, [2,2]);
3020%!test test_sparse_slice ([2 1], 22, 3);
3021%!test test_sparse_slice ([2 1], 22, 4);
3022
3023## size = [1 2]
3024%!test test_sparse_slice ([1 2], 21, []);
3025%!test test_sparse_slice ([1 2], 21, 1);
3026%!test test_sparse_slice ([1 2], 21, 2);
3027%!test test_sparse_slice ([1 2], 21, [2,2]);
3028%!test test_sparse_slice ([1 2], 21, 3);
3029%!test test_sparse_slice ([1 2], 21, 4);
3030%!test test_sparse_slice ([1 2], 22, []);
3031%!test test_sparse_slice ([1 2], 22, 1);
3032%!test test_sparse_slice ([1 2], 22, 2);
3033%!test test_sparse_slice ([1 2], 22, [2,2]);
3034%!test test_sparse_slice ([1 2], 22, 3);
3035%!test test_sparse_slice ([1 2], 22, 4);
3036
3037## size = [2 2]
3038%!test test_sparse_slice ([2 2], 21, []);
3039%!test test_sparse_slice ([2 2], 21, 1);
3040%!test test_sparse_slice ([2 2], 21, 2);
3041%!test test_sparse_slice ([2 2], 21, [2,2]);
3042%!test test_sparse_slice ([2 2], 21, 3);
3043%!test test_sparse_slice ([2 2], 21, 4);
3044%!test test_sparse_slice ([2 2], 22, []);
3045%!test test_sparse_slice ([2 2], 22, 1);
3046%!test test_sparse_slice ([2 2], 22, 2);
3047%!test test_sparse_slice ([2 2], 22, [2,2]);
3048%!test test_sparse_slice ([2 2], 22, 3);
3049%!test test_sparse_slice ([2 2], 22, 4);
3050
3051%!assert <*35570> (speye (3,1)(3:-1:1), sparse ([0; 0; 1]))
3052
3053## Test removing columns
3054%!test <*36656>
3055%! s = sparse (magic (5));
3056%! s(:,2:4) = [];
3057%! assert (s, sparse (magic (5)(:, [1,5])));
3058
3059%!test
3060%! s = sparse ([], [], [], 1, 1);
3061%! s(1,:) = [];
3062%! assert (s, sparse ([], [], [], 0, 1));
3063
3064## Test (bug #37321)
3065%!test <*37321> a=sparse (0,0); assert (all (a) == sparse ([1]));
3066%!test <*37321> a=sparse (0,1); assert (all (a) == sparse ([1]));
3067%!test <*37321> a=sparse (1,0); assert (all (a) == sparse ([1]));
3068%!test <*37321> a=sparse (1,0); assert (all (a,2) == sparse ([1]));
3069%!test <*37321> a=sparse (1,0); assert (size (all (a,1)), [1 0]);
3070%!test <*37321> a=sparse (1,1);
3071%! assert (all (a) == sparse ([0]));
3072%! assert (size (all (a)), [1 1]);
3073%!test <*37321> a=sparse (2,1);
3074%! assert (all (a) == sparse ([0]));
3075%! assert (size (all (a)), [1 1]);
3076%!test <*37321> a=sparse (1,2);
3077%! assert (all (a) == sparse ([0]));
3078%! assert (size (all (a)), [1 1]);
3079%!test <*37321> a=sparse (2,2); assert (isequal (all (a), sparse ([0 0])));
3080
3081## Test assigning row to a column slice
3082%!test <45589>
3083%! a = sparse (magic (3));
3084%! b = a;
3085%! a(1,:) = 1:3;
3086%! b(1,:) = (1:3)';
3087%! assert (a, b);
3088
3089*/
3090
3091template <typename T, typename Alloc>
3093void
3094Sparse<T, Alloc>::print_info (std::ostream& os, const std::string& prefix) const
3095{
3096 os << prefix << "m_rep address: " << m_rep << "\n"
3097 << prefix << "m_rep->m_nzmax: " << m_rep->m_nzmax << "\n"
3098 << prefix << "m_rep->m_nrows: " << m_rep->m_nrows << "\n"
3099 << prefix << "m_rep->m_ncols: " << m_rep->m_ncols << "\n"
3100 << prefix << "m_rep->m_data: " << m_rep->m_data << "\n"
3101 << prefix << "m_rep->m_ridx: " << m_rep->m_ridx << "\n"
3102 << prefix << "m_rep->m_cidx: " << m_rep->m_cidx << "\n"
3103 << prefix << "m_rep->m_count: " << m_rep->m_count << "\n";
3104}
3105
// INSTANTIATE_SPARSE(T) explicitly instantiates Sparse<T> together
// with its stream reader read_sparse_matrix<T>.  Under clang the
// class instantiation itself carries the OCTAVE_API export attribute;
// NOTE(review): the rationale for the compiler split is not visible
// here -- presumably other compilers object to (or do not need) the
// attribute in that position; confirm against build history.
// (No comments can be placed inside the macros themselves without
// breaking the backslash line continuations.)
3106#if defined (__clang__)
3107# define INSTANTIATE_SPARSE(T) \
3108 template class OCTAVE_API Sparse<T>; \
3109 template OCTAVE_API std::istream& \
3110 read_sparse_matrix<T> (std::istream& is, Sparse<T>& a, \
3111 T (*read_fcn) (std::istream&));
3112#else
// Non-clang variant: OCTAVE_API appears only on the reader function.
3113# define INSTANTIATE_SPARSE(T) \
3114 template class Sparse<T>; \
3115 template OCTAVE_API std::istream& \
3116 read_sparse_matrix<T> (std::istream& is, Sparse<T>& a, \
3117 T (*read_fcn) (std::istream&));
3118#endif
std::istream & read_sparse_matrix(std::istream &is, Sparse< T > &a, T(*read_fcn)(std::istream &))
Definition Sparse.cc:2791
bool sparse_descending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition Sparse.cc:2308
bool sparse_ascending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition Sparse.cc:2299
N Dimensional Array with copy-on-write semantics.
Definition Array.h:130
T & xelem(octave_idx_type n)
Size of the specified dimension.
Definition Array.h:525
Array< T, Alloc > index(const octave::idx_vector &i) const
Indexing without resizing.
void assign(const octave::idx_vector &i, const Array< T, Alloc > &rhs, const T &rfv)
Indexed assignment (always with resize & fill).
Array< T, Alloc > transpose() const
Size of the specified dimension.
T & elem(octave_idx_type n)
Size of the specified dimension.
Definition Array.h:563
octave_idx_type rows() const
Definition Array.h:463
octave_idx_type cols() const
Definition Array.h:473
Array< T, Alloc > as_matrix() const
Return the array as a matrix.
Definition Array.h:446
const T * data() const
Size of the specified dimension.
Definition Array.h:665
T * rwdata()
Size of the specified dimension.
octave_idx_type numel() const
Number of elements in the array.
Definition Array.h:418
octave_idx_type rows() const
Definition PermMatrix.h:62
const Array< octave_idx_type > & col_perm_vec() const
Definition PermMatrix.h:83
bool indices_ok() const
Definition Sparse.cc:178
bool any_element_is_nan() const
Definition Sparse.cc:186
void change_length(octave_idx_type nz)
Definition Sparse.cc:144
octave_idx_type m_ncols
Definition Sparse.h:73
octave::refcount< octave_idx_type > m_count
Definition Sparse.h:74
idx_type_pointer m_cidx
Definition Sparse.h:70
T_pointer m_data
Definition Sparse.h:68
void maybe_compress(bool remove_zeros)
Definition Sparse.cc:119
T & elem(octave_idx_type r, octave_idx_type c)
Definition Sparse.cc:65
idx_type_pointer m_ridx
Definition Sparse.h:69
octave_idx_type m_nzmax
Definition Sparse.h:71
T celem(octave_idx_type r, octave_idx_type c) const
Definition Sparse.cc:107
octave_idx_type cols() const
Definition Sparse.h:349
Sparse< T, Alloc > diag(octave_idx_type k=0) const
Definition Sparse.cc:2492
Sparse< T, Alloc > permute(const Array< octave_idx_type > &vec, bool inv=false) const
Definition Sparse.cc:930
octave_idx_type nzmax() const
Amount of storage for nonzero elements.
Definition Sparse.h:333
Array< T > array_value() const
Definition Sparse.cc:2766
void delete_elements(const octave::idx_vector &i)
Definition Sparse.cc:1197
Sparse< T, Alloc > index(const octave::idx_vector &i, bool resize_ok=false) const
Definition Sparse.cc:1433
void print_info(std::ostream &os, const std::string &prefix) const
Definition Sparse.cc:3094
octave_idx_type * cidx()
Definition Sparse.h:593
octave_idx_type columns() const
Definition Sparse.h:350
void assign(const octave::idx_vector &i, const Sparse< T, Alloc > &rhs)
Definition Sparse.cc:1880
virtual ~Sparse()
Definition Sparse.cc:706
T * data()
Definition Sparse.h:571
void resize(octave_idx_type r, octave_idx_type c)
Definition Sparse.cc:992
Sparse< T, Alloc > sort(octave_idx_type dim=0, sortmode mode=ASCENDING) const
Definition Sparse.cc:2317
Sparse< T, Alloc > & insert(const Sparse< T, Alloc > &a, octave_idx_type r, octave_idx_type c)
Definition Sparse.cc:1043
T * xdata()
Definition Sparse.h:573
octave_idx_type * ridx()
Definition Sparse.h:580
void resize1(octave_idx_type n)
Definition Sparse.cc:959
OCTAVE_NORETURN T range_error(const char *fcn, octave_idx_type n) const
Definition Sparse.cc:757
octave_idx_type numel() const
Definition Sparse.h:340
Sparse()
Definition Sparse.h:256
static Sparse< T, Alloc > cat(int dim, octave_idx_type n, const Sparse< T, Alloc > *sparse_list)
Definition Sparse.cc:2666
Sparse< T, Alloc > maybe_compress(bool remove_zeros=false)
Definition Sparse.h:528
Sparse< T, Alloc > & operator=(const Sparse< T, Alloc > &a)
Definition Sparse.cc:714
octave_idx_type nnz() const
Actual number of nonzero terms.
Definition Sparse.h:336
octave_idx_type rows() const
Definition Sparse.h:348
void change_capacity(octave_idx_type nz)
Definition Sparse.h:553
Sparse< T, Alloc > transpose() const
Definition Sparse.cc:1139
dim_vector m_dimensions
Definition Sparse.h:248
octave_idx_type compute_index(const Array< octave_idx_type > &ra_idx) const
Definition Sparse.cc:733
Sparse< T, Alloc >::SparseRep * m_rep
Definition Sparse.h:246
Sparse< T, Alloc > reshape(const dim_vector &new_dims) const
Definition Sparse.cc:847
dim_vector dims() const
Definition Sparse.h:368
octave_idx_type * xcidx()
Definition Sparse.h:599
octave_idx_type * xridx()
Definition Sparse.h:586
bool isempty() const
Definition Sparse.h:567
Vector representing the dimensions (size) of an Array.
Definition dim-vector.h:90
bool concat(const dim_vector &dvb, int dim)
This corresponds to cat().
octave_idx_type safe_numel() const
The following function will throw a std::bad_alloc () exception if the requested size is larger than ...
Definition dim-vector.cc:98
std::string str(char sep='x') const
Definition dim-vector.cc:68
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition dim-vector.h:331
void resize(int n, int fill_value=0)
Definition dim-vector.h:268
octave_idx_type ndims() const
Number of dimensions.
Definition dim-vector.h:253
bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
dim_vector redim(int n) const
Force certain dimensionality, preserving numel ().
virtual octave_idx_type numel() const
Definition ov-base.h:401
void set_compare(const compare_fcn_type &comp)
Definition oct-sort.h:115
void sort(T *data, octave_idx_type nel)
Definition oct-sort.cc:1521
if_then_else< is_class_type< T >::no, T, Tconst & >::result type
Definition lo-traits.h:121
OCTAVE_NORETURN liboctave_error_handler current_liboctave_error_handler
Definition lo-error.c:41
#define liboctave_panic_unless(cond)
Definition lo-error.h:102
F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE * d
#define OCTAVE_API
Definition main.in.cc:55
void mx_inline_add2(std::size_t n, R *r, const X *x)
void mx_inline_sub(std::size_t n, R *r, const X *x, const Y *y)
void mx_inline_add(std::size_t n, R *r, const X *x, const Y *y)
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition oct-locbuf.h:44
#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value)
Definition oct-locbuf.h:50
sortmode
Definition oct-sort.h:97
@ ASCENDING
Definition oct-sort.h:97
@ DESCENDING
Definition oct-sort.h:97
T::size_type numel(const T &str)
Definition oct-string.cc:74
const octave_base_value const Array< octave_idx_type > & ra_idx
bool sparse_indices_ok(octave_idx_type *r, octave_idx_type *c, octave_idx_type nrows, octave_idx_type ncols, octave_idx_type nnz)
F77_RET_T len
Definition xerbla.cc:61