GNU Octave 7.1.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
Sparse.cc
Go to the documentation of this file.
1////////////////////////////////////////////////////////////////////////
2//
3// Copyright (C) 1998-2022 The Octave Project Developers
4//
5// See the file COPYRIGHT.md in the top-level directory of this
6// distribution or <https://octave.org/copyright/>.
7//
8// This file is part of Octave.
9//
10// Octave is free software: you can redistribute it and/or modify it
11// under the terms of the GNU General Public License as published by
12// the Free Software Foundation, either version 3 of the License, or
13// (at your option) any later version.
14//
15// Octave is distributed in the hope that it will be useful, but
16// WITHOUT ANY WARRANTY; without even the implied warranty of
17// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18// GNU General Public License for more details.
19//
20// You should have received a copy of the GNU General Public License
21// along with Octave; see the file COPYING. If not, see
22// <https://www.gnu.org/licenses/>.
23//
24////////////////////////////////////////////////////////////////////////
25
26// This file should not include config.h. It is only included in other
27// C++ source files that should have included config.h before including
28// this file.
29
#include <cassert>
#include <cinttypes>

#include <algorithm>
#include <istream>
#include <ostream>
#include <sstream>
#include <vector>

#include "Array.h"
#include "MArray.h"
#include "Array-util.h"
#include "Range.h"
#include "idx-vector.h"
#include "lo-array-errwarn.h"
#include "lo-error.h"
#include "lo-mappers.h"
#include "oct-locbuf.h"
#include "quit.h"

#include "Sparse.h"
#include "sparse-util.h"
#include "oct-spparms.h"
#include "mx-inlines.cc"

#include "PermMatrix.h"
54
55template <typename T, typename Alloc>
58{
59 static typename Sparse<T, Alloc>::SparseRep nr;
60 return &nr;
61}
62
63template <typename T, typename Alloc>
65T&
67{
69
70 if (m_nzmax <= 0)
71 (*current_liboctave_error_handler)
72 ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
73
74 for (i = m_cidx[c]; i < m_cidx[c + 1]; i++)
75 if (m_ridx[i] == r)
76 return m_data[i];
77 else if (m_ridx[i] > r)
78 break;
79
80 // Ok, If we've gotten here, we're in trouble. Have to create a
81 // new element in the sparse array. This' gonna be slow!!!
82 if (m_cidx[m_ncols] == m_nzmax)
83 (*current_liboctave_error_handler)
84 ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
85
86 octave_idx_type to_move = m_cidx[m_ncols] - i;
87 if (to_move != 0)
88 {
89 for (octave_idx_type j = m_cidx[m_ncols]; j > i; j--)
90 {
91 m_data[j] = m_data[j-1];
92 m_ridx[j] = m_ridx[j-1];
93 }
94 }
95
96 for (octave_idx_type j = c + 1; j < m_ncols + 1; j++)
97 m_cidx[j] = m_cidx[j] + 1;
98
99 m_data[i] = 0.;
100 m_ridx[i] = r;
101
102 return m_data[i];
103}
104
105template <typename T, typename Alloc>
107T
109{
110 if (m_nzmax > 0)
111 for (octave_idx_type i = m_cidx[c]; i < m_cidx[c + 1]; i++)
112 if (m_ridx[i] == r)
113 return m_data[i];
114 return T ();
115}
116
117template <typename T, typename Alloc>
119void
121{
122 if (remove_zeros)
123 {
124 octave_idx_type i = 0;
125 octave_idx_type k = 0;
126 for (octave_idx_type j = 1; j <= m_ncols; j++)
127 {
128 octave_idx_type u = m_cidx[j];
129 for (; i < u; i++)
130 if (m_data[i] != T ())
131 {
132 m_data[k] = m_data[i];
133 m_ridx[k++] = m_ridx[i];
134 }
135 m_cidx[j] = k;
136 }
137 }
138
139 change_length (m_cidx[m_ncols]);
140}
141
142template <typename T, typename Alloc>
144void
146{
147 for (octave_idx_type j = m_ncols; j > 0 && m_cidx[j] > nz; j--)
148 m_cidx[j] = nz;
149
150 // Always preserve space for 1 element.
151 nz = (nz > 0 ? nz : 1);
152
153 // Skip reallocation if we have less than 1/frac extra elements to discard.
154 static const int frac = 5;
155 if (nz > m_nzmax || nz < m_nzmax - m_nzmax/frac)
156 {
157 // Reallocate.
158 octave_idx_type min_nzmax = std::min (nz, m_nzmax);
159
160 octave_idx_type *new_ridx = idx_type_allocate (nz);
161 std::copy_n (m_ridx, min_nzmax, new_ridx);
162
163 idx_type_deallocate (m_ridx, m_nzmax);
164 m_ridx = new_ridx;
165
166 T *new_data = T_allocate (nz);
167 std::copy_n (m_data, min_nzmax, new_data);
168
169 T_deallocate (m_data, m_nzmax);
170 m_data = new_data;
171
172 m_nzmax = nz;
173 }
175
176template <typename T, typename Alloc>
178bool
181 return sparse_indices_ok (m_ridx, m_cidx, m_nrows, m_ncols, nnz ());
182}
183
184template <typename T, typename Alloc>
186bool
188{
189 octave_idx_type nz = nnz ();
190
191 for (octave_idx_type i = 0; i < nz; i++)
192 if (octave::math::isnan (m_data[i]))
193 return true;
194
195 return false;
196}
197
198template <typename T, typename Alloc>
201 T val)
202 : m_rep (nullptr), m_dimensions (dim_vector (nr, nc))
203{
204 if (val != T ())
205 {
206 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, m_dimensions.safe_numel ());
207
208 octave_idx_type ii = 0;
209 xcidx (0) = 0;
210 for (octave_idx_type j = 0; j < nc; j++)
211 {
212 for (octave_idx_type i = 0; i < nr; i++)
213 {
214 xdata (ii) = val;
215 xridx (ii++) = i;
216 }
217 xcidx (j+1) = ii;
218 }
219 }
220 else
221 {
222 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, 0);
223 for (octave_idx_type j = 0; j < nc+1; j++)
224 xcidx (j) = 0;
225 }
226}
227
228template <typename T, typename Alloc>
231 : m_rep (new typename Sparse<T, Alloc>::SparseRep (a.rows (), a.cols (), a.rows ())),
232 m_dimensions (dim_vector (a.rows (), a.cols ()))
233{
234 octave_idx_type n = a.rows ();
235 for (octave_idx_type i = 0; i <= n; i++)
236 cidx (i) = i;
237
238 const Array<octave_idx_type> pv = a.col_perm_vec ();
239
240 for (octave_idx_type i = 0; i < n; i++)
241 ridx (i) = pv(i);
242
243 for (octave_idx_type i = 0; i < n; i++)
244 data (i) = 1.0;
245}
246
247template <typename T, typename Alloc>
250 : m_rep (nullptr), m_dimensions (dv)
251{
252 if (dv.ndims () != 2)
253 (*current_liboctave_error_handler)
254 ("Sparse::Sparse (const dim_vector&): dimension mismatch");
256 m_rep = new typename Sparse<T, Alloc>::SparseRep (dv(0), dv(1), 0);
257}
258
259template <typename T, typename Alloc>
262 : m_rep (nullptr), m_dimensions (dv)
263{
264
265 // Work in unsigned long long to avoid overflow issues with numel
266 unsigned long long a_nel = static_cast<unsigned long long>(a.rows ()) *
267 static_cast<unsigned long long>(a.cols ());
268 unsigned long long dv_nel = static_cast<unsigned long long>(dv(0)) *
269 static_cast<unsigned long long>(dv(1));
270
271 if (a_nel != dv_nel)
272 (*current_liboctave_error_handler)
273 ("Sparse::Sparse (const Sparse&, const dim_vector&): dimension mismatch");
274
275 dim_vector old_dims = a.dims ();
276 octave_idx_type new_nzmax = a.nnz ();
277 octave_idx_type new_nr = dv(0);
278 octave_idx_type new_nc = dv(1);
279 octave_idx_type old_nr = old_dims(0);
280 octave_idx_type old_nc = old_dims(1);
281
282 m_rep = new typename Sparse<T, Alloc>::SparseRep (new_nr, new_nc, new_nzmax);
283
284 octave_idx_type kk = 0;
285 xcidx (0) = 0;
286 for (octave_idx_type i = 0; i < old_nc; i++)
287 for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++)
288 {
289 octave_idx_type tmp = i * old_nr + a.ridx (j);
290 octave_idx_type ii = tmp % new_nr;
291 octave_idx_type jj = (tmp - ii) / new_nr;
292 for (octave_idx_type k = kk; k < jj; k++)
293 xcidx (k+1) = j;
294 kk = jj;
295 xdata (j) = a.data (j);
296 xridx (j) = ii;
297 }
298 for (octave_idx_type k = kk; k < new_nc; k++)
299 xcidx (k+1) = new_nzmax;
300}
301
302template <typename T, typename Alloc>
305 const octave::idx_vector& r,
306 const octave::idx_vector& c,
308 bool sum_terms, octave_idx_type nzm)
309 : m_rep (nullptr), m_dimensions ()
310{
311 if (nr < 0)
312 nr = r.extent (0);
313 else if (r.extent (nr) > nr)
314 (*current_liboctave_error_handler)
315 ("sparse: row index %" OCTAVE_IDX_TYPE_FORMAT "out of bound "
316 "%" OCTAVE_IDX_TYPE_FORMAT, r.extent (nr), nr);
317
318 if (nc < 0)
319 nc = c.extent (0);
320 else if (c.extent (nc) > nc)
322 ("sparse: column index %" OCTAVE_IDX_TYPE_FORMAT " out of bound "
323 "%" OCTAVE_IDX_TYPE_FORMAT, r.extent (nc), nc);
324
325 m_dimensions = dim_vector (nr, nc);
326
327 octave_idx_type n = a.numel ();
329 octave_idx_type cl = c.length (nc);
330 bool a_scalar = n == 1;
331 if (a_scalar)
333 if (rl != 1)
334 n = rl;
335 else if (cl != 1)
336 n = cl;
337 }
338
339 if ((rl != 1 && rl != n) || (cl != 1 && cl != n))
340 (*current_liboctave_error_handler) ("sparse: dimension mismatch");
341
342 // Only create m_rep after input validation to avoid memory leak.
343 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, (nzm > 0 ? nzm : 0));
344
345 if (rl <= 1 && cl <= 1)
346 {
347 if (n == 1 && a(0) != T ())
348 {
349 change_capacity (nzm > 1 ? nzm : 1);
350 xridx (0) = r(0);
351 xdata (0) = a(0);
352 std::fill_n (xcidx () + c(0) + 1, nc - c(0), 1);
353 }
354 }
355 else if (a_scalar)
356 {
357 // This is completely specialized, because the sorts can be simplified.
358 T a0 = a(0);
359 if (a0 == T ())
360 {
361 // Do nothing, it's an empty matrix.
362 }
363 else if (cl == 1)
364 {
365 // Sparse column vector. Sort row indices.
366 octave::idx_vector rs = r.sorted ();
367
368 octave_quit ();
369
370 const octave_idx_type *rd = rs.raw ();
371 // Count unique indices.
372 octave_idx_type new_nz = 1;
373 for (octave_idx_type i = 1; i < n; i++)
374 new_nz += rd[i-1] != rd[i];
375
376 // Allocate result.
377 change_capacity (nzm > new_nz ? nzm : new_nz);
378 std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
380 octave_idx_type *rri = ridx ();
381 T *rrd = data ();
382
383 octave_quit ();
385 octave_idx_type k = -1;
387
388 if (sum_terms)
390 // Sum repeated indices.
391 for (octave_idx_type i = 0; i < n; i++)
392 {
393 if (rd[i] != l)
394 {
395 l = rd[i];
396 rri[++k] = rd[i];
397 rrd[k] = a0;
398 }
399 else
400 rrd[k] += a0;
401 }
402 }
403 else
404 {
405 // Pick the last one.
406 for (octave_idx_type i = 0; i < n; i++)
407 {
408 if (rd[i] != l)
409 {
410 l = rd[i];
411 rri[++k] = rd[i];
412 rrd[k] = a0;
413 }
414 }
415 }
416
417 }
418 else
419 {
420 octave::idx_vector rr = r;
421 octave::idx_vector cc = c;
422 const octave_idx_type *rd = rr.raw ();
423 const octave_idx_type *cd = cc.raw ();
425 ci[0] = 0;
426 // Bin counts of column indices.
427 for (octave_idx_type i = 0; i < n; i++)
428 ci[cd[i]+1]++;
429 // Make them cumulative, shifted one to right.
430 for (octave_idx_type i = 1, s = 0; i <= nc; i++)
431 {
432 octave_idx_type s1 = s + ci[i];
433 ci[i] = s;
434 s = s1;
435 }
436
437 octave_quit ();
438
439 // Bucket sort.
441 for (octave_idx_type i = 0; i < n; i++)
442 if (rl == 1)
443 sidx[ci[cd[i]+1]++] = rd[0];
444 else
445 sidx[ci[cd[i]+1]++] = rd[i];
446
447 // Subsorts. We don't need a stable sort, all values are equal.
448 xcidx (0) = 0;
449 for (octave_idx_type j = 0; j < nc; j++)
450 {
451 std::sort (sidx + ci[j], sidx + ci[j+1]);
452 octave_idx_type l = -1;
453 octave_idx_type nzj = 0;
454 // Count.
455 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
456 {
457 octave_idx_type k = sidx[i];
458 if (k != l)
459 {
460 l = k;
461 nzj++;
462 }
463 }
464 // Set column pointer.
465 xcidx (j+1) = xcidx (j) + nzj;
466 }
467
468 change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
469 octave_idx_type *rri = ridx ();
470 T *rrd = data ();
471
472 // Fill-in data.
473 for (octave_idx_type j = 0, jj = -1; j < nc; j++)
474 {
475 octave_quit ();
476 octave_idx_type l = -1;
477 if (sum_terms)
478 {
479 // Sum adjacent terms.
480 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
481 {
482 octave_idx_type k = sidx[i];
483 if (k != l)
484 {
485 l = k;
486 rrd[++jj] = a0;
487 rri[jj] = k;
488 }
489 else
490 rrd[jj] += a0;
491 }
492 }
493 else
494 {
495 // Use the last one.
496 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
497 {
498 octave_idx_type k = sidx[i];
499 if (k != l)
500 {
501 l = k;
502 rrd[++jj] = a0;
503 rri[jj] = k;
504 }
505 }
506 }
507 }
508 }
509 }
510 else if (cl == 1)
511 {
512 // Sparse column vector. Sort row indices.
514 octave::idx_vector rs = r.sorted (rsi);
515
516 octave_quit ();
517
518 const octave_idx_type *rd = rs.raw ();
519 const octave_idx_type *rdi = rsi.data ();
520 // Count unique indices.
521 octave_idx_type new_nz = 1;
522 for (octave_idx_type i = 1; i < n; i++)
523 new_nz += rd[i-1] != rd[i];
524
525 // Allocate result.
526 change_capacity (nzm > new_nz ? nzm : new_nz);
527 std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
528
529 octave_idx_type *rri = ridx ();
530 T *rrd = data ();
531
532 octave_quit ();
533
534 octave_idx_type k = 0;
535 rri[k] = rd[0];
536 rrd[k] = a(rdi[0]);
537
538 if (sum_terms)
539 {
540 // Sum repeated indices.
541 for (octave_idx_type i = 1; i < n; i++)
542 {
543 if (rd[i] != rd[i-1])
544 {
545 rri[++k] = rd[i];
546 rrd[k] = a(rdi[i]);
547 }
548 else
549 rrd[k] += a(rdi[i]);
551 }
552 else
553 {
554 // Pick the last one.
555 for (octave_idx_type i = 1; i < n; i++)
556 {
557 if (rd[i] != rd[i-1])
558 rri[++k] = rd[i];
559 rrd[k] = a(rdi[i]);
560 }
561 }
562
563 maybe_compress (true);
565 else
567 octave::idx_vector rr = r;
568 octave::idx_vector cc = c;
569 const octave_idx_type *rd = rr.raw ();
570 const octave_idx_type *cd = cc.raw ();
572 ci[0] = 0;
573 // Bin counts of column indices.
574 for (octave_idx_type i = 0; i < n; i++)
575 ci[cd[i]+1]++;
576 // Make them cumulative, shifted one to right.
577 for (octave_idx_type i = 1, s = 0; i <= nc; i++)
578 {
579 octave_idx_type s1 = s + ci[i];
580 ci[i] = s;
581 s = s1;
582 }
583
584 octave_quit ();
585
586 typedef std::pair<octave_idx_type, octave_idx_type> idx_pair;
587 // Bucket sort.
588 OCTAVE_LOCAL_BUFFER (idx_pair, spairs, n);
589 for (octave_idx_type i = 0; i < n; i++)
590 {
591 idx_pair& p = spairs[ci[cd[i]+1]++];
592 if (rl == 1)
593 p.first = rd[0];
594 else
595 p.first = rd[i];
596 p.second = i;
597 }
598
599 // Subsorts. We don't need a stable sort, the second index stabilizes it.
600 xcidx (0) = 0;
601 for (octave_idx_type j = 0; j < nc; j++)
602 {
603 std::sort (spairs + ci[j], spairs + ci[j+1]);
604 octave_idx_type l = -1;
605 octave_idx_type nzj = 0;
606 // Count.
607 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
608 {
609 octave_idx_type k = spairs[i].first;
610 if (k != l)
612 l = k;
613 nzj++;
614 }
616 // Set column pointer.
617 xcidx (j+1) = xcidx (j) + nzj;
619
620 change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
622 T *rrd = data ();
623
624 // Fill-in data.
625 for (octave_idx_type j = 0, jj = -1; j < nc; j++)
626 {
627 octave_quit ();
628 octave_idx_type l = -1;
629 if (sum_terms)
631 // Sum adjacent terms.
632 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
633 {
634 octave_idx_type k = spairs[i].first;
635 if (k != l)
636 {
637 l = k;
638 rrd[++jj] = a(spairs[i].second);
639 rri[jj] = k;
640 }
641 else
642 rrd[jj] += a(spairs[i].second);
644 }
645 else
647 // Use the last one.
648 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
649 {
650 octave_idx_type k = spairs[i].first;
651 if (k != l)
653 l = k;
654 rri[++jj] = k;
655 }
656 rrd[jj] = a(spairs[i].second);
657 }
658 }
659 }
660
661 maybe_compress (true);
662 }
663}
664
665/*
666%!assert <*51880> (sparse (1:2, 2, 1:2, 2, 2), sparse ([0, 1; 0, 2]))
667%!assert <*51880> (sparse (1:2, 1, 1:2, 2, 2), sparse ([1, 0; 2, 0]))
668%!assert <*51880> (sparse (1:2, 2, 1:2, 2, 3), sparse ([0, 1, 0; 0, 2, 0]))
669*/
670
671template <typename T, typename Alloc>
674 : m_rep (nullptr), m_dimensions (a.dims ())
675{
676 if (m_dimensions.ndims () > 2)
677 (*current_liboctave_error_handler)
678 ("Sparse::Sparse (const Array<T>&): dimension mismatch");
679
680 octave_idx_type nr = rows ();
681 octave_idx_type nc = cols ();
683 octave_idx_type new_nzmax = 0;
684
685 // First count the number of nonzero terms
686 for (octave_idx_type i = 0; i < len; i++)
687 if (a(i) != T ())
688 new_nzmax++;
689
690 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, new_nzmax);
691
692 octave_idx_type ii = 0;
693 xcidx (0) = 0;
694 for (octave_idx_type j = 0; j < nc; j++)
695 {
696 for (octave_idx_type i = 0; i < nr; i++)
697 if (a.elem (i, j) != T ())
698 {
699 xdata (ii) = a.elem (i, j);
700 xridx (ii++) = i;
701 }
702 xcidx (j+1) = ii;
703 }
704}
705
706template <typename T, typename Alloc>
709{
710 if (--m_rep->m_count == 0)
711 delete m_rep;
712}
713
714template <typename T, typename Alloc>
717{
718 if (this != &a)
719 {
720 if (--m_rep->m_count == 0)
721 delete m_rep;
722
723 m_rep = a.m_rep;
724 m_rep->m_count++;
725
726 m_dimensions = a.m_dimensions;
727 }
728
729 return *this;
730}
731
732template <typename T, typename Alloc>
736{
737 octave_idx_type n = m_dimensions.ndims ();
738
739 if (n <= 0 || n != ra_idx.numel ())
740 (*current_liboctave_error_handler)
741 ("Sparse<T, Alloc>::compute_index: invalid ra_idxing operation");
742
743 octave_idx_type retval = -1;
744
745 retval = ra_idx(--n);
746
747 while (--n >= 0)
748 {
749 retval *= m_dimensions(n);
750 retval += ra_idx(n);
751 }
752
753 return retval;
754}
755
756template <typename T, typename Alloc>
758T
760{
761 (*current_liboctave_error_handler) ("%s (%" OCTAVE_IDX_TYPE_FORMAT "): "
762 "range error", fcn, n);
763}
764
765template <typename T, typename Alloc>
767T&
769{
770 (*current_liboctave_error_handler) ("%s (%" OCTAVE_IDX_TYPE_FORMAT "): "
771 "range error", fcn, n);
772}
773
774template <typename T, typename Alloc>
776T
778 octave_idx_type j) const
779{
780 (*current_liboctave_error_handler)
781 ("%s (%" OCTAVE_IDX_TYPE_FORMAT ", %" OCTAVE_IDX_TYPE_FORMAT "): "
782 "range error", fcn, i, j);
783}
784
785template <typename T, typename Alloc>
787T&
790{
791 (*current_liboctave_error_handler)
792 ("%s (%" OCTAVE_IDX_TYPE_FORMAT ", %" OCTAVE_IDX_TYPE_FORMAT "): "
793 "range error", fcn, i, j);
794}
795
796template <typename T, typename Alloc>
798T
800 const Array<octave_idx_type>& ra_idx) const
801{
802 std::ostringstream buf;
803
804 buf << fcn << " (";
805
807
808 if (n > 0)
809 buf << ra_idx(0);
810
811 for (octave_idx_type i = 1; i < n; i++)
812 buf << ", " << ra_idx(i);
813
814 buf << "): range error";
815
816 std::string buf_str = buf.str ();
817
818 (*current_liboctave_error_handler) ("%s", buf_str.c_str ());
819}
820
821template <typename T, typename Alloc>
823T&
826{
827 std::ostringstream buf;
828
829 buf << fcn << " (";
830
832
833 if (n > 0)
834 buf << ra_idx(0);
835
836 for (octave_idx_type i = 1; i < n; i++)
837 buf << ", " << ra_idx(i);
838
839 buf << "): range error";
840
841 std::string buf_str = buf.str ();
842
843 (*current_liboctave_error_handler) ("%s", buf_str.c_str ());
844}
845
846template <typename T, typename Alloc>
850{
851 Sparse<T, Alloc> retval;
852 dim_vector dims2 = new_dims;
853
854 if (dims2.ndims () > 2)
855 {
856 (*current_liboctave_warning_with_id_handler)
857 ("Octave:reshape-smashes-dims",
858 "reshape: sparse reshape to N-D array smashes dims");
859
860 for (octave_idx_type i = 2; i < dims2.ndims (); i++)
861 dims2(1) *= dims2(i);
862
863 dims2.resize (2);
864 }
865
866 if (m_dimensions != dims2)
867 {
868 if (m_dimensions.numel () == dims2.numel ())
869 {
870 octave_idx_type new_nnz = nnz ();
871 octave_idx_type new_nr = dims2 (0);
872 octave_idx_type new_nc = dims2 (1);
873 octave_idx_type old_nr = rows ();
874 octave_idx_type old_nc = cols ();
875 retval = Sparse<T, Alloc> (new_nr, new_nc, new_nnz);
876
877 octave_idx_type kk = 0;
878 retval.xcidx (0) = 0;
879 // Quotient and remainder of i * old_nr divided by new_nr.
880 // Track them individually to avoid overflow (bug #42850).
881 octave_idx_type i_old_qu = 0;
882 octave_idx_type i_old_rm = static_cast<octave_idx_type> (-old_nr);
883 for (octave_idx_type i = 0; i < old_nc; i++)
884 {
885 i_old_rm += old_nr;
886 if (i_old_rm >= new_nr)
887 {
888 i_old_qu += i_old_rm / new_nr;
889 i_old_rm = i_old_rm % new_nr;
890 }
891 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
892 {
893 octave_idx_type ii, jj;
894 ii = (i_old_rm + ridx (j)) % new_nr;
895 jj = i_old_qu + (i_old_rm + ridx (j)) / new_nr;
896
897 // Original calculation subject to overflow
898 // ii = (i*old_nr + ridx (j)) % new_nr
899 // jj = (i*old_nr + ridx (j)) / new_nr
900 for (octave_idx_type k = kk; k < jj; k++)
901 retval.xcidx (k+1) = j;
902 kk = jj;
903 retval.xdata (j) = data (j);
904 retval.xridx (j) = ii;
905 }
906 }
907 for (octave_idx_type k = kk; k < new_nc; k++)
908 retval.xcidx (k+1) = new_nnz;
909 }
910 else
911 {
912 std::string dimensions_str = m_dimensions.str ();
913 std::string new_dims_str = new_dims.str ();
914
915 (*current_liboctave_error_handler)
916 ("reshape: can't reshape %s array to %s array",
917 dimensions_str.c_str (), new_dims_str.c_str ());
918 }
919 }
920 else
921 retval = *this;
922
923 return retval;
924}
925
926template <typename T, typename Alloc>
930{
931 // The only valid permutations of a sparse array are [1, 2] and [2, 1].
932
933 bool fail = false;
934 bool trans = false;
935
936 if (perm_vec.numel () == 2)
937 {
938 if (perm_vec(0) == 0 && perm_vec(1) == 1)
939 /* do nothing */;
940 else if (perm_vec(0) == 1 && perm_vec(1) == 0)
941 trans = true;
942 else
943 fail = true;
944 }
945 else
946 fail = true;
947
948 if (fail)
949 (*current_liboctave_error_handler)
950 ("permutation vector contains an invalid element");
951
952 return trans ? this->transpose () : *this;
953}
954
955template <typename T, typename Alloc>
957void
959{
960 octave_idx_type nr = rows ();
961 octave_idx_type nc = cols ();
962
963 if (nr == 0)
964 resize (1, std::max (nc, n));
965 else if (nc == 0)
966 resize (nr, (n + nr - 1) / nr); // Ain't it wicked?
967 else if (nr == 1)
968 resize (1, n);
969 else if (nc == 1)
970 resize (n, 1);
971 else
973}
974
975template <typename T, typename Alloc>
977void
979{
980 octave_idx_type n = dv.ndims ();
981
982 if (n != 2)
983 (*current_liboctave_error_handler) ("sparse array must be 2-D");
984
985 resize (dv(0), dv(1));
986}
987
988template <typename T, typename Alloc>
990void
992{
993 if (r < 0 || c < 0)
994 (*current_liboctave_error_handler) ("can't resize to negative dimension");
995
996 if (r == dim1 () && c == dim2 ())
997 return;
998
999 // This wouldn't be necessary for r >= rows () if m_nrows wasn't part of the
1000 // Sparse rep. It is not good for anything in there.
1001 make_unique ();
1002
1003 if (r < rows ())
1004 {
1005 octave_idx_type i = 0;
1006 octave_idx_type k = 0;
1007 for (octave_idx_type j = 1; j <= m_rep->m_ncols; j++)
1008 {
1009 octave_idx_type u = xcidx (j);
1010 for (; i < u; i++)
1011 if (xridx (i) < r)
1012 {
1013 xdata (k) = xdata (i);
1014 xridx (k++) = xridx (i);
1015 }
1016 xcidx (j) = k;
1017 }
1018 }
1019
1020 m_rep->m_nrows = m_dimensions(0) = r;
1021
1022 if (c != m_rep->m_ncols)
1023 {
1024 octave_idx_type *new_cidx = m_rep->idx_type_allocate (c+1);
1025 std::copy_n (m_rep->m_cidx, std::min (c, m_rep->m_ncols) + 1, new_cidx);
1026 m_rep->idx_type_deallocate (m_rep->m_cidx, m_rep->m_ncols + 1);
1027 m_rep->m_cidx = new_cidx;
1028
1029 if (c > m_rep->m_ncols)
1030 std::fill_n (m_rep->m_cidx + m_rep->m_ncols + 1, c - m_rep->m_ncols,
1031 m_rep->m_cidx[m_rep->m_ncols]);
1032 }
1033
1034 m_rep->m_ncols = m_dimensions(1) = c;
1035
1036 m_rep->change_length (m_rep->nnz ());
1037}
1038
1039template <typename T, typename Alloc>
1044{
1045 octave_idx_type a_rows = a.rows ();
1046 octave_idx_type a_cols = a.cols ();
1047 octave_idx_type nr = rows ();
1048 octave_idx_type nc = cols ();
1049
1050 if (r < 0 || r + a_rows > rows () || c < 0 || c + a_cols > cols ())
1051 (*current_liboctave_error_handler) ("range error for insert");
1052
1053 // First count the number of elements in the final array
1054 octave_idx_type nel = cidx (c) + a.nnz ();
1055
1056 if (c + a_cols < nc)
1057 nel += cidx (nc) - cidx (c + a_cols);
1058
1059 for (octave_idx_type i = c; i < c + a_cols; i++)
1060 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1061 if (ridx (j) < r || ridx (j) >= r + a_rows)
1062 nel++;
1063
1064 Sparse<T, Alloc> tmp (*this);
1065 --m_rep->m_count;
1066 m_rep = new typename Sparse<T, Alloc>::SparseRep (nr, nc, nel);
1067
1068 for (octave_idx_type i = 0; i < tmp.cidx (c); i++)
1069 {
1070 data (i) = tmp.data (i);
1071 ridx (i) = tmp.ridx (i);
1072 }
1073 for (octave_idx_type i = 0; i < c + 1; i++)
1074 cidx (i) = tmp.cidx (i);
1075
1076 octave_idx_type ii = cidx (c);
1077
1078 for (octave_idx_type i = c; i < c + a_cols; i++)
1079 {
1080 octave_quit ();
1081
1082 for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1083 if (tmp.ridx (j) < r)
1084 {
1085 data (ii) = tmp.data (j);
1086 ridx (ii++) = tmp.ridx (j);
1087 }
1088
1089 octave_quit ();
1090
1091 for (octave_idx_type j = a.cidx (i-c); j < a.cidx (i-c+1); j++)
1092 {
1093 data (ii) = a.data (j);
1094 ridx (ii++) = r + a.ridx (j);
1095 }
1096
1097 octave_quit ();
1098
1099 for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1100 if (tmp.ridx (j) >= r + a_rows)
1101 {
1102 data (ii) = tmp.data (j);
1103 ridx (ii++) = tmp.ridx (j);
1104 }
1105
1106 cidx (i+1) = ii;
1107 }
1108
1109 for (octave_idx_type i = c + a_cols; i < nc; i++)
1110 {
1111 for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1112 {
1113 data (ii) = tmp.data (j);
1114 ridx (ii++) = tmp.ridx (j);
1115 }
1116 cidx (i+1) = ii;
1117 }
1118
1119 return *this;
1120}
1121
1122template <typename T, typename Alloc>
1127{
1128
1129 if (ra_idx.numel () != 2)
1130 (*current_liboctave_error_handler) ("range error for insert");
1131
1132 return insert (a, ra_idx(0), ra_idx(1));
1133}
1134
1135template <typename T, typename Alloc>
1139{
1140 assert (ndims () == 2);
1141
1142 octave_idx_type nr = rows ();
1143 octave_idx_type nc = cols ();
1144 octave_idx_type nz = nnz ();
1145 Sparse<T, Alloc> retval (nc, nr, nz);
1146
1147 for (octave_idx_type i = 0; i < nz; i++)
1148 retval.xcidx (ridx (i) + 1)++;
1149 // retval.xcidx[1:nr] holds the row degrees for rows 0:(nr-1)
1150 nz = 0;
1151 for (octave_idx_type i = 1; i <= nr; i++)
1152 {
1153 const octave_idx_type tmp = retval.xcidx (i);
1154 retval.xcidx (i) = nz;
1155 nz += tmp;
1156 }
1157 // retval.xcidx[1:nr] holds row entry *start* offsets for rows 0:(nr-1)
1158
1159 for (octave_idx_type j = 0; j < nc; j++)
1160 for (octave_idx_type k = cidx (j); k < cidx (j+1); k++)
1161 {
1162 octave_idx_type q = retval.xcidx (ridx (k) + 1)++;
1163 retval.xridx (q) = j;
1164 retval.xdata (q) = data (k);
1165 }
1166 assert (nnz () == retval.xcidx (nr));
1167 // retval.xcidx[1:nr] holds row entry *end* offsets for rows 0:(nr-1)
1168 // and retval.xcidx[0:(nr-1)] holds their row entry *start* offsets
1169
1170 return retval;
1171}
1172
1173// Lower bound lookup. Could also use octave_sort, but that has upper bound
1174// semantics, so requires some manipulation to set right. Uses a plain loop
1175// for small columns.
1176static
1179 octave_idx_type ri)
1180{
1181 if (nr <= 8)
1182 {
1184 for (l = 0; l < nr; l++)
1185 if (ridx[l] >= ri)
1186 break;
1187 return l;
1188 }
1189 else
1190 return std::lower_bound (ridx, ridx + nr, ri) - ridx;
1191}
1192
1193template <typename T, typename Alloc>
1195void
1197{
1198 Sparse<T, Alloc> retval;
1199
1200 assert (ndims () == 2);
1201
1202 octave_idx_type nr = dim1 ();
1203 octave_idx_type nc = dim2 ();
1204 octave_idx_type nz = nnz ();
1205
1206 octave_idx_type nel = numel (); // Can throw.
1207
1208 const dim_vector idx_dims = idx.orig_dimensions ();
1209
1210 if (idx.extent (nel) > nel)
1211 octave::err_del_index_out_of_range (true, idx.extent (nel), nel);
1212
1213 if (nc == 1)
1214 {
1215 // Sparse column vector.
1216 const Sparse<T, Alloc> tmp = *this; // constant copy to prevent COW.
1217
1218 octave_idx_type lb, ub;
1219
1220 if (idx.is_cont_range (nel, lb, ub))
1221 {
1222 // Special-case a contiguous range.
1223 // Look-up indices first.
1224 octave_idx_type li = lblookup (tmp.ridx (), nz, lb);
1225 octave_idx_type ui = lblookup (tmp.ridx (), nz, ub);
1226 // Copy data and adjust indices.
1227 octave_idx_type nz_new = nz - (ui - li);
1228 *this = Sparse<T, Alloc> (nr - (ub - lb), 1, nz_new);
1229 std::copy_n (tmp.data (), li, data ());
1230 std::copy_n (tmp.ridx (), li, xridx ());
1231 std::copy (tmp.data () + ui, tmp.data () + nz, xdata () + li);
1232 mx_inline_sub (nz - ui, xridx () + li, tmp.ridx () + ui, ub - lb);
1233 xcidx (1) = nz_new;
1234 }
1235 else
1236 {
1237 OCTAVE_LOCAL_BUFFER (octave_idx_type, ridx_new, nz);
1238 OCTAVE_LOCAL_BUFFER (T, data_new, nz);
1239 octave::idx_vector sidx = idx.sorted (true);
1240 const octave_idx_type *sj = sidx.raw ();
1241 octave_idx_type sl = sidx.length (nel);
1242 octave_idx_type nz_new = 0;
1243 octave_idx_type j = 0;
1244 for (octave_idx_type i = 0; i < nz; i++)
1245 {
1246 octave_idx_type r = tmp.ridx (i);
1247 for (; j < sl && sj[j] < r; j++) ;
1248 if (j == sl || sj[j] > r)
1249 {
1250 data_new[nz_new] = tmp.data (i);
1251 ridx_new[nz_new++] = r - j;
1252 }
1253 }
1254
1255 *this = Sparse<T, Alloc> (nr - sl, 1, nz_new);
1256 std::copy_n (ridx_new, nz_new, ridx ());
1257 std::copy_n (data_new, nz_new, xdata ());
1258 xcidx (1) = nz_new;
1259 }
1260 }
1261 else if (nr == 1)
1262 {
1263 // Sparse row vector.
1264 octave_idx_type lb, ub;
1265 if (idx.is_cont_range (nc, lb, ub))
1266 {
1267 const Sparse<T, Alloc> tmp = *this;
1268 octave_idx_type lbi = tmp.cidx (lb);
1269 octave_idx_type ubi = tmp.cidx (ub);
1270 octave_idx_type new_nz = nz - (ubi - lbi);
1271 *this = Sparse<T, Alloc> (1, nc - (ub - lb), new_nz);
1272 std::copy_n (tmp.data (), lbi, data ());
1273 std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1274 std::fill_n (ridx (), new_nz, static_cast<octave_idx_type> (0));
1275 std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
1276 mx_inline_sub (nc - ub, xcidx () + 1, tmp.cidx () + ub + 1,
1277 ubi - lbi);
1278 }
1279 else
1280 *this = index (idx.complement (nc));
1281 }
1282 else if (idx.length (nel) != 0)
1283 {
1284 if (idx.is_colon_equiv (nel))
1285 *this = Sparse<T, Alloc> ();
1286 else
1287 {
1288 *this = index (octave::idx_vector::colon);
1289 delete_elements (idx);
1290 *this = transpose (); // We want a row vector.
1291 }
1292 }
1293}
1294
// Two-index null assignment A(idx_i, idx_j) = [] on a 2-D sparse matrix
// stored in compressed-column (CSC) form.  One index must be a colon:
// a colon row index deletes whole columns, a colon column index deletes
// whole rows.  If neither is a colon, the call is only accepted when at
// least one index has zero length (a no-op); otherwise it is an error.
// NOTE(review): this extracted listing dropped some source lines; the
// qualified signature line naming Sparse<T, Alloc>::delete_elements and
// its first parameter idx_i is missing between lines 1297 and 1299.
1295template <typename T, typename Alloc>
1297void
1299 const octave::idx_vector& idx_j)
1300{
1301 assert (ndims () == 2);
1302
1303 octave_idx_type nr = dim1 ();
1304 octave_idx_type nc = dim2 ();
1305 octave_idx_type nz = nnz ();
1306
1307 if (idx_i.is_colon ())
1308 {
1309 // Deleting columns.
1310 octave_idx_type lb, ub;
1311 if (idx_j.extent (nc) > nc)
1312 octave::err_del_index_out_of_range (false, idx_j.extent (nc), nc);
1313 else if (idx_j.is_cont_range (nc, lb, ub))
1314 {
1315 if (lb == 0 && ub == nc)
1316 {
1317 // Delete all rows and columns.
1318 *this = Sparse<T, Alloc> (nr, 0);
1319 }
1320 else if (nz == 0)
1321 {
1322 // No elements to preserve; adjust dimensions.
1323 *this = Sparse<T, Alloc> (nr, nc - (ub - lb));
1324 }
1325 else
1326 {
 // Contiguous column range [lb, ub): splice the CSC arrays
 // around the deleted span.  lbi/ubi bound the deleted
 // nonzeros within data/ridx.
1327 const Sparse<T, Alloc> tmp = *this;
1328 octave_idx_type lbi = tmp.cidx (lb);
1329 octave_idx_type ubi = tmp.cidx (ub);
1330 octave_idx_type new_nz = nz - (ubi - lbi);
1331
1332 *this = Sparse<T, Alloc> (nr, nc - (ub - lb), new_nz);
1333 std::copy_n (tmp.data (), lbi, data ());
1334 std::copy_n (tmp.ridx (), lbi, ridx ());
1335 std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1336 std::copy (tmp.ridx () + ubi, tmp.ridx () + nz, xridx () + lbi);
1337 std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
 // Trailing column pointers shift down by the count of
 // deleted nonzeros (ubi - lbi).
1338 mx_inline_sub (nc - ub, xcidx () + lb + 1,
1339 tmp.cidx () + ub + 1, ubi - lbi);
1340 }
1341 }
1342 else
 // Non-contiguous column set: equivalent to keeping the complement.
1343 *this = index (idx_i, idx_j.complement (nc));
1344 }
1345 else if (idx_j.is_colon ())
1346 {
1347 // Deleting rows.
1348 octave_idx_type lb, ub;
1349 if (idx_i.extent (nr) > nr)
1350 octave::err_del_index_out_of_range (false, idx_i.extent (nr), nr);
1351 else if (idx_i.is_cont_range (nr, lb, ub))
1352 {
1353 if (lb == 0 && ub == nr)
1354 {
1355 // Delete all rows and columns.
1356 *this = Sparse<T, Alloc> (0, nc);
1357 }
1358 else if (nz == 0)
1359 {
1360 // No elements to preserve; adjust dimensions.
1361 *this = Sparse<T, Alloc> (nr - (ub - lb), nc);
1362 }
1363 else
1364 {
1365 // This is more memory-efficient than the approach below.
 // Extract the row blocks above (tmpl) and below (tmpu) the
 // deleted range, then merge them column by column.
1366 const Sparse<T, Alloc> tmpl = index (octave::idx_vector (0, lb), idx_j);
1367 const Sparse<T, Alloc> tmpu = index (octave::idx_vector (ub, nr), idx_j);
1368 *this = Sparse<T, Alloc> (nr - (ub - lb), nc,
1369 tmpl.nnz () + tmpu.nnz ());
1370 for (octave_idx_type j = 0, k = 0; j < nc; j++)
1371 {
1372 for (octave_idx_type i = tmpl.cidx (j); i < tmpl.cidx (j+1);
1373 i++)
1374 {
1375 xdata (k) = tmpl.data (i);
1376 xridx (k++) = tmpl.ridx (i);
1377 }
1378 for (octave_idx_type i = tmpu.cidx (j); i < tmpu.cidx (j+1);
1379 i++)
1380 {
1381 xdata (k) = tmpu.data (i);
 // Rows from the lower block land just after the kept upper
 // block, hence the + lb offset.
1382 xridx (k++) = tmpu.ridx (i) + lb;
1383 }
1384
1385 xcidx (j+1) = k;
1386 }
1387 }
1388 }
1389 else
1390 {
1391 // This is done by transposing, deleting columns, then transposing
1392 // again.
1393 Sparse<T, Alloc> tmp = transpose ();
1394 tmp.delete_elements (idx_j, idx_i);
1395 *this = tmp.transpose ();
1396 }
1397 }
1398 else
1399 {
1400 // Empty assignment (no elements to delete) is OK if there is at
1401 // least one zero-length index and at most one other index that is
1402 // non-colon (or equivalent) index. Since we only have two
1403 // indices, we just need to check that we have at least one zero
1404 // length index. Matlab considers "[]" to be an empty index but
1405 // not "false". We accept both.
1406
1407 bool empty_assignment
1408 = (idx_i.length (nr) == 0 || idx_j.length (nc) == 0);
1409
1410 if (! empty_assignment)
1411 (*current_liboctave_error_handler)
1412 ("a null assignment can only have one non-colon index");
1413 }
1414}
1415
// Delete elements along dimension DIM (0 = rows of each column,
// 1 = columns) by forwarding to the two-index delete_elements overload
// with a colon for the untouched dimension.
// NOTE(review): the extracted listing omits the qualified signature
// line; presumably it reads
// Sparse<T, Alloc>::delete_elements (const octave::idx_vector& idx, int dim).
1416template <typename T, typename Alloc>
1418void
1420{
1421 if (dim == 0)
 // dim == 0: IDX selects rows to delete across all columns.
1422 delete_elements (idx, octave::idx_vector::colon);
1423 else if (dim == 1)
 // dim == 1: IDX selects columns to delete across all rows.
1424 delete_elements (octave::idx_vector::colon, idx);
1425 else
 // Sparse objects here are always 2-D (see the asserts in the
 // two-index overload), so any other dim is an error.
1426 (*current_liboctave_error_handler) ("invalid dimension in delete_elements");
1427}
1428
// Linear (one-index) indexing: return A(idx) as a sparse object.
// resize_ok = true emulates resizable indexing: out-of-range indices
// enlarge a temporary copy (padding with zeros) instead of erroring.
// Shape rules follow Octave/Matlab: colon yields a column vector;
// indexing a column vector by a vector yields a column vector;
// otherwise the result inherits the shape of the index.
// NOTE(review): the extracted listing omits the return-type line(s)
// (Sparse<T, Alloc>) between lines 1429 and 1432.
1429template <typename T, typename Alloc>
1432Sparse<T, Alloc>::index (const octave::idx_vector& idx, bool resize_ok) const
1433{
1434 Sparse<T, Alloc> retval;
1435
1436 assert (ndims () == 2);
1437
1438 octave_idx_type nr = dim1 ();
1439 octave_idx_type nc = dim2 ();
1440 octave_idx_type nz = nnz ();
1441
1442 octave_idx_type nel = numel (); // Can throw.
1443
1444 const dim_vector idx_dims = idx.orig_dimensions ().redim (2);
1445
1446 if (idx.is_colon ())
1447 {
1448 if (nc == 1)
 // Already a column vector; A(:) is a shallow copy.
1449 retval = *this;
1450 else
1451 {
1452 // Fast magic colon processing.
 // Flatten column-major: row index becomes ridx + i*nr; the
 // nonzero order is already correct for CSC.
1453 retval = Sparse<T, Alloc> (nel, 1, nz);
1454
1455 for (octave_idx_type i = 0; i < nc; i++)
1456 {
1457 for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1458 {
1459 retval.xdata (j) = data (j);
1460 retval.xridx (j) = ridx (j) + i * nr;
1461 }
1462 }
1463
1464 retval.xcidx (0) = 0;
1465 retval.xcidx (1) = nz;
1466 }
1467 }
1468 else if (idx.extent (nel) > nel)
1469 {
1470 if (! resize_ok)
1471 octave::err_index_out_of_range (1, 1, idx.extent (nel), nel, dims ());
1472
1473 // resize_ok is completely handled here.
1474 octave_idx_type ext = idx.extent (nel);
1475 Sparse<T, Alloc> tmp = *this;
1476 tmp.resize1 (ext);
1477 retval = tmp.index (idx);
1478 }
1479 else if (nr == 1 && nc == 1)
1480 {
1481 // You have to be pretty sick to get to this bit of code,
1482 // since you have a scalar stored as a sparse matrix, and
1483 // then want to make a dense matrix with sparse representation.
1484 // Ok, we'll do it, but you deserve what you get!!
1485 retval = (Sparse<T, Alloc> (idx_dims(0), idx_dims(1), nz ? data (0) : T ()));
1486 }
1487 else if (nc == 1)
1488 {
1489 // Sparse column vector.
1490 octave_idx_type lb, ub;
1491
1492 if (idx.is_scalar ())
1493 {
1494 // Scalar index - just a binary lookup.
1495 octave_idx_type i = lblookup (ridx (), nz, idx(0));
1496 if (i < nz && ridx (i) == idx(0))
1497 retval = Sparse (1, 1, data (i));
1498 else
 // Indexed element is an implicit zero.
1499 retval = Sparse (1, 1);
1500 }
1501 else if (idx.is_cont_range (nel, lb, ub))
1502 {
1503 // Special-case a contiguous range.
1504 // Look-up indices first.
1505 octave_idx_type li = lblookup (ridx (), nz, lb);
1506 octave_idx_type ui = lblookup (ridx (), nz, ub);
1507 // Copy data and adjust indices.
1508 octave_idx_type nz_new = ui - li;
1509 retval = Sparse<T, Alloc> (ub - lb, 1, nz_new);
1510 std::copy_n (data () + li, nz_new, retval.data ());
1511 mx_inline_sub (nz_new, retval.xridx (), ridx () + li, lb);
1512 retval.xcidx (1) = nz_new;
1513 }
1514 else if (idx.is_permutation (nel) && idx.isvector ())
1515 {
1516 if (idx.is_range () && idx.increment () == -1)
1517 {
 // Reversal permutation nel:-1:1 -- reverse data and flip
 // the row indices arithmetically.
1518 retval = Sparse<T, Alloc> (nr, 1, nz);
1519
1520 for (octave_idx_type j = 0; j < nz; j++)
1521 retval.ridx (j) = nr - ridx (nz - j - 1) - 1;
1522
1523 std::copy_n (cidx (), 2, retval.cidx ());
1524 std::reverse_copy (data (), data () + nz, retval.data ());
1525 }
1526 else
1527 {
 // General permutation: go through a dense copy.
1528 Array<T> tmp = array_value ();
1529 tmp = tmp.index (idx);
1530 retval = Sparse<T, Alloc> (tmp);
1531 }
1532 }
1533 else
1534 {
1535 // If indexing a sparse column vector by a vector, the result is a
1536 // sparse column vector, otherwise it inherits the shape of index.
1537 // Vector transpose is cheap, so do it right here.
1538
1539 Array<octave_idx_type> tmp_idx = idx.as_array ().as_matrix ();
1540
1541 const Array<octave_idx_type> idxa = (idx_dims(0) == 1
1542 ? tmp_idx.transpose ()
1543 : tmp_idx);
1544
1545 octave_idx_type new_nr = idxa.rows ();
1546 octave_idx_type new_nc = idxa.cols ();
1547
1548 // Lookup.
1549 // FIXME: Could specialize for sorted idx?
1550 Array<octave_idx_type> lidx (dim_vector (new_nr, new_nc));
1551 for (octave_idx_type i = 0; i < new_nr*new_nc; i++)
1552 lidx.xelem (i) = lblookup (ridx (), nz, idxa(i));
1553
1554 // Count matches.
 // Misses are flagged by setting the lookup result to nz so
 // the copy pass below can skip them.
1555 retval = Sparse<T, Alloc> (idxa.rows (), idxa.cols ());
1556 for (octave_idx_type j = 0; j < new_nc; j++)
1557 {
1558 octave_idx_type nzj = 0;
1559 for (octave_idx_type i = 0; i < new_nr; i++)
1560 {
1561 octave_idx_type l = lidx.xelem (i, j);
1562 if (l < nz && ridx (l) == idxa(i, j))
1563 nzj++;
1564 else
1565 lidx.xelem (i, j) = nz;
1566 }
1567 retval.xcidx (j+1) = retval.xcidx (j) + nzj;
1568 }
1569
1570 retval.change_capacity (retval.xcidx (new_nc));
1571
1572 // Copy data and set row indices.
1573 octave_idx_type k = 0;
1574 for (octave_idx_type j = 0; j < new_nc; j++)
1575 for (octave_idx_type i = 0; i < new_nr; i++)
1576 {
1577 octave_idx_type l = lidx.xelem (i, j);
1578 if (l < nz)
1579 {
1580 retval.data (k) = data (l);
1581 retval.xridx (k++) = i;
1582 }
1583 }
1584 }
1585 }
1586 else if (nr == 1)
1587 {
 // Sparse row vector.
1588 octave_idx_type lb, ub;
1589 if (idx.is_scalar ())
1590 retval = Sparse<T, Alloc> (1, 1, elem (0, idx(0)));
1591 else if (idx.is_cont_range (nel, lb, ub))
1592 {
1593 // Special-case a contiguous range.
 // For a row vector each column holds at most one element, so
 // column pointers are sliced directly.
1594 octave_idx_type lbi = cidx (lb);
1595 octave_idx_type ubi = cidx (ub);
1596 octave_idx_type new_nz = ubi - lbi;
1597 retval = Sparse<T, Alloc> (1, ub - lb, new_nz);
1598 std::copy_n (data () + lbi, new_nz, retval.data ());
1599 std::fill_n (retval.ridx (), new_nz, static_cast<octave_idx_type> (0));
1600 mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1601 }
1602 else
1603 {
1604 // Sparse row vectors occupy O(nr) storage anyway, so let's just
1605 // convert the matrix to full, index, and sparsify the result.
1606 retval = Sparse<T, Alloc> (array_value ().index (idx));
1607 }
1608 }
1609 else
1610 {
1611 if (nr != 0 && idx.is_scalar ())
 // Decompose the linear index into (row, column) directly.
1612 retval = Sparse<T, Alloc> (1, 1, elem (idx(0) % nr, idx(0) / nr));
1613 else
1614 {
1615 // Indexing a non-vector sparse matrix by linear indexing.
1616 // I suppose this is rare (and it may easily overflow), so let's take
1617 // the easy way, and reshape first to column vector, which is already
1618 // handled above.
1619 retval = index (octave::idx_vector::colon).index (idx);
1620 // In this case we're supposed to always inherit the shape, but
1621 // column(row) doesn't do it, so we'll do it instead.
1622 if (idx_dims(0) == 1 && idx_dims(1) != 1)
1623 retval = retval.transpose ();
1624 }
1625 }
1626
1627 return retval;
1628}
1629
// Two-index indexing: return A(idx_i, idx_j) as a sparse matrix.
// resize_ok = true lets out-of-range indices enlarge a temporary copy
// (zero padding) instead of raising an error.  The many branches are
// performance special cases; the fully general case decomposes
// A(I, J) into A(:, J)(I, :).
// NOTE(review): the extracted listing omits the return-type and first
// signature line(s) (Sparse<T, Alloc>::index (const octave::idx_vector&
// idx_i, ...)) between lines 1630 and 1634, and drops the
// OCTAVE_LOCAL_BUFFER declarations of ij (line 1727) and li/ui
// (lines 1762-1763) used below.
1630template <typename T, typename Alloc>
1634 const octave::idx_vector& idx_j,
1635 bool resize_ok) const
1636{
1637 Sparse<T, Alloc> retval;
1638
1639 assert (ndims () == 2);
1640
1641 octave_idx_type nr = dim1 ();
1642 octave_idx_type nc = dim2 ();
1643
1644 octave_idx_type n = idx_i.length (nr);
1645 octave_idx_type m = idx_j.length (nc);
1646
1647 octave_idx_type lb, ub;
1648
1649 if (idx_i.extent (nr) > nr || idx_j.extent (nc) > nc)
1650 {
1651 // resize_ok is completely handled here.
1652 if (resize_ok)
1653 {
1654 octave_idx_type ext_i = idx_i.extent (nr);
1655 octave_idx_type ext_j = idx_j.extent (nc);
1656 Sparse<T, Alloc> tmp = *this;
1657 tmp.resize (ext_i, ext_j);
1658 retval = tmp.index (idx_i, idx_j);
1659 }
1660 else if (idx_i.extent (nr) > nr)
1661 octave::err_index_out_of_range (2, 1, idx_i.extent (nr), nr, dims ());
1662 else
1663 octave::err_index_out_of_range (2, 2, idx_j.extent (nc), nc, dims ());
1664 }
1665 else if (nr == 1 && nc == 1)
1666 {
1667 // Scalars stored as sparse matrices occupy more memory than
1668 // a scalar, so let's just convert the matrix to full, index,
1669 // and sparsify the result.
1670
1671 retval = Sparse<T, Alloc> (array_value ().index (idx_i, idx_j));
1672 }
1673 else if (idx_i.is_colon ())
1674 {
1675 // Great, we're just manipulating columns. This is going to be quite
1676 // efficient, because the columns can stay compressed as they are.
1677 if (idx_j.is_colon ())
1678 retval = *this; // Shallow copy.
1679 else if (idx_j.is_cont_range (nc, lb, ub))
1680 {
1681 // Special-case a contiguous range.
1682 octave_idx_type lbi = cidx (lb);
1683 octave_idx_type ubi = cidx (ub);
1684 octave_idx_type new_nz = ubi - lbi;
1685 retval = Sparse<T, Alloc> (nr, ub - lb, new_nz);
1686 std::copy_n (data () + lbi, new_nz, retval.data ());
1687 std::copy_n (ridx () + lbi, new_nz, retval.ridx ());
 // Rebase the column pointers so the first kept column starts at 0.
1688 mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1689 }
1690 else
1691 {
1692 // Count new nonzero elements.
1693 retval = Sparse<T, Alloc> (nr, m);
1694 for (octave_idx_type j = 0; j < m; j++)
1695 {
1696 octave_idx_type jj = idx_j(j);
1697 retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1698 }
1699
1700 retval.change_capacity (retval.xcidx (m));
1701
1702 // Copy data & indices.
1703 for (octave_idx_type j = 0; j < m; j++)
1704 {
1705 octave_idx_type ljj = cidx (idx_j(j));
1706 octave_idx_type lj = retval.xcidx (j);
1707 octave_idx_type nzj = retval.xcidx (j+1) - lj;
1708
1709 std::copy_n (data () + ljj, nzj, retval.data () + lj);
1710 std::copy_n (ridx () + ljj, nzj, retval.ridx () + lj);
1711 }
1712 }
1713 }
1714 else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
1715 {
1716 // It's actually vector indexing. The 1D index is specialized for that.
1717 retval = index (idx_i);
1718
1719 // If nr == 1 then the vector indexing will return a column vector!!
1720 if (nr == 1)
1721 retval.transpose ();
1722 }
1723 else if (idx_i.is_scalar ())
1724 {
 // One fixed row: per column, binary-search that row's entry.
1725 octave_idx_type ii = idx_i(0);
1726 retval = Sparse<T, Alloc> (1, m);
1728 for (octave_idx_type j = 0; j < m; j++)
1729 {
1730 octave_quit ();
1731 octave_idx_type jj = idx_j(j);
1732 octave_idx_type lj = cidx (jj);
1733 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1734
1735 // Scalar index - just a binary lookup.
1736 octave_idx_type i = lblookup (ridx () + lj, nzj, ii);
1737 if (i < nzj && ridx (i+lj) == ii)
1738 {
 // Remember where the hit lives so the copy pass below
 // can fetch it without a second lookup.
1739 ij[j] = i + lj;
1740 retval.xcidx (j+1) = retval.xcidx (j) + 1;
1741 }
1742 else
1743 retval.xcidx (j+1) = retval.xcidx (j);
1744 }
1745
1746 retval.change_capacity (retval.xcidx (m));
1747
1748 // Copy data, adjust row indices.
1749 for (octave_idx_type j = 0; j < m; j++)
1750 {
1751 octave_idx_type i = retval.xcidx (j);
1752 if (retval.xcidx (j+1) > i)
1753 {
1754 retval.xridx (i) = 0;
1755 retval.xdata (i) = data (ij[j]);
1756 }
1757 }
1758 }
1759 else if (idx_i.is_cont_range (nr, lb, ub))
1760 {
 // Contiguous row band [lb, ub): per column, binary-search the
 // band's boundaries, then copy with rebased row indices.
1761 retval = Sparse<T, Alloc> (n, m);
1764 for (octave_idx_type j = 0; j < m; j++)
1765 {
1766 octave_quit ();
1767 octave_idx_type jj = idx_j(j);
1768 octave_idx_type lj = cidx (jj);
1769 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1770 octave_idx_type lij, uij;
1771
1772 // Lookup indices.
1773 li[j] = lij = lblookup (ridx () + lj, nzj, lb) + lj;
1774 ui[j] = uij = lblookup (ridx () + lj, nzj, ub) + lj;
1775 retval.xcidx (j+1) = retval.xcidx (j) + ui[j] - li[j];
1776 }
1777
1778 retval.change_capacity (retval.xcidx (m));
1779
1780 // Copy data, adjust row indices.
1781 for (octave_idx_type j = 0, k = 0; j < m; j++)
1782 {
1783 octave_quit ();
1784 for (octave_idx_type i = li[j]; i < ui[j]; i++)
1785 {
1786 retval.xdata (k) = data (i);
1787 retval.xridx (k++) = ridx (i) - lb;
1788 }
1789 }
1790 }
1791 else if (idx_i.is_permutation (nr))
1792 {
1793 // The columns preserve their length, just need to renumber and sort them.
1794 // Count new nonzero elements.
1795 retval = Sparse<T, Alloc> (nr, m);
1796 for (octave_idx_type j = 0; j < m; j++)
1797 {
1798 octave_idx_type jj = idx_j(j);
1799 retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1800 }
1801
1802 retval.change_capacity (retval.xcidx (m));
1803
1804 octave_quit ();
1805
1806 if (idx_i.is_range () && idx_i.increment () == -1)
1807 {
1808 // It's nr:-1:1. Just flip all columns.
1809 for (octave_idx_type j = 0; j < m; j++)
1810 {
1811 octave_quit ();
1812 octave_idx_type jj = idx_j(j);
1813 octave_idx_type lj = cidx (jj);
1814 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1815 octave_idx_type li = retval.xcidx (j);
1816 octave_idx_type uj = lj + nzj - 1;
1817 for (octave_idx_type i = 0; i < nzj; i++)
1818 {
1819 retval.xdata (li + i) = data (uj - i); // Copy in reverse order.
1820 retval.xridx (li + i) = nr - 1 - ridx (uj - i); // Ditto with transform.
1821 }
1822 }
1823 }
1824 else
1825 {
1826 // Get inverse permutation.
1827 octave::idx_vector idx_iinv = idx_i.inverse_permutation (nr);
1828 const octave_idx_type *iinv = idx_iinv.raw ();
1829
1830 // Scatter buffer.
1831 OCTAVE_LOCAL_BUFFER (T, scb, nr);
1832 octave_idx_type *rri = retval.ridx ();
1833
1834 for (octave_idx_type j = 0; j < m; j++)
1835 {
1836 octave_quit ();
1837 octave_idx_type jj = idx_j(j);
1838 octave_idx_type lj = cidx (jj);
1839 octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1840 octave_idx_type li = retval.xcidx (j);
1841 // Scatter the column, transform indices.
1842 for (octave_idx_type i = 0; i < nzj; i++)
1843 scb[rri[li + i] = iinv[ridx (lj + i)]] = data (lj + i);
1844
1845 octave_quit ();
1846
1847 // Sort them.
1848 std::sort (rri + li, rri + li + nzj);
1849
1850 // Gather.
1851 for (octave_idx_type i = 0; i < nzj; i++)
1852 retval.xdata (li + i) = scb[rri[li + i]];
1853 }
1854 }
1855
1856 }
1857 else if (idx_j.is_colon ())
1858 {
1859 // This requires uncompressing columns, which is generally costly,
1860 // so we rely on the efficient transpose to handle this.
1861 // It may still make sense to optimize some cases here.
1862 retval = transpose ();
1863 retval = retval.index (octave::idx_vector::colon, idx_i);
1864 retval = retval.transpose ();
1865 }
1866 else
1867 {
1868 // A(I, J) is decomposed into A(:, J)(I, :).
1869 retval = index (octave::idx_vector::colon, idx_j);
1870 retval = retval.index (idx_i, octave::idx_vector::colon);
1871 }
1872
1873 return retval;
1874}
1875
// One-index (linear) assignment A(idx) = rhs for sparse RHS.  Handles
// resizing when the index exceeds the current extent, the special cases
// colon / contiguous range / reversed range / permutation / zeroing,
// and a general rebuild path; scalar-expansion and conformance errors
// are handled at the bottom.
// NOTE(review): the extracted listing omits the qualified signature
// line (Sparse<T, Alloc>::assign (const octave::idx_vector& idx, ...))
// between lines 1878 and 1880.
1876template <typename T, typename Alloc>
1878void
1880 const Sparse<T, Alloc>& rhs)
1881{
1882 Sparse<T, Alloc> retval;
1883
1884 assert (ndims () == 2);
1885
1886 octave_idx_type nr = dim1 ();
1887 octave_idx_type nc = dim2 ();
1888 octave_idx_type nz = nnz ();
1889
1890 octave_idx_type n = numel (); // Can throw.
1891
1892 octave_idx_type rhl = rhs.numel ();
1893
1894 if (idx.length (n) == rhl)
1895 {
1896 if (rhl == 0)
1897 return;
1898
1899 octave_idx_type nx = idx.extent (n);
1900 // Try to resize first if necessary.
1901 if (nx != n)
1902 {
1903 resize1 (nx);
1904 n = numel ();
1905 nr = rows ();
1906 nc = cols ();
1907 // nz is preserved.
1908 }
1909
1910 if (idx.is_colon ())
1911 {
 // A(:) = rhs just reinterprets rhs in this matrix's shape.
1912 *this = rhs.reshape (m_dimensions);
1913 }
1914 else if (nc == 1 && rhs.cols () == 1)
1915 {
1916 // Sparse column vector to sparse column vector assignment.
1917
1918 octave_idx_type lb, ub;
1919 if (idx.is_cont_range (nr, lb, ub))
1920 {
1921 // Special-case a contiguous range.
1922 // Look-up indices first.
1923 octave_idx_type li = lblookup (ridx (), nz, lb);
1924 octave_idx_type ui = lblookup (ridx (), nz, ub);
1925 octave_idx_type rnz = rhs.nnz ();
1926 octave_idx_type new_nz = nz - (ui - li) + rnz;
1927
1928 if (new_nz >= nz && new_nz <= nzmax ())
1929 {
1930 // Adding/overwriting elements, enough capacity allocated.
1931
1932 if (new_nz > nz)
1933 {
1934 // Make room first.
1935 std::copy_backward (data () + ui, data () + nz,
1936 data () + nz + rnz);
1937 std::copy_backward (ridx () + ui, ridx () + nz,
1938 ridx () + nz + rnz);
1939 }
1940
1941 // Copy data and adjust indices from rhs.
1942 std::copy_n (rhs.data (), rnz, data () + li);
 // rhs row indices are 0-based within the range; shift by lb.
1943 mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1944 }
1945 else
1946 {
1947 // Clearing elements or exceeding capacity, allocate afresh
1948 // and paste pieces.
1949 const Sparse<T, Alloc> tmp = *this;
1950 *this = Sparse<T, Alloc> (nr, 1, new_nz);
1951
1952 // Head ...
1953 std::copy_n (tmp.data (), li, data ());
1954 std::copy_n (tmp.ridx (), li, ridx ());
1955
1956 // new stuff ...
1957 std::copy_n (rhs.data (), rnz, data () + li);
1958 mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1959
1960 // ...tail
1961 std::copy (tmp.data () + ui, tmp.data () + nz,
1962 data () + li + rnz);
1963 std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
1964 ridx () + li + rnz);
1965 }
1966
1967 cidx (1) = new_nz;
1968 }
1969 else if (idx.is_range () && idx.increment () == -1)
1970 {
1971 // It's s(u:-1:l) = r. Reverse the assignment.
1972 assign (idx.sorted (), rhs.index (octave::idx_vector (rhl - 1, 0, -1)));
1973 }
1974 else if (idx.is_permutation (n))
1975 {
 // Full permutation: the result is just rhs permuted back.
1976 *this = rhs.index (idx.inverse_permutation (n));
1977 }
1978 else if (rhs.nnz () == 0)
1979 {
1980 // Elements are being zeroed.
 // Overwrite matching stored entries with T () and compress
 // the explicit zeros away afterwards.
1981 octave_idx_type *ri = ridx ();
1982 for (octave_idx_type i = 0; i < rhl; i++)
1983 {
1984 octave_idx_type iidx = idx(i);
1985 octave_idx_type li = lblookup (ri, nz, iidx);
1986 if (li != nz && ri[li] == iidx)
1987 xdata (li) = T ();
1988 }
1989
1990 maybe_compress (true);
1991 }
1992 else
1993 {
 // General case: append the assigned positions as duplicate
 // (row, value) pairs and rebuild; the Sparse constructor with
 // sum_terms = false keeps the last duplicate, i.e. the new value.
1994 const Sparse<T, Alloc> tmp = *this;
1995 octave_idx_type new_nz = nz + rhl;
1996 // Disassembly our matrix...
1997 Array<octave_idx_type> new_ri (dim_vector (new_nz, 1));
1998 Array<T> new_data (dim_vector (new_nz, 1));
1999 std::copy_n (tmp.ridx (), nz, new_ri.fortran_vec ());
2000 std::copy_n (tmp.data (), nz, new_data.fortran_vec ());
2001 // ... insert new data (densified) ...
2002 idx.copy_data (new_ri.fortran_vec () + nz);
2003 new_data.assign (octave::idx_vector (nz, new_nz), rhs.array_value ());
2004 // ... reassembly.
2005 *this = Sparse<T, Alloc> (new_data, new_ri, 0, nr, nc, false);
2006 }
2007 }
2008 else
2009 {
 // Not a column vector: flatten, assign linearly, restore shape.
2010 dim_vector save_dims = m_dimensions;
2011 *this = index (octave::idx_vector::colon);
2012 assign (idx, rhs.index (octave::idx_vector::colon));
2013 *this = reshape (save_dims);
2014 }
2015 }
2016 else if (rhl == 1)
2017 {
 // Scalar expansion: broadcast the single rhs value over idx.
2018 rhl = idx.length (n);
2019 if (rhs.nnz () != 0)
2020 assign (idx, Sparse<T, Alloc> (rhl, 1, rhs.data (0)));
2021 else
2022 assign (idx, Sparse<T, Alloc> (rhl, 1));
2023 }
2024 else
2025 octave::err_nonconformant ("=", dim_vector(idx.length (n), 1), rhs.dims());
2026}
2027
2028template <typename T, typename Alloc>
2030void
2032 const T& rhs)
2033{
2034 // FIXME: Converting the RHS and forwarding to the sparse matrix
2035 // assignment function is simpler, but it might be good to have a
2036 // specialization...
2037
2038 assign (idx, Sparse<T, Alloc> (1, 1, rhs));
2039}
2040
// Two-index assignment A(idx_i, idx_j) = rhs for sparse RHS.  Resizes
// when indices exceed the current extent (including the Matlab-style
// auto-shape for an originally 0x0 matrix), then dispatches on special
// cases: colon rows (pure column surgery), vector indexing, reversed
// ranges, column permutations, colon columns (via transpose), and the
// general split into two assignments plus an indexing step.  Scalar
// expansion and orientation-tolerant vector assignment come last.
// NOTE(review): the extracted listing omits the qualified signature
// line (Sparse<T, Alloc>::assign (const octave::idx_vector& idx_i, ...))
// between lines 2043 and 2045, and the OCTAVE_LOCAL_BUFFER declaration
// / initialization of jsav (line 2189) used below.
2041template <typename T, typename Alloc>
2043void
2045 const octave::idx_vector& idx_j,
2046 const Sparse<T, Alloc>& rhs)
2047{
2048 Sparse<T, Alloc> retval;
2049
2050 assert (ndims () == 2);
2051
2052 octave_idx_type nr = dim1 ();
2053 octave_idx_type nc = dim2 ();
2054 octave_idx_type nz = nnz ();
2055
2056 octave_idx_type n = rhs.rows ();
2057 octave_idx_type m = rhs.columns ();
2058
2059 // FIXME: this should probably be written more like the
2060 // Array<T>::assign function...
2061
2062 bool orig_zero_by_zero = (nr == 0 && nc == 0);
2063
2064 if (orig_zero_by_zero || (idx_i.length (nr) == n && idx_j.length (nc) == m))
2065 {
2066 octave_idx_type nrx;
2067 octave_idx_type ncx;
2068
2069 if (orig_zero_by_zero)
2070 {
 // Assigning into an empty 0x0 matrix: colon indices take their
 // size from rhs, explicit indices from their extent.
2071 if (idx_i.is_colon ())
2072 {
2073 nrx = n;
2074
2075 if (idx_j.is_colon ())
2076 ncx = m;
2077 else
2078 ncx = idx_j.extent (nc);
2079 }
2080 else if (idx_j.is_colon ())
2081 {
2082 nrx = idx_i.extent (nr);
2083 ncx = m;
2084 }
2085 else
2086 {
2087 nrx = idx_i.extent (nr);
2088 ncx = idx_j.extent (nc);
2089 }
2090 }
2091 else
2092 {
2093 nrx = idx_i.extent (nr);
2094 ncx = idx_j.extent (nc);
2095 }
2096
2097 // Try to resize first if necessary.
2098 if (nrx != nr || ncx != nc)
2099 {
2100 resize (nrx, ncx);
2101 nr = rows ();
2102 nc = cols ();
2103 // nz is preserved.
2104 }
2105
2106 if (n == 0 || m == 0)
2107 return;
2108
2109 if (idx_i.is_colon ())
2110 {
2111 octave_idx_type lb, ub;
2112 // Great, we're just manipulating columns. This is going to be quite
2113 // efficient, because the columns can stay compressed as they are.
2114 if (idx_j.is_colon ())
2115 *this = rhs; // Shallow copy.
2116 else if (idx_j.is_cont_range (nc, lb, ub))
2117 {
2118 // Special-case a contiguous range.
2119 octave_idx_type li = cidx (lb);
2120 octave_idx_type ui = cidx (ub);
2121 octave_idx_type rnz = rhs.nnz ();
2122 octave_idx_type new_nz = nz - (ui - li) + rnz;
2123
2124 if (new_nz >= nz && new_nz <= nzmax ())
2125 {
2126 // Adding/overwriting elements, enough capacity allocated.
2127
2128 if (new_nz > nz)
2129 {
2130 // Make room first.
2131 std::copy_backward (data () + ui, data () + nz,
2132 data () + new_nz);
2133 std::copy_backward (ridx () + ui, ridx () + nz,
2134 ridx () + new_nz);
 // Trailing column pointers move up by the growth.
2135 mx_inline_add2 (nc - ub, cidx () + ub + 1, new_nz - nz);
2136 }
2137
2138 // Copy data and indices from rhs.
2139 std::copy_n (rhs.data (), rnz, data () + li);
2140 std::copy_n (rhs.ridx (), rnz, ridx () + li);
2141 mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2142 li);
2143
2144 assert (nnz () == new_nz);
2145 }
2146 else
2147 {
2148 // Clearing elements or exceeding capacity, allocate afresh
2149 // and paste pieces.
2150 const Sparse<T, Alloc> tmp = *this;
2151 *this = Sparse<T, Alloc> (nr, nc, new_nz);
2152
2153 // Head...
2154 std::copy_n (tmp.data (), li, data ());
2155 std::copy_n (tmp.ridx (), li, ridx ());
2156 std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
2157
2158 // new stuff...
2159 std::copy_n (rhs.data (), rnz, data () + li);
2160 std::copy_n (rhs.ridx (), rnz, ridx () + li);
2161 mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2162 li);
2163
2164 // ...tail.
2165 std::copy (tmp.data () + ui, tmp.data () + nz,
2166 data () + li + rnz);
2167 std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
2168 ridx () + li + rnz);
2169 mx_inline_add (nc - ub, cidx () + ub + 1,
2170 tmp.cidx () + ub + 1, new_nz - nz);
2171
2172 assert (nnz () == new_nz);
2173 }
2174 }
2175 else if (idx_j.is_range () && idx_j.increment () == -1)
2176 {
2177 // It's s(:,u:-1:l) = r. Reverse the assignment.
2178 assign (idx_i, idx_j.sorted (),
2179 rhs.index (idx_i, octave::idx_vector (m - 1, 0, -1)));
2180 }
2181 else if (idx_j.is_permutation (nc))
2182 {
2183 *this = rhs.index (idx_i, idx_j.inverse_permutation (nc));
2184 }
2185 else
2186 {
 // General column set: rebuild column-by-column, taking each
 // column either from rhs (jsav maps it back to an rhs column)
 // or from the original matrix.
2187 const Sparse<T, Alloc> tmp = *this;
2188 *this = Sparse<T, Alloc> (nr, nc);
2190
2191 // Assemble column lengths.
2192 for (octave_idx_type i = 0; i < nc; i++)
2193 xcidx (i+1) = tmp.cidx (i+1) - tmp.cidx (i);
2194
2195 for (octave_idx_type i = 0; i < m; i++)
2196 {
2197 octave_idx_type j =idx_j(i);
2198 jsav[j] = i;
2199 xcidx (j+1) = rhs.cidx (i+1) - rhs.cidx (i);
2200 }
2201
2202 // Make cumulative.
2203 for (octave_idx_type i = 0; i < nc; i++)
2204 xcidx (i+1) += xcidx (i);
2205
2206 change_capacity (nnz ());
2207
2208 // Merge columns.
2209 for (octave_idx_type i = 0; i < nc; i++)
2210 {
2211 octave_idx_type l = xcidx (i);
2212 octave_idx_type u = xcidx (i+1);
2213 octave_idx_type j = jsav[i];
2214 if (j >= 0)
2215 {
2216 // from rhs
2217 octave_idx_type k = rhs.cidx (j);
2218 std::copy_n (rhs.data () + k, u - l, xdata () + l);
2219 std::copy_n (rhs.ridx () + k, u - l, xridx () + l);
2220 }
2221 else
2222 {
2223 // original
2224 octave_idx_type k = tmp.cidx (i);
2225 std::copy_n (tmp.data () + k, u - l, xdata () + l);
2226 std::copy_n (tmp.ridx () + k, u - l, xridx () + l);
2227 }
2228 }
2229
2230 }
2231 }
2232 else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
2233 {
2234 // It's just vector indexing. The 1D assign is specialized for that.
2235 assign (idx_i, rhs);
2236 }
2237 else if (idx_j.is_colon ())
2238 {
2239 if (idx_i.is_permutation (nr))
2240 {
2241 *this = rhs.index (idx_i.inverse_permutation (nr), idx_j);
2242 }
2243 else
2244 {
2245 // FIXME: optimize more special cases?
2246 // In general this requires unpacking the columns, which is slow,
2247 // especially for many small columns. OTOH, transpose is an
2248 // efficient O(nr+nc+nnz) operation.
2249 *this = transpose ();
2250 assign (octave::idx_vector::colon, idx_i, rhs.transpose ());
2251 *this = transpose ();
2252 }
2253 }
2254 else
2255 {
2256 // Split it into 2 assignments and one indexing.
2257 Sparse<T, Alloc> tmp = index (octave::idx_vector::colon, idx_j);
2258 tmp.assign (idx_i, octave::idx_vector::colon, rhs);
2259 assign (octave::idx_vector::colon, idx_j, tmp);
2260 }
2261 }
2262 else if (m == 1 && n == 1)
2263 {
 // Scalar expansion: broadcast the single rhs element.
2264 n = idx_i.length (nr);
2265 m = idx_j.length (nc);
2266 if (rhs.nnz () != 0)
2267 assign (idx_i, idx_j, Sparse<T, Alloc> (n, m, rhs.data (0)));
2268 else
2269 assign (idx_i, idx_j, Sparse<T, Alloc> (n, m));
2270 }
2271 else if (idx_i.length (nr) == m && idx_j.length (nc) == n
2272 && (n == 1 || m == 1))
2273 {
 // Vector rhs with opposite orientation: retry transposed.
2274 assign (idx_i, idx_j, rhs.transpose ());
2275 }
2276 else
2277 octave::err_nonconformant ("=", idx_i.length (nr), idx_j.length (nc), n, m);
2278}
2279
2280template <typename T, typename Alloc>
2282void
2284 const octave::idx_vector& idx_j,
2285 const T& rhs)
2286{
2287 // FIXME: Converting the RHS and forwarding to the sparse matrix
2288 // assignment function is simpler, but it might be good to have a
2289 // specialization...
2290
2291 assign (idx_i, idx_j, Sparse<T, Alloc> (1, 1, rhs));
2292}
2293
2294// Can't use versions of these in Array.cc due to duplication of the
2295// instantiations for Array<double> and Sparse<double>, etc.
2296template <typename T>
2298bool
2300 typename ref_param<T>::type b)
2301{
2302 return (a < b);
2303}
2304
2305template <typename T>
2307bool
2309 typename ref_param<T>::type b)
2310{
2311 return (a > b);
2312}
2313
// Sort the sparse matrix along dimension DIM (0 = within columns,
// 1 = within rows, via transpose) in MODE order, returning the sorted
// copy.  Only stored (nonzero) values are physically sorted per column;
// the implicit zeros are then positioned by rewriting the row indices:
// entries ordering before zero keep the leading rows, the rest are
// pushed to the trailing rows.
// NOTE(review): the extracted listing omits some source lines -- the
// (*current_liboctave_error_handler) line before 2342, and the
// declaration of the loop variable i (presumably octave_idx_type i;)
// before line 2354.
2314template <typename T, typename Alloc>
2318{
2319 Sparse<T, Alloc> m = *this;
2320
2321 octave_idx_type nr = m.rows ();
2322 octave_idx_type nc = m.columns ();
2323
2324 if (m.numel () < 1 || dim > 1)
2325 return m;
2326
2327 bool sort_by_column = (dim > 0);
2328 if (sort_by_column)
2329 {
 // Row-wise sort: operate on the transpose and flip back at the end.
2330 m = m.transpose ();
2331 std::swap (nr, nc);
2332 }
2333
2334 octave_sort<T> lsort;
2335
2336 if (mode == ASCENDING)
2337 lsort.set_compare (sparse_ascending_compare<T>);
2338 else if (mode == DESCENDING)
2339 lsort.set_compare (sparse_descending_compare<T>);
2340 else
2342 ("Sparse<T, Alloc>::sort: invalid MODE");
2343
2344 T *v = m.data ();
2345 octave_idx_type *mcidx = m.cidx ();
2346 octave_idx_type *mridx = m.ridx ();
2347
2348 for (octave_idx_type j = 0; j < nc; j++)
2349 {
2350 octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2351 lsort.sort (v, ns);
2352
 // Find i = number of stored values that order before zero; the
 // implicit zeros of this column sit between them and the rest.
2354 if (mode == ASCENDING)
2355 {
2356 for (i = 0; i < ns; i++)
2357 if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2358 break;
2359 }
2360 else
2361 {
2362 for (i = 0; i < ns; i++)
2363 if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2364 break;
2365 }
 // Re-number row indices: first i entries at the top, the rest
 // packed against the bottom (rows k - ns + nr).
2366 for (octave_idx_type k = 0; k < i; k++)
2367 mridx[k] = k;
2368 for (octave_idx_type k = i; k < ns; k++)
2369 mridx[k] = k - ns + nr;
2370
2371 v += ns;
2372 mridx += ns;
2373 }
2374
2375 if (sort_by_column)
2376 m = m.transpose ();
2377
2378 return m;
2379}
2380
// Indexed sort: like sort (dim, mode) above, but also fills SIDX with
// the permutation of original positions, including the positions of
// the implicit zeros, so that the caller can reconstruct the ordering.
// NOTE(review): the extracted listing omits some source lines -- the
// return-type / signature line(s) before 2384, the
// (*current_liboctave_error_handler) line before 2413, the
// OCTAVE_LOCAL_BUFFER declaration of vi before line 2421, and the
// declaration of the loop variable i before line 2440.
2380template <typename T, typename Alloc>
2384 octave_idx_type dim, sortmode mode) const
2385{
2386 Sparse<T, Alloc> m = *this;
2387
2388 octave_idx_type nr = m.rows ();
2389 octave_idx_type nc = m.columns ();
2390
2391 if (m.numel () < 1 || dim > 1)
2392 {
 // Degenerate case: identity index array, matrix unchanged.
2393 sidx = Array<octave_idx_type> (dim_vector (nr, nc), 1);
2394 return m;
2395 }
2396
2397 bool sort_by_column = (dim > 0);
2398 if (sort_by_column)
2399 {
 // Row-wise sort: operate on the transpose and flip back at the end.
2400 m = m.transpose ();
2401 std::swap (nr, nc);
2402 }
2403
2404 octave_sort<T> indexed_sort;
2405
2406 if (mode == ASCENDING)
2407 indexed_sort.set_compare (sparse_ascending_compare<T>);
2408 else if (mode == DESCENDING)
2409 indexed_sort.set_compare (sparse_descending_compare<T>);
2410 else
2413 ("Sparse<T, Alloc>::sort: invalid MODE");
2414
2415 T *v = m.data ();
2416 octave_idx_type *mcidx = m.cidx ();
2417 octave_idx_type *mridx = m.ridx ();
2418
2419 sidx = Array<octave_idx_type> (dim_vector (nr, nc));
2421
2422 for (octave_idx_type j = 0; j < nc; j++)
2423 {
2424 octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2425 octave_idx_type offset = j * nr;
2426
2427 if (ns == 0)
2428 {
 // Entirely implicit-zero column: identity index.
2429 for (octave_idx_type k = 0; k < nr; k++)
2430 sidx(offset + k) = k;
2431 }
2432 else
2433 {
 // vi carries the original row of each stored value through
 // the indexed sort.
2434 for (octave_idx_type i = 0; i < ns; i++)
2435 vi[i] = mridx[i];
2436
2437 indexed_sort.sort (v, vi, ns);
2438
 // i = number of stored values ordering before zero (the
 // implicit zeros of this column go between them and the rest).
2440 if (mode == ASCENDING)
2441 {
2442 for (i = 0; i < ns; i++)
2443 if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2444 break;
2445 }
2446 else
2447 {
2448 for (i = 0; i < ns; i++)
2449 if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2450 break;
2451 }
2452
 // Fill the index slots of the implicit zeros with the rows
 // not present among the stored entries, in ascending order.
2453 octave_idx_type ii = 0;
2454 octave_idx_type jj = i;
2455 for (octave_idx_type k = 0; k < nr; k++)
2456 {
2457 if (ii < ns && mridx[ii] == k)
2458 ii++;
2459 else
2460 sidx(offset + jj++) = k;
2461 }
2462
 // Stored values ordering before zero go to the top rows...
2463 for (octave_idx_type k = 0; k < i; k++)
2464 {
2465 sidx(k + offset) = vi[k];
2466 mridx[k] = k;
2467 }
2468
 // ...the remainder are packed against the bottom rows.
2469 for (octave_idx_type k = i; k < ns; k++)
2470 {
2471 sidx(k - ns + nr + offset) = vi[k];
2472 mridx[k] = k - ns + nr;
2473 }
2474
2475 v += ns;
2476 mridx += ns;
2477 }
2478 }
2479
2480 if (sort_by_column)
2481 {
2482 m = m.transpose ();
2483 sidx = sidx.transpose ();
2484 }
2485
2486 return m;
2487}
2488
template <typename T, typename Alloc>
// Return the k-th diagonal of *this as a sparse column vector when *this
// is a matrix, or build an n-by-n sparse matrix with *this placed on the
// k-th diagonal when *this is a row or column vector (Matlab diag()
// semantics).  k > 0 selects a superdiagonal, k < 0 a subdiagonal.
// NOTE(review): this extract is missing the signature lines (2490-2492)
// and the declaration of the result variable 'd' (2496); per the
// cross-reference index this is
//   Sparse<T, Alloc> Sparse<T, Alloc>::diag (octave_idx_type k) const.
2493 {
2494 octave_idx_type nnr = rows ();
2495 octave_idx_type nnc = cols ();
2497 
2498 if (nnr == 0 || nnc == 0)
2499 ; // do nothing
2500 else if (nnr != 1 && nnc != 1)
// Matrix case: extract the requested diagonal.
2501 {
// Shrink the effective dimensions so ndiag below counts exactly the
// elements lying on the k-th diagonal.
2502 if (k > 0)
2503 nnc -= k;
2504 else if (k < 0)
2505 nnr += k;
2506 
2507 if (nnr > 0 && nnc > 0)
2508 {
2509 octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;
2510 
2511 // Count the number of nonzero elements
2512 octave_idx_type nel = 0;
2513 if (k > 0)
2514 {
2515 for (octave_idx_type i = 0; i < ndiag; i++)
2516 if (elem (i, i+k) != 0.)
2517 nel++;
2518 }
2519 else if (k < 0)
2520 {
2521 for (octave_idx_type i = 0; i < ndiag; i++)
2522 if (elem (i-k, i) != 0.)
2523 nel++;
2524 }
2525 else
2526 {
2527 for (octave_idx_type i = 0; i < ndiag; i++)
2528 if (elem (i, i) != 0.)
2529 nel++;
2530 }
2531 
// Result is an ndiag-by-1 sparse column vector holding nel nonzeros;
// a single column means only cidx(0) and cidx(1) need setting.
2532 d = Sparse<T, Alloc> (ndiag, 1, nel);
2533 d.xcidx (0) = 0;
2534 d.xcidx (1) = nel;
2535 
// Second pass: copy the nonzero diagonal entries and their row indices.
2536 octave_idx_type ii = 0;
2537 if (k > 0)
2538 {
2539 for (octave_idx_type i = 0; i < ndiag; i++)
2540 {
2541 T tmp = elem (i, i+k);
2542 if (tmp != 0.)
2543 {
2544 d.xdata (ii) = tmp;
2545 d.xridx (ii++) = i;
2546 }
2547 }
2548 }
2549 else if (k < 0)
2550 {
2551 for (octave_idx_type i = 0; i < ndiag; i++)
2552 {
2553 T tmp = elem (i-k, i);
2554 if (tmp != 0.)
2555 {
2556 d.xdata (ii) = tmp;
2557 d.xridx (ii++) = i;
2558 }
2559 }
2560 }
2561 else
2562 {
2563 for (octave_idx_type i = 0; i < ndiag; i++)
2564 {
2565 T tmp = elem (i, i);
2566 if (tmp != 0.)
2567 {
2568 d.xdata (ii) = tmp;
2569 d.xridx (ii++) = i;
2570 }
2571 }
2572 }
2573 }
2574 else
2575 {
2576 // Matlab returns [] 0x1 for out-of-range diagonal
2577 
2578 octave_idx_type nr = 0;
2579 octave_idx_type nc = 1;
2580 octave_idx_type nz = 0;
2581 
2582 d = Sparse<T, Alloc> (nr, nc, nz);
2583 }
2584 }
2585 else // one of dimensions == 1 (vector)
2586 {
// Vector case: build an n-by-n matrix with the vector on diagonal k.
// roff/coff are the row/column offsets of the diagonal's first element.
2587 octave_idx_type roff = 0;
2588 octave_idx_type coff = 0;
2589 if (k > 0)
2590 {
2591 roff = 0;
2592 coff = k;
2593 }
2594 else if (k < 0)
2595 {
2596 roff = -k;
2597 coff = 0;
2598 }
2599 
2600 if (nnr == 1)
2601 {
// Row vector: each stored element already occupies its own column, so
// the source column pointers can be copied, shifted by coff.
2602 octave_idx_type n = nnc + std::abs (k);
2603 octave_idx_type nz = nnz ();
2604 
2605 d = Sparse<T, Alloc> (n, n, nz);
2606 
2607 if (nnz () > 0)
2608 {
2609 for (octave_idx_type i = 0; i < coff+1; i++)
2610 d.xcidx (i) = 0;
2611 
2612 for (octave_idx_type j = 0; j < nnc; j++)
2613 {
2614 for (octave_idx_type i = cidx (j); i < cidx (j+1); i++)
2615 {
2616 d.xdata (i) = data (i);
2617 d.xridx (i) = j + roff;
2618 }
2619 d.xcidx (j + coff + 1) = cidx (j+1);
2620 }
2621 
// Trailing columns past the diagonal are empty.
2622 for (octave_idx_type i = nnc + coff + 1; i < n + 1; i++)
2623 d.xcidx (i) = nz;
2624 }
2625 }
2626 else
2627 {
// Column vector: walk the row indices of the single stored column and
// emit one (possibly empty) result column per source row.
2628 octave_idx_type n = nnr + std::abs (k);
2629 octave_idx_type nz = nnz ();
2630 
2631 d = Sparse<T, Alloc> (n, n, nz);
2632 
2633 if (nnz () > 0)
2634 {
2635 octave_idx_type ii = 0;
2636 octave_idx_type ir = ridx (0);
2637 
2638 for (octave_idx_type i = 0; i < coff+1; i++)
2639 d.xcidx (i) = 0;
2640 
2641 for (octave_idx_type i = 0; i < nnr; i++)
2642 {
2643 if (ir == i)
2644 {
2645 d.xdata (ii) = data (ii);
2646 d.xridx (ii++) = ir + roff;
2647 
2648 if (ii != nz)
2649 ir = ridx (ii);
2650 }
2651 d.xcidx (i + coff + 1) = ii;
2652 }
2653 
2654 for (octave_idx_type i = nnr + coff + 1; i < n+1; i++)
2655 d.xcidx (i) = nz;
2656 }
2657 }
2658 }
2659 
2660 return d;
2661 }
2662
template <typename T, typename Alloc>
// Concatenate the n sparse matrices in sparse_list along dimension dim
// (0 = vertical, 1 = horizontal; -1/-2 request the hvcat dimension rules
// used by [;] and [,]).  Errors for any other dimension.
// NOTE(review): this extract is missing the signature lines (2664-2666)
// and line 2749, the start of the retval.assign(...) call in the dim==1
// branch; per the cross-reference index this is
//   static Sparse<T, Alloc> Sparse<T, Alloc>::cat
//     (int dim, octave_idx_type n, const Sparse<T, Alloc> *sparse_list).
2667 const Sparse<T, Alloc> *sparse_list)
2668 {
2669 // Default concatenation.
2670 bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
2671 
2672 if (dim == -1 || dim == -2)
2673 {
2674 concat_rule = &dim_vector::hvcat;
2675 dim = -dim - 1;
2676 }
2677 else if (dim < 0)
2678 (*current_liboctave_error_handler) ("cat: invalid dimension");
2679 
2680 dim_vector dv;
2681 octave_idx_type total_nz = 0;
2682 if (dim != 0 && dim != 1)
2683 (*current_liboctave_error_handler)
2684 ("cat: invalid dimension for sparse concatenation");
2685 
2686 if (n == 1)
2687 return sparse_list[0];
2688 
// First pass: fold each operand's dims into dv via the selected rule and
// accumulate the total nonzero count for the result allocation.
2689 for (octave_idx_type i = 0; i < n; i++)
2690 {
2691 if (! (dv.*concat_rule) (sparse_list[i].dims (), dim))
2692 (*current_liboctave_error_handler) ("cat: dimension mismatch");
2693 
2694 total_nz += sparse_list[i].nnz ();
2695 }
2696 
2697 Sparse<T, Alloc> retval (dv, total_nz);
2698 
2699 if (retval.isempty ())
2700 return retval;
2701 
2702 switch (dim)
2703 {
2704 case 0:
2705 {
2706 // sparse vertcat. This is not efficiently handled by assignment,
2707 // so we'll do it directly.
2708 octave_idx_type l = 0;
2709 for (octave_idx_type j = 0; j < dv(1); j++)
2710 {
2711 octave_quit ();
2712 
// rcum tracks the running row offset of each stacked operand within
// the current output column j.
2713 octave_idx_type rcum = 0;
2714 for (octave_idx_type i = 0; i < n; i++)
2715 {
2716 const Sparse<T, Alloc>& spi = sparse_list[i];
2717 // Skipping empty matrices. See the comment in Array.cc.
2718 if (spi.isempty ())
2719 continue;
2720 
2721 octave_idx_type kl = spi.cidx (j);
2722 octave_idx_type ku = spi.cidx (j+1);
2723 for (octave_idx_type k = kl; k < ku; k++, l++)
2724 {
2725 retval.xridx (l) = spi.ridx (k) + rcum;
2726 retval.xdata (l) = spi.data (k);
2727 }
2728 
2729 rcum += spi.rows ();
2730 }
2731 
2732 retval.xcidx (j+1) = l;
2733 }
2734 
2735 break;
2736 }
2737 case 1:
2738 {
// Horizontal concatenation: copy each operand into its destination
// column range [l, u) of retval via indexed assignment.
2739 octave_idx_type l = 0;
2740 for (octave_idx_type i = 0; i < n; i++)
2741 {
2742 octave_quit ();
2743 
2744 // Skipping empty matrices. See the comment in Array.cc.
2745 if (sparse_list[i].isempty ())
2746 continue;
2747 
2748 octave_idx_type u = l + sparse_list[i].columns ();
2750 sparse_list[i]);
2751 l = u;
2752 }
2753 
2754 break;
2755 }
2756 default:
2757 assert (false);
2758 }
2759 
2760 return retval;
2761 }
2762
template <typename T, typename Alloc>
// Convert this sparse matrix to a dense Array<T>, filling unstored
// positions with the default value T ().
// NOTE(review): the signature lines (2764-2766) are missing from this
// extract; per the cross-reference index this is
//   Array<T> Sparse<T, Alloc>::array_value (void) const.
2767 {
2768 Array<T> retval (dims (), T ());
2769 if (rows () == 1)
2770 {
// Row-vector fast path: every column holds at most one stored element,
// so a running cursor i over the data array suffices -- cidx (j+1) > i
// exactly when column j contributes the next stored value.
2771 octave_idx_type i = 0;
2772 for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2773 {
2774 if (cidx (j+1) > i)
2775 retval.xelem (j) = data (i++);
2776 }
2777 }
2778 else
2779 {
// General case: scatter each column's stored (row, value) pairs into
// the dense result.
2780 for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2781 for (octave_idx_type i = cidx (j), iu = cidx (j+1); i < iu; i++)
2782 retval.xelem (ridx (i), j) = data (i);
2783 }
2784 
2785 return retval;
2786 }
2787
2788template <typename T>
2790std::istream&
2791read_sparse_matrix (std::istream& is, Sparse<T>& a,
2792 T (*read_fcn) (std::istream&))
2793{
2794 octave_idx_type nr = a.rows ();
2795 octave_idx_type nc = a.cols ();
2796 octave_idx_type nz = a.nzmax ();
2797
2798 if (nr > 0 && nc > 0)
2799 {
2800 octave_idx_type itmp;
2801 octave_idx_type jtmp;
2802 octave_idx_type iold = 0;
2803 octave_idx_type jold = 0;
2804 octave_idx_type ii = 0;
2805 T tmp;
2806
2807 a.cidx (0) = 0;
2808 for (octave_idx_type i = 0; i < nz; i++)
2809 {
2810 itmp = 0; jtmp = 0;
2811 is >> itmp;
2812 itmp--;
2813
2814 is >> jtmp;
2815 jtmp--;
2816
2817 if (is.fail ())
2818 {
2819 is.clear();
2820 std::string err_field;
2821 is >> err_field;
2822 (*current_liboctave_error_handler)
2823 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2824 "Symbols '%s' is not an integer format",
2825 i+1, err_field.c_str ());
2826 }
2827
2828 if (itmp < 0 || itmp >= nr)
2829 {
2830 is.setstate (std::ios::failbit);
2831
2832 (*current_liboctave_error_handler)
2833 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2834 "row index = %" OCTAVE_IDX_TYPE_FORMAT " out of range",
2835 i+1, itmp + 1);
2836 }
2837
2838 if (jtmp < 0 || jtmp >= nc)
2839 {
2840 is.setstate (std::ios::failbit);
2841
2842 (*current_liboctave_error_handler)
2843 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2844 "column index = %" OCTAVE_IDX_TYPE_FORMAT " out of range",
2845 i+1, jtmp + 1);
2846 }
2847
2848 if (jtmp < jold)
2849 {
2850 is.setstate (std::ios::failbit);
2851
2852 (*current_liboctave_error_handler)
2853 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ":"
2854 "column indices must appear in ascending order "
2855 "(%" OCTAVE_IDX_TYPE_FORMAT " < %" OCTAVE_IDX_TYPE_FORMAT ")",
2856 i+1, jtmp, jold);
2857 }
2858 else if (jtmp > jold)
2859 {
2860 for (octave_idx_type j = jold; j < jtmp; j++)
2861 a.cidx (j+1) = ii;
2862 }
2863 else if (itmp < iold)
2864 {
2865 is.setstate (std::ios::failbit);
2866
2867 (*current_liboctave_error_handler)
2868 ("invalid sparse matrix: element %" OCTAVE_IDX_TYPE_FORMAT ": "
2869 "row indices must appear in ascending order in each column "
2870 "(%" OCTAVE_IDX_TYPE_FORMAT " < %" OCTAVE_IDX_TYPE_FORMAT ")",
2871 i+1, iold, itmp);
2872 }
2873
2874 iold = itmp;
2875 jold = jtmp;
2876
2877 tmp = read_fcn (is);
2878
2879 if (! is)
2880 return is; // Problem, return is in error state
2881
2882 a.data (ii) = tmp;
2883 a.ridx (ii++) = itmp;
2884 }
2885
2886 for (octave_idx_type j = jold; j < nc; j++)
2887 a.cidx (j+1) = ii;
2888 }
2889
2890 return is;
2891}
2892
2893/*
2894 * Tests
2895 *
2896
2897%!function x = set_slice (x, dim, slice, arg)
2898%! switch (dim)
2899%! case 11
2900%! x(slice) = 2;
2901%! case 21
2902%! x(slice, :) = 2;
2903%! case 22
2904%! x(:, slice) = 2;
2905%! otherwise
2906%! error ("invalid dim, '%d'", dim);
2907%! endswitch
2908%!endfunction
2909
2910%!function x = set_slice2 (x, dim, slice)
2911%! switch (dim)
2912%! case 11
2913%! x(slice) = 2 * ones (size (slice));
2914%! case 21
2915%! x(slice, :) = 2 * ones (length (slice), columns (x));
2916%! case 22
2917%! x(:, slice) = 2 * ones (rows (x), length (slice));
2918%! otherwise
2919%! error ("invalid dim, '%d'", dim);
2920%! endswitch
2921%!endfunction
2922
2923%!function test_sparse_slice (size, dim, slice)
2924%! x = ones (size);
2925%! s = set_slice (sparse (x), dim, slice);
2926%! f = set_slice (x, dim, slice);
2927%! assert (nnz (s), nnz (f));
2928%! assert (full (s), f);
2929%! s = set_slice2 (sparse (x), dim, slice);
2930%! f = set_slice2 (x, dim, slice);
2931%! assert (nnz (s), nnz (f));
2932%! assert (full (s), f);
2933%!endfunction
2934
2935#### 1d indexing
2936
2937## size = [2 0]
2938%!test test_sparse_slice ([2 0], 11, []);
2939%!assert (set_slice (sparse (ones ([2 0])), 11, 1), sparse ([2 0]')) # sparse different from full
2940%!assert (set_slice (sparse (ones ([2 0])), 11, 2), sparse ([0 2]')) # sparse different from full
2941%!assert (set_slice (sparse (ones ([2 0])), 11, 3), sparse ([0 0; 2 0]')) # sparse different from full
2942%!assert (set_slice (sparse (ones ([2 0])), 11, 4), sparse ([0 0; 0 2]')) # sparse different from full
2943
2944## size = [0 2]
2945%!test test_sparse_slice ([0 2], 11, []);
2946%!assert (set_slice (sparse (ones ([0 2])), 11, 1), sparse ([2 0])) # sparse different from full
2947%!test test_sparse_slice ([0 2], 11, 2);
2948%!test test_sparse_slice ([0 2], 11, 3);
2949%!test test_sparse_slice ([0 2], 11, 4);
2950%!test test_sparse_slice ([0 2], 11, [4, 4]);
2951
2952## size = [2 1]
2953%!test test_sparse_slice ([2 1], 11, []);
2954%!test test_sparse_slice ([2 1], 11, 1);
2955%!test test_sparse_slice ([2 1], 11, 2);
2956%!test test_sparse_slice ([2 1], 11, 3);
2957%!test test_sparse_slice ([2 1], 11, 4);
2958%!test test_sparse_slice ([2 1], 11, [4, 4]);
2959
2960## size = [1 2]
2961%!test test_sparse_slice ([1 2], 11, []);
2962%!test test_sparse_slice ([1 2], 11, 1);
2963%!test test_sparse_slice ([1 2], 11, 2);
2964%!test test_sparse_slice ([1 2], 11, 3);
2965%!test test_sparse_slice ([1 2], 11, 4);
2966%!test test_sparse_slice ([1 2], 11, [4, 4]);
2967
2968## size = [2 2]
2969%!test test_sparse_slice ([2 2], 11, []);
2970%!test test_sparse_slice ([2 2], 11, 1);
2971%!test test_sparse_slice ([2 2], 11, 2);
2972%!test test_sparse_slice ([2 2], 11, 3);
2973%!test test_sparse_slice ([2 2], 11, 4);
2974%!test test_sparse_slice ([2 2], 11, [4, 4]);
2975# These 2 errors are the same as in the full case
2976%!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 5)
2977%!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 6)
2978
2979#### 2d indexing
2980
2981## size = [2 0]
2982%!test test_sparse_slice ([2 0], 21, []);
2983%!test test_sparse_slice ([2 0], 21, 1);
2984%!test test_sparse_slice ([2 0], 21, 2);
2985%!test test_sparse_slice ([2 0], 21, [2,2]);
2986%!assert (set_slice (sparse (ones ([2 0])), 21, 3), sparse (3,0))
2987%!assert (set_slice (sparse (ones ([2 0])), 21, 4), sparse (4,0))
2988%!test test_sparse_slice ([2 0], 22, []);
2989%!test test_sparse_slice ([2 0], 22, 1);
2990%!test test_sparse_slice ([2 0], 22, 2);
2991%!test test_sparse_slice ([2 0], 22, [2,2]);
2992%!assert (set_slice (sparse (ones ([2 0])), 22, 3), sparse ([0 0 2;0 0 2])) # sparse different from full
2993%!assert (set_slice (sparse (ones ([2 0])), 22, 4), sparse ([0 0 0 2;0 0 0 2])) # sparse different from full
2994
2995## size = [0 2]
2996%!test test_sparse_slice ([0 2], 21, []);
2997%!test test_sparse_slice ([0 2], 21, 1);
2998%!test test_sparse_slice ([0 2], 21, 2);
2999%!test test_sparse_slice ([0 2], 21, [2,2]);
3000%!assert (set_slice (sparse (ones ([0 2])), 21, 3), sparse ([0 0;0 0;2 2])) # sparse different from full
3001%!assert (set_slice (sparse (ones ([0 2])), 21, 4), sparse ([0 0;0 0;0 0;2 2])) # sparse different from full
3002%!test test_sparse_slice ([0 2], 22, []);
3003%!test test_sparse_slice ([0 2], 22, 1);
3004%!test test_sparse_slice ([0 2], 22, 2);
3005%!test test_sparse_slice ([0 2], 22, [2,2]);
3006%!assert (set_slice (sparse (ones ([0 2])), 22, 3), sparse (0,3))
3007%!assert (set_slice (sparse (ones ([0 2])), 22, 4), sparse (0,4))
3008
3009## size = [2 1]
3010%!test test_sparse_slice ([2 1], 21, []);
3011%!test test_sparse_slice ([2 1], 21, 1);
3012%!test test_sparse_slice ([2 1], 21, 2);
3013%!test test_sparse_slice ([2 1], 21, [2,2]);
3014%!test test_sparse_slice ([2 1], 21, 3);
3015%!test test_sparse_slice ([2 1], 21, 4);
3016%!test test_sparse_slice ([2 1], 22, []);
3017%!test test_sparse_slice ([2 1], 22, 1);
3018%!test test_sparse_slice ([2 1], 22, 2);
3019%!test test_sparse_slice ([2 1], 22, [2,2]);
3020%!test test_sparse_slice ([2 1], 22, 3);
3021%!test test_sparse_slice ([2 1], 22, 4);
3022
3023## size = [1 2]
3024%!test test_sparse_slice ([1 2], 21, []);
3025%!test test_sparse_slice ([1 2], 21, 1);
3026%!test test_sparse_slice ([1 2], 21, 2);
3027%!test test_sparse_slice ([1 2], 21, [2,2]);
3028%!test test_sparse_slice ([1 2], 21, 3);
3029%!test test_sparse_slice ([1 2], 21, 4);
3030%!test test_sparse_slice ([1 2], 22, []);
3031%!test test_sparse_slice ([1 2], 22, 1);
3032%!test test_sparse_slice ([1 2], 22, 2);
3033%!test test_sparse_slice ([1 2], 22, [2,2]);
3034%!test test_sparse_slice ([1 2], 22, 3);
3035%!test test_sparse_slice ([1 2], 22, 4);
3036
3037## size = [2 2]
3038%!test test_sparse_slice ([2 2], 21, []);
3039%!test test_sparse_slice ([2 2], 21, 1);
3040%!test test_sparse_slice ([2 2], 21, 2);
3041%!test test_sparse_slice ([2 2], 21, [2,2]);
3042%!test test_sparse_slice ([2 2], 21, 3);
3043%!test test_sparse_slice ([2 2], 21, 4);
3044%!test test_sparse_slice ([2 2], 22, []);
3045%!test test_sparse_slice ([2 2], 22, 1);
3046%!test test_sparse_slice ([2 2], 22, 2);
3047%!test test_sparse_slice ([2 2], 22, [2,2]);
3048%!test test_sparse_slice ([2 2], 22, 3);
3049%!test test_sparse_slice ([2 2], 22, 4);
3050
3051%!assert <*35570> (speye (3,1)(3:-1:1), sparse ([0; 0; 1]))
3052
3053## Test removing columns
3054%!test <*36656>
3055%! s = sparse (magic (5));
3056%! s(:,2:4) = [];
3057%! assert (s, sparse (magic (5)(:, [1,5])));
3058
3059%!test
3060%! s = sparse ([], [], [], 1, 1);
3061%! s(1,:) = [];
3062%! assert (s, sparse ([], [], [], 0, 1));
3063
3064## Test (bug #37321)
3065%!test <*37321> a=sparse (0,0); assert (all (a) == sparse ([1]));
3066%!test <*37321> a=sparse (0,1); assert (all (a) == sparse ([1]));
3067%!test <*37321> a=sparse (1,0); assert (all (a) == sparse ([1]));
3068%!test <*37321> a=sparse (1,0); assert (all (a,2) == sparse ([1]));
3069%!test <*37321> a=sparse (1,0); assert (size (all (a,1)), [1 0]);
3070%!test <*37321> a=sparse (1,1);
3071%! assert (all (a) == sparse ([0]));
3072%! assert (size (all (a)), [1 1]);
3073%!test <*37321> a=sparse (2,1);
3074%! assert (all (a) == sparse ([0]));
3075%! assert (size (all (a)), [1 1]);
3076%!test <*37321> a=sparse (1,2);
3077%! assert (all (a) == sparse ([0]));
3078%! assert (size (all (a)), [1 1]);
3079%!test <*37321> a=sparse (2,2); assert (isequal (all (a), sparse ([0 0])));
3080
3081## Test assigning row to a column slice
3082%!test <45589>
3083%! a = sparse (magic (3));
3084%! b = a;
3085%! a(1,:) = 1:3;
3086%! b(1,:) = (1:3)';
3087%! assert (a, b);
3088
3089*/
3090
3091template <typename T, typename Alloc>
3093void
3094Sparse<T, Alloc>::print_info (std::ostream& os, const std::string& prefix) const
3095{
3096 os << prefix << "m_rep address: " << m_rep << "\n"
3097 << prefix << "m_rep->m_nzmax: " << m_rep->m_nzmax << "\n"
3098 << prefix << "m_rep->m_nrows: " << m_rep->m_nrows << "\n"
3099 << prefix << "m_rep->m_ncols: " << m_rep->m_ncols << "\n"
3100 << prefix << "m_rep->m_data: " << m_rep->m_data << "\n"
3101 << prefix << "m_rep->m_ridx: " << m_rep->m_ridx << "\n"
3102 << prefix << "m_rep->m_cidx: " << m_rep->m_cidx << "\n"
3103 << prefix << "m_rep->m_count: " << m_rep->m_count << "\n";
3104}
3105
// Explicit-instantiation helper: INSTANTIATE_SPARSE(T) expands to the
// explicit instantiation of the Sparse<T> class template together with
// its matching read_sparse_matrix<T> function.  The only difference in
// the clang branch is that OCTAVE_API is also applied to the class
// instantiation itself; presumably this works around clang's symbol
// visibility handling of explicit instantiations -- TODO confirm
// against the build configuration.
3106 #if defined (__clang__)
3107 # define INSTANTIATE_SPARSE(T) \
3108 template class OCTAVE_API Sparse<T>; \
3109 template OCTAVE_API std::istream& \
3110 read_sparse_matrix<T> (std::istream& is, Sparse<T>& a, \
3111 T (*read_fcn) (std::istream&));
3112 #else
3113 # define INSTANTIATE_SPARSE(T) \
3114 template class Sparse<T>; \
3115 template OCTAVE_API std::istream& \
3116 read_sparse_matrix<T> (std::istream& is, Sparse<T>& a, \
3117 T (*read_fcn) (std::istream&));
3118 #endif
class OCTARRAY_API Sparse
Definition: Sparse-fwd.h:40
OCTAVE_API std::istream & read_sparse_matrix(std::istream &is, Sparse< T > &a, T(*read_fcn)(std::istream &))
Definition: Sparse.cc:2791
OCTAVE_API bool sparse_descending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition: Sparse.cc:2308
static octave_idx_type lblookup(const octave_idx_type *ridx, octave_idx_type nr, octave_idx_type ri)
Definition: Sparse.cc:1178
OCTAVE_API bool sparse_ascending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition: Sparse.cc:2299
static int elem
Definition: __contourc__.cc:54
charNDArray max(char d, const charNDArray &m)
Definition: chNDArray.cc:230
charNDArray min(char d, const charNDArray &m)
Definition: chNDArray.cc:207
OCTARRAY_API Array< T, Alloc > transpose(void) const
Size of the specified dimension.
Definition: Array.cc:1603
T & xelem(octave_idx_type n)
Size of the specified dimension.
Definition: Array.h:504
OCTARRAY_API Array< T, Alloc > index(const octave::idx_vector &i) const
Indexing without resizing.
Definition: Array.cc:697
octave_idx_type numel(void) const
Number of elements in the array.
Definition: Array.h:411
OCTARRAY_API void assign(const octave::idx_vector &i, const Array< T, Alloc > &rhs, const T &rfv)
Indexed assignment (always with resize & fill).
Definition: Array.cc:1115
octave_idx_type cols(void) const
Definition: Array.h:457
T & elem(octave_idx_type n)
Size of the specified dimension.
Definition: Array.h:534
octave_idx_type rows(void) const
Definition: Array.h:449
const T * data(void) const
Size of the specified dimension.
Definition: Array.h:616
OCTARRAY_API T * fortran_vec(void)
Size of the specified dimension.
Definition: Array.cc:1744
Array< T, Alloc > as_matrix(void) const
Return the array as a matrix.
Definition: Array.h:435
octave_idx_type rows(void) const
Definition: PermMatrix.h:62
const Array< octave_idx_type > & col_perm_vec(void) const
Definition: PermMatrix.h:83
OCTAVE_API T celem(octave_idx_type r, octave_idx_type c) const
Definition: Sparse.cc:108
OCTAVE_API void change_length(octave_idx_type nz)
Definition: Sparse.cc:145
octave_idx_type m_ncols
Definition: Sparse.h:76
octave::refcount< octave_idx_type > m_count
Definition: Sparse.h:77
idx_type_pointer m_cidx
Definition: Sparse.h:73
T_pointer m_data
Definition: Sparse.h:71
OCTAVE_API bool any_element_is_nan(void) const
Definition: Sparse.cc:187
OCTAVE_API bool indices_ok(void) const
Definition: Sparse.cc:179
OCTAVE_API T & elem(octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:66
idx_type_pointer m_ridx
Definition: Sparse.h:72
octave_idx_type m_nzmax
Definition: Sparse.h:74
OCTAVE_API void maybe_compress(bool remove_zeros)
Definition: Sparse.cc:120
Definition: Sparse.h:49
virtual ~Sparse(void)
Definition: Sparse.cc:708
octave_idx_type rows(void) const
Definition: Sparse.h:351
T * data(void)
Definition: Sparse.h:574
Sparse(void)
Definition: Sparse.h:259
octave_idx_type columns(void) const
Definition: Sparse.h:353
OCTAVE_API Sparse< T, Alloc > & insert(const Sparse< T, Alloc > &a, octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:1042
static OCTAVE_API Sparse< T, Alloc >::SparseRep * nil_rep(void)
Definition: Sparse.cc:57
OCTAVE_API void print_info(std::ostream &os, const std::string &prefix) const
Definition: Sparse.cc:3094
OCTAVE_API Sparse< T, Alloc > transpose(void) const
Definition: Sparse.cc:1138
OCTAVE_API Sparse< T, Alloc > permute(const Array< octave_idx_type > &vec, bool inv=false) const
Definition: Sparse.cc:929
OCTAVE_API void resize(octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:991
T * xdata(void)
Definition: Sparse.h:576
OCTAVE_API void assign(const octave::idx_vector &i, const Sparse< T, Alloc > &rhs)
Definition: Sparse.cc:1879
octave_idx_type nnz(void) const
Actual number of nonzero terms.
Definition: Sparse.h:339
OCTAVE_API Array< T > array_value(void) const
Definition: Sparse.cc:2766
octave_idx_type * xridx(void)
Definition: Sparse.h:589
dim_vector dims(void) const
Definition: Sparse.h:371
octave_idx_type * ridx(void)
Definition: Sparse.h:583
OCTAVE_API Sparse< T, Alloc > index(const octave::idx_vector &i, bool resize_ok=false) const
Definition: Sparse.cc:1432
OCTAVE_API Sparse< T, Alloc > reshape(const dim_vector &new_dims) const
Definition: Sparse.cc:849
OCTAVE_API Sparse< T, Alloc > sort(octave_idx_type dim=0, sortmode mode=ASCENDING) const
Definition: Sparse.cc:2317
OCTAVE_API Sparse< T, Alloc > diag(octave_idx_type k=0) const
Definition: Sparse.cc:2492
octave_idx_type * cidx(void)
Definition: Sparse.h:596
OCTAVE_API octave_idx_type compute_index(const Array< octave_idx_type > &ra_idx) const
Definition: Sparse.cc:735
octave_idx_type nzmax(void) const
Amount of storage for nonzero elements.
Definition: Sparse.h:336
octave_idx_type * xcidx(void)
Definition: Sparse.h:602
bool isempty(void) const
Definition: Sparse.h:570
Sparse< T, Alloc > maybe_compress(bool remove_zeros=false)
Definition: Sparse.h:531
OCTAVE_API Sparse< T, Alloc > & operator=(const Sparse< T, Alloc > &a)
Definition: Sparse.cc:716
OCTAVE_API void delete_elements(const octave::idx_vector &i)
Definition: Sparse.cc:1196
OCTAVE_API void resize1(octave_idx_type n)
Definition: Sparse.cc:958
octave_idx_type cols(void) const
Definition: Sparse.h:352
void change_capacity(octave_idx_type nz)
Definition: Sparse.h:556
OCTAVE_NORETURN OCTAVE_API T range_error(const char *fcn, octave_idx_type n) const
Definition: Sparse.cc:759
dim_vector m_dimensions
Definition: Sparse.h:251
static OCTAVE_API Sparse< T, Alloc > cat(int dim, octave_idx_type n, const Sparse< T, Alloc > *sparse_list)
Definition: Sparse.cc:2666
Sparse< T, Alloc >::SparseRep * m_rep
Definition: Sparse.h:249
octave_idx_type numel(void) const
Definition: Sparse.h:343
Vector representing the dimensions (size) of an Array.
Definition: dim-vector.h:94
OCTAVE_API bool concat(const dim_vector &dvb, int dim)
This corresponds to cat().
Definition: dim-vector.cc:140
OCTAVE_API std::string str(char sep='x') const
Definition: dim-vector.cc:68
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition: dim-vector.h:335
void resize(int n, int fill_value=0)
Definition: dim-vector.h:272
OCTAVE_API bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
Definition: dim-vector.cc:201
octave_idx_type ndims(void) const
Number of dimensions.
Definition: dim-vector.h:257
OCTAVE_API dim_vector redim(int n) const
Force certain dimensionality, preserving numel ().
Definition: dim-vector.cc:226
OCTAVE_API octave_idx_type safe_numel(void) const
The following function will throw a std::bad_alloc () exception if the requested size is larger than ...
Definition: dim-vector.cc:98
OCTAVE_API void copy_data(octave_idx_type *data) const
Definition: idx-vector.cc:1009
bool is_colon(void) const
Definition: idx-vector.h:556
OCTAVE_API Array< octave_idx_type > as_array(void) const
Definition: idx-vector.cc:1225
idx_vector sorted(bool uniq=false) const
Definition: idx-vector.h:568
octave_idx_type extent(octave_idx_type n) const
Definition: idx-vector.h:540
OCTAVE_API idx_vector inverse_permutation(octave_idx_type n) const
Definition: idx-vector.cc:1124
OCTAVE_API idx_vector complement(octave_idx_type n) const
Definition: idx-vector.cc:1066
bool is_scalar(void) const
Definition: idx-vector.h:559
OCTAVE_API bool isvector(void) const
Definition: idx-vector.cc:1230
OCTAVE_API bool is_cont_range(octave_idx_type n, octave_idx_type &l, octave_idx_type &u) const
Definition: idx-vector.cc:915
static const idx_vector colon
Definition: idx-vector.h:483
dim_vector orig_dimensions(void) const
Definition: idx-vector.h:574
OCTAVE_API octave_idx_type increment(void) const
Definition: idx-vector.cc:968
bool is_range(void) const
Definition: idx-vector.h:562
octave_idx_type length(octave_idx_type n=0) const
Definition: idx-vector.h:537
OCTAVE_API bool is_permutation(octave_idx_type n) const
Definition: idx-vector.cc:1096
bool is_colon_equiv(octave_idx_type n) const
Definition: idx-vector.h:565
const OCTAVE_API octave_idx_type * raw(void)
Definition: idx-vector.cc:997
virtual octave_idx_type numel(void) const
Definition: ov-base.h:365
void set_compare(const compare_fcn_type &comp)
Definition: oct-sort.h:121
void sort(T *data, octave_idx_type nel)
Definition: oct-sort.cc:1520
if_then_else< is_class_type< T >::no, T, Tconst & >::result type
Definition: lo-traits.h:121
OCTAVE_NORETURN liboctave_error_handler current_liboctave_error_handler
Definition: lo-error.c:41
F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE * d
#define OCTAVE_API
Definition: main.in.cc:55
void mx_inline_add2(std::size_t n, R *r, const X *x)
Definition: mx-inlines.cc:126
void mx_inline_sub(std::size_t n, R *r, const X *x, const Y *y)
Definition: mx-inlines.cc:108
void mx_inline_add(std::size_t n, R *r, const X *x, const Y *y)
Definition: mx-inlines.cc:107
bool isnan(bool)
Definition: lo-mappers.h:178
static OCTAVE_NORETURN void err_index_out_of_range(void)
Definition: idx-vector.cc:52
void err_del_index_out_of_range(bool is1d, octave_idx_type idx, octave_idx_type ext)
void err_invalid_resize(void)
void err_nonconformant(const char *op, octave_idx_type op1_len, octave_idx_type op2_len)
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition: oct-locbuf.h:44
#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value)
Definition: oct-locbuf.h:50
sortmode
Definition: oct-sort.h:97
@ ASCENDING
Definition: oct-sort.h:97
@ DESCENDING
Definition: oct-sort.h:97
T::size_type numel(const T &str)
Definition: oct-string.cc:71
const octave_base_value const Array< octave_idx_type > & ra_idx
static T abs(T x)
Definition: pr-output.cc:1678
bool sparse_indices_ok(octave_idx_type *r, octave_idx_type *c, octave_idx_type nrows, octave_idx_type ncols, octave_idx_type nnz)
Definition: sparse-util.cc:87
static void transpose(octave_idx_type N, const octave_idx_type *ridx, const octave_idx_type *cidx, octave_idx_type *ridx2, octave_idx_type *cidx2)
Definition: symrcm.cc:391
F77_RET_T len
Definition: xerbla.cc:61