GNU Octave 10.1.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
 
Loading...
Searching...
No Matches
Array-base.cc
Go to the documentation of this file.
1////////////////////////////////////////////////////////////////////////
2//
3// Copyright (C) 1993-2025 The Octave Project Developers
4//
5// See the file COPYRIGHT.md in the top-level directory of this
6// distribution or <https://octave.org/copyright/>.
7//
8// This file is part of Octave.
9//
10// Octave is free software: you can redistribute it and/or modify it
11// under the terms of the GNU General Public License as published by
12// the Free Software Foundation, either version 3 of the License, or
13// (at your option) any later version.
14//
15// Octave is distributed in the hope that it will be useful, but
16// WITHOUT ANY WARRANTY; without even the implied warranty of
17// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18// GNU General Public License for more details.
19//
20// You should have received a copy of the GNU General Public License
21// along with Octave; see the file COPYING. If not, see
22// <https://www.gnu.org/licenses/>.
23//
24////////////////////////////////////////////////////////////////////////
25
26// Do not include this file directly to instantiate the Array<T> template
27// class in code outside liboctave. Include "Array-oct.cc" or "Array.cc"
28// instead.
29
30// This file should not include config.h. It is only included in other
31// C++ source files that should have included config.h before including
32// this file.
33
34#include <ostream>
35
36#include "Array-util.h"
37#include "Array.h"
38#include "lo-error.h"
39#include "lo-mappers.h"
40#include "oct-locbuf.h"
41
42// One dimensional array class. Handles the reference counting for
43// all the derived classes.
44
45template <typename T, typename Alloc>
48{
49 static ArrayRep nr;
50 return &nr;
51}
52
53// This is similar to the template for containers but specialized for Array.
54// Note we can't specialize a member without also specializing the class.
55template <typename T, typename Alloc>
57 : m_dimensions (dv), m_rep (a.m_rep),
58 m_slice_data (a.m_slice_data), m_slice_len (a.m_slice_len)
59{
60 bool invalid_size = false;
61
62 octave_idx_type new_numel = 0;
63
64 try
65 {
66 // Safe numel may throw a bad_alloc error because the new
67 // dimensions are too large to allocate. Trap that here so
68 // we can issue a more helpful diagnostic that is consistent
69 // with other size mismatch failures.
70
71 new_numel = m_dimensions.safe_numel ();
72 }
73 catch (const std::bad_alloc&)
74 {
75 invalid_size = true;
76 }
77
78 if (invalid_size || new_numel != a.numel ())
79 {
80 std::string dimensions_str = a.m_dimensions.str ();
81 std::string new_dims_str = m_dimensions.str ();
82
83 (*current_liboctave_error_handler)
84 ("reshape: can't reshape %s array to %s array",
85 dimensions_str.c_str (), new_dims_str.c_str ());
86 }
87
88 // This goes here because if an exception is thrown by the above,
89 // destructor will be never called.
90 m_rep->m_count++;
92}
93
94template <typename T, typename Alloc>
95void
97{
98 if (m_rep->m_count > 1)
99 {
100 --m_rep->m_count;
101 m_rep = new ArrayRep (numel (), val);
102 m_slice_data = m_rep->m_data;
103 }
104 else
105 std::fill_n (m_slice_data, m_slice_len, val);
106}
107
108template <typename T, typename Alloc>
109void
111{
112 if (--m_rep->m_count == 0)
113 delete m_rep;
114
115 m_rep = nil_rep ();
116 m_rep->m_count++;
117 m_slice_data = m_rep->m_data;
118 m_slice_len = m_rep->m_len;
119
120 m_dimensions = dim_vector ();
121}
122
123template <typename T, typename Alloc>
124void
126{
127 if (--m_rep->m_count == 0)
128 delete m_rep;
129
130 m_rep = new ArrayRep (dv.safe_numel ());
131 m_slice_data = m_rep->m_data;
132 m_slice_len = m_rep->m_len;
133
134 m_dimensions = dv;
135 m_dimensions.chop_trailing_singletons ();
136}
137
138template <typename T, typename Alloc>
141{
142 Array<T, Alloc> retval = *this;
143
144 if (ndims () > 2)
145 {
146 bool dims_changed = false;
147
148 dim_vector new_dimensions = m_dimensions;
149
150 int k = 0;
151
152 for (int i = 0; i < ndims (); i++)
153 {
154 if (m_dimensions(i) == 1)
155 dims_changed = true;
156 else
157 new_dimensions(k++) = m_dimensions(i);
158 }
159
160 if (dims_changed)
161 {
162 switch (k)
163 {
164 case 0:
165 new_dimensions = dim_vector (1, 1);
166 break;
167
168 case 1:
169 {
170 octave_idx_type tmp = new_dimensions(0);
171
172 new_dimensions.resize (2);
173
174 new_dimensions(0) = tmp;
175 new_dimensions(1) = 1;
176 }
177 break;
178
179 default:
180 new_dimensions.resize (k);
181 break;
182 }
183 }
184
185 retval = Array<T, Alloc> (*this, new_dimensions);
186 }
187
188 return retval;
189}
190
191template <typename T, typename Alloc>
194{
195 return ::compute_index (i, j, m_dimensions);
196}
197
198template <typename T, typename Alloc>
201 octave_idx_type k) const
202{
203 return ::compute_index (i, j, k, m_dimensions);
204}
205
206template <typename T, typename Alloc>
209{
210 return ::compute_index (ra_idx, m_dimensions);
211}
212
213template <typename T, typename Alloc>
216{
217 return m_dimensions.compute_index (ra_idx.data (), ra_idx.numel ());
218}
219
220template <typename T, typename Alloc>
221T&
223{
224 // Do checks directly to avoid recomputing m_slice_len.
225 if (n < 0)
226 octave::err_invalid_index (n);
227 if (n >= m_slice_len)
228 octave::err_index_out_of_range (1, 1, n+1, m_slice_len, m_dimensions);
229
230 return elem (n);
231}
232
233template <typename T, typename Alloc>
234T&
239
240template <typename T, typename Alloc>
241T&
246
247template <typename T, typename Alloc>
248T&
253
254template <typename T, typename Alloc>
257{
258 // Do checks directly to avoid recomputing m_slice_len.
259 if (n < 0)
260 octave::err_invalid_index (n);
261 if (n >= m_slice_len)
262 octave::err_index_out_of_range (1, 1, n+1, m_slice_len, m_dimensions);
263
264 return elem (n);
265}
266
267template <typename T, typename Alloc>
270{
271 return elem (compute_index (i, j));
272}
273
274template <typename T, typename Alloc>
277 octave_idx_type k) const
278{
279 return elem (compute_index (i, j, k));
280}
281
282template <typename T, typename Alloc>
288
289template <typename T, typename Alloc>
292{
293 octave_idx_type r = m_dimensions(0);
294
295 return Array<T, Alloc> (*this, dim_vector (r, 1), k*r, k*r + r);
296}
297
298template <typename T, typename Alloc>
301{
302 octave_idx_type r = m_dimensions(0);
303 octave_idx_type c = m_dimensions(1);
304 octave_idx_type p = r*c;
305
306 return Array<T, Alloc> (*this, dim_vector (r, c), k*p, k*p + p);
307}
308
309template <typename T, typename Alloc>
312{
313 if (up < lo)
314 up = lo;
315
316 return Array<T, Alloc> (*this, dim_vector (up - lo, 1), lo, up);
317}
318
319// Helper class for multi-d dimension permuting (generalized transpose).
320class rec_permute_helper
321{
322public:
323
324 rec_permute_helper (const dim_vector& dv, const Array<octave_idx_type>& perm)
325
326 : m_n (dv.ndims ()), m_top (0), m_dim (new octave_idx_type [2*m_n]),
327 m_stride (m_dim + m_n), m_use_blk (false)
328 {
329 liboctave_panic_unless (m_n == perm.numel ());
330
331 // Get cumulative dimensions.
333 cdim[0] = 1;
334 for (int i = 1; i < m_n+1; i++) cdim[i] = cdim[i-1] * dv(i-1);
335
336 // Setup the permuted strides.
337 for (int k = 0; k < m_n; k++)
338 {
339 int kk = perm(k);
340 m_dim[k] = dv(kk);
341 m_stride[k] = cdim[kk];
342 }
343
344 // Reduce contiguous runs.
345 for (int k = 1; k < m_n; k++)
346 {
347 if (m_stride[k] == m_stride[m_top]*m_dim[m_top])
348 m_dim[m_top] *= m_dim[k];
349 else
350 {
351 m_top++;
352 m_dim[m_top] = m_dim[k];
353 m_stride[m_top] = m_stride[k];
354 }
355 }
356
357 // Determine whether we can use block transposes.
358 m_use_blk = m_top >= 1 && m_stride[1] == 1 && m_stride[0] == m_dim[1];
359
360 }
361
362 OCTAVE_DISABLE_CONSTRUCT_COPY_MOVE (rec_permute_helper)
363
364 ~rec_permute_helper () { delete [] m_dim; }
365
366 template <typename T>
367 void permute (const T *src, T *dest) const { do_permute (src, dest, m_top); }
368
369 // Helper method for fast blocked transpose.
370 template <typename T>
371 static T *
372 blk_trans (const T *src, T *dest, octave_idx_type nr, octave_idx_type nc)
373 {
374 static const octave_idx_type m = 8;
375 OCTAVE_LOCAL_BUFFER (T, blk, m*m);
376 for (octave_idx_type kr = 0; kr < nr; kr += m)
377 for (octave_idx_type kc = 0; kc < nc; kc += m)
378 {
379 octave_idx_type lr = std::min (m, nr - kr);
380 octave_idx_type lc = std::min (m, nc - kc);
381 if (lr == m && lc == m)
382 {
383 const T *ss = src + kc * nr + kr;
384 for (octave_idx_type j = 0; j < m; j++)
385 for (octave_idx_type i = 0; i < m; i++)
386 blk[j*m+i] = ss[j*nr + i];
387 T *dd = dest + kr * nc + kc;
388 for (octave_idx_type j = 0; j < m; j++)
389 for (octave_idx_type i = 0; i < m; i++)
390 dd[j*nc+i] = blk[i*m+j];
391 }
392 else
393 {
394 const T *ss = src + kc * nr + kr;
395 for (octave_idx_type j = 0; j < lc; j++)
396 for (octave_idx_type i = 0; i < lr; i++)
397 blk[j*m+i] = ss[j*nr + i];
398 T *dd = dest + kr * nc + kc;
399 for (octave_idx_type j = 0; j < lr; j++)
400 for (octave_idx_type i = 0; i < lc; i++)
401 dd[j*nc+i] = blk[i*m+j];
402 }
403 }
404
405 return dest + nr*nc;
406 }
407
408private:
409
410 // Recursive N-D generalized transpose
411 template <typename T>
412 T * do_permute (const T *src, T *dest, int lev) const
413 {
414 if (lev == 0)
415 {
416 octave_idx_type step = m_stride[0];
417 octave_idx_type len = m_dim[0];
418 if (step == 1)
419 {
420 std::copy_n (src, len, dest);
421 dest += len;
422 }
423 else
424 {
425 for (octave_idx_type i = 0, j = 0; i < len; i++, j += step)
426 dest[i] = src[j];
427
428 dest += len;
429 }
430 }
431 else if (m_use_blk && lev == 1)
432 dest = blk_trans (src, dest, m_dim[1], m_dim[0]);
433 else
434 {
435 octave_idx_type step = m_stride[lev];
436 octave_idx_type len = m_dim[lev];
437 for (octave_idx_type i = 0, j = 0; i < len; i++, j+= step)
438 dest = do_permute (src + i * step, dest, lev-1);
439 }
440
441 return dest;
442 }
443
444 //--------
445
446 // STRIDE occupies the last half of the space allocated for dim to
447 // avoid a double allocation.
448
449 int m_n;
450 int m_top;
451 octave_idx_type *m_dim;
452 octave_idx_type *m_stride;
453 bool m_use_blk;
454};
455
456template <typename T, typename Alloc>
458Array<T, Alloc>::permute (const Array<octave_idx_type>& perm_vec_arg, bool inv) const
459{
460 Array<T, Alloc> retval;
461
462 Array<octave_idx_type> perm_vec = perm_vec_arg;
463
464 dim_vector dv = dims ();
465
466 int perm_vec_len = perm_vec_arg.numel ();
467
468 if (perm_vec_len < dv.ndims ())
469 (*current_liboctave_error_handler)
470 ("%s: invalid permutation vector", inv ? "ipermute" : "permute");
471
472 dim_vector dv_new = dim_vector::alloc (perm_vec_len);
473
474 // Append singleton dimensions as needed.
475 dv.resize (perm_vec_len, 1);
476
477 // Need this array to check for identical elements in permutation array.
478 OCTAVE_LOCAL_BUFFER_INIT (bool, checked, perm_vec_len, false);
479
480 bool identity = true;
481
482 // Find dimension vector of permuted array.
483 for (int i = 0; i < perm_vec_len; i++)
484 {
485 octave_idx_type perm_elt = perm_vec.elem (i);
486 if (perm_elt >= perm_vec_len || perm_elt < 0)
487 (*current_liboctave_error_handler)
488 ("%s: permutation vector contains an invalid element",
489 inv ? "ipermute" : "permute");
490
491 if (checked[perm_elt])
492 (*current_liboctave_error_handler)
493 ("%s: permutation vector cannot contain identical elements",
494 inv ? "ipermute" : "permute");
495 else
496 {
497 checked[perm_elt] = true;
498 identity = identity && perm_elt == i;
499 }
500 }
501
502 if (identity)
503 return *this;
504
505 if (inv)
506 {
507 for (int i = 0; i < perm_vec_len; i++)
508 perm_vec(perm_vec_arg(i)) = i;
509 }
510
511 for (int i = 0; i < perm_vec_len; i++)
512 dv_new(i) = dv(perm_vec(i));
513
514 retval = Array<T, Alloc> (dv_new);
515
516 if (numel () > 0)
517 {
518 rec_permute_helper rh (dv, perm_vec);
519 rh.permute (data (), retval.rwdata ());
520 }
522 return retval;
523}
524
525// Helper class for multi-d index reduction and recursive
526// indexing/indexed assignment. Rationale: we could avoid recursion
527// using a state machine instead. However, using recursion is much
528// more amenable to possible parallelization in the future.
529// Also, the recursion solution is cleaner and more understandable.
530
531class rec_index_helper
532{
533public:
534
535 rec_index_helper (const dim_vector& dv, const Array<octave::idx_vector>& ia)
536 : m_n (ia.numel ()), m_top (0), m_dim (new octave_idx_type [2*m_n]),
537 m_cdim (m_dim + m_n), m_idx (new octave::idx_vector [m_n])
538 {
539 liboctave_panic_unless (m_n > 0 && dv.ndims () == std::max (m_n, static_cast<octave_idx_type> (2)));
540
541 m_dim[0] = dv(0);
542 m_cdim[0] = 1;
543 m_idx[0] = ia(0);
544
545 for (octave_idx_type i = 1; i < m_n; i++)
546 {
547 // Try reduction...
548 if (m_idx[m_top].maybe_reduce (m_dim[m_top], ia(i), dv(i)))
549 {
550 // Reduction successful, fold dimensions.
551 m_dim[m_top] *= dv(i);
552 }
553 else
554 {
555 // Unsuccessful, store index & cumulative m_dim.
556 m_top++;
557 m_idx[m_top] = ia(i);
558 m_dim[m_top] = dv(i);
559 m_cdim[m_top] = m_cdim[m_top-1] * m_dim[m_top-1];
560 }
562 }
563
564 OCTAVE_DISABLE_CONSTRUCT_COPY_MOVE (rec_index_helper)
565
566 ~rec_index_helper () { delete [] m_idx; delete [] m_dim; }
567
568 template <typename T>
569 void index (const T *src, T *dest) const { do_index (src, dest, m_top); }
570
571 template <typename T>
572 void assign (const T *src, T *dest) const { do_assign (src, dest, m_top); }
573
574 template <typename T>
575 void fill (const T& val, T *dest) const { do_fill (val, dest, m_top); }
576
577 bool is_cont_range (octave_idx_type& l, octave_idx_type& u) const
578 {
579 return m_top == 0 && m_idx[0].is_cont_range (m_dim[0], l, u);
580 }
581
582private:
583
584 // Recursive N-D indexing
585 template <typename T>
586 T * do_index (const T *src, T *dest, octave_idx_type lev) const
588 if (lev == 0)
589 dest += m_idx[0].index (src, m_dim[0], dest);
590 else
592 octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
593 octave_idx_type d = m_cdim[lev];
594 for (octave_idx_type i = 0; i < nn; i++)
595 dest = do_index (src + d*m_idx[lev].xelem (i), dest, lev-1);
596 }
597
598 return dest;
599 }
600
601 // Recursive N-D indexed assignment
602 template <typename T>
603 const T * do_assign (const T *src, T *dest, octave_idx_type lev) const
604 {
605 if (lev == 0)
606 src += m_idx[0].assign (src, m_dim[0], dest);
607 else
608 {
609 octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
610 octave_idx_type d = m_cdim[lev];
611 for (octave_idx_type i = 0; i < nn; i++)
612 src = do_assign (src, dest + d*m_idx[lev].xelem (i), lev-1);
613 }
614
615 return src;
616 }
617
618 // Recursive N-D indexed assignment
619 template <typename T>
620 void do_fill (const T& val, T *dest, octave_idx_type lev) const
621 {
622 if (lev == 0)
623 m_idx[0].fill (val, m_dim[0], dest);
624 else
625 {
626 octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
627 octave_idx_type d = m_cdim[lev];
628 for (octave_idx_type i = 0; i < nn; i++)
629 do_fill (val, dest + d*m_idx[lev].xelem (i), lev-1);
630 }
631 }
632
633 //--------
634
635 // CDIM occupies the last half of the space allocated for dim to
636 // avoid a double allocation.
637
638 octave_idx_type m_n;
639 octave_idx_type m_top;
640 octave_idx_type *m_dim;
641 octave_idx_type *m_cdim;
642 octave::idx_vector *m_idx;
643};
645// Helper class for multi-d recursive resizing
646// This handles resize () in an efficient manner, touching memory only
647// once (apart from reinitialization)
648class rec_resize_helper
649{
650public:
651
652 rec_resize_helper (const dim_vector& ndv, const dim_vector& odv)
653 : m_cext (nullptr), m_sext (nullptr), m_dext (nullptr), m_n (0)
654 {
655 int l = ndv.ndims ();
656 liboctave_panic_unless (odv.ndims () == l);
657 octave_idx_type ld = 1;
658 int i = 0;
659 for (; i < l-1 && ndv(i) == odv(i); i++) ld *= ndv(i);
660 m_n = l - i;
661 m_cext = new octave_idx_type [3*m_n];
662 // Trick to avoid three allocations
663 m_sext = m_cext + m_n;
664 m_dext = m_sext + m_n;
665
666 octave_idx_type sld = ld;
667 octave_idx_type dld = ld;
668 for (int j = 0; j < m_n; j++)
670 m_cext[j] = std::min (ndv(i+j), odv(i+j));
671 m_sext[j] = sld *= odv(i+j);
672 m_dext[j] = dld *= ndv(i+j);
673 }
674 m_cext[0] *= ld;
675 }
676
677 OCTAVE_DISABLE_CONSTRUCT_COPY_MOVE (rec_resize_helper)
678
679 ~rec_resize_helper () { delete [] m_cext; }
680
681 template <typename T>
682 void resize_fill (const T *src, T *dest, const T& rfv) const
683 { do_resize_fill (src, dest, rfv, m_n-1); }
685private:
687 // recursive resizing
688 template <typename T>
689 void do_resize_fill (const T *src, T *dest, const T& rfv, int lev) const
690 {
691 if (lev == 0)
692 {
693 std::copy_n (src, m_cext[0], dest);
694 std::fill_n (dest + m_cext[0], m_dext[0] - m_cext[0], rfv);
696 else
697 {
698 octave_idx_type sd, dd, k;
699 sd = m_sext[lev-1];
700 dd = m_dext[lev-1];
701 for (k = 0; k < m_cext[lev]; k++)
702 do_resize_fill (src + k * sd, dest + k * dd, rfv, lev - 1);
703
704 std::fill_n (dest + k * dd, m_dext[lev] - k * dd, rfv);
706 }
707
708 //--------
709
710 octave_idx_type *m_cext;
711 octave_idx_type *m_sext;
712 octave_idx_type *m_dext;
713 int m_n;
714};
715
716template <typename T, typename Alloc>
718Array<T, Alloc>::index (const octave::idx_vector& i) const
719{
720 // Colon:
721 //
722 // object | index | result orientation
723 // ---------+----------+-------------------
724 // anything | colon | column vector
725 //
726 // Numeric array or logical mask:
727 //
728 // Note that logical mask indices are always transformed to vectors
729 // before we see them.
730 //
731 // object | index | result orientation
732 // ---------+----------+-------------------
733 // vector | vector | indexed object
734 // | other | same size as index
735 // ---------+----------+-------------------
736 // array | anything | same size as index
737
739 Array<T, Alloc> retval;
740
741 if (i.is_colon ())
742 {
743 // A(:) produces a shallow copy as a column vector.
744 retval = Array<T, Alloc> (*this, dim_vector (n, 1));
745 }
746 else
747 {
748 if (i.extent (n) != n)
749 octave::err_index_out_of_range (1, 1, i.extent (n), n, m_dimensions);
750
751 dim_vector result_dims = i.orig_dimensions ();
752 octave_idx_type idx_len = i.length ();
753
754 if (n != 1 && is_nd_vector () && idx_len != 1
755 && result_dims.is_nd_vector ())
756 {
757 // Indexed object and index are both vectors. Set result size
758 // and orientation as above.
759
760 const dim_vector& dv = dims ();
761
762 result_dims = dv.make_nd_vector (idx_len);
763 }
765 octave_idx_type l, u;
766 if (idx_len != 0 && i.is_cont_range (n, l, u))
767 // If suitable, produce a shallow slice.
768 retval = Array<T, Alloc> (*this, result_dims, l, u);
769 else
770 {
771 // Don't use resize here to avoid useless initialization for POD
772 // types.
773 retval = Array<T, Alloc> (result_dims);
775 if (idx_len != 0)
776 i.index (data (), n, retval.rwdata ());
778 }
779
780 return retval;
781}
782
783template <typename T, typename Alloc>
785Array<T, Alloc>::index (const octave::idx_vector& i, const octave::idx_vector& j) const
786{
787 // Get dimensions, allowing Fortran indexing in the 2nd dim.
788 const dim_vector& dv = m_dimensions.redim (2);
790 octave_idx_type c = dv(1);
791 Array<T, Alloc> retval;
792
793 if (i.is_colon () && j.is_colon ())
794 {
795 // A(:,:) produces a shallow copy.
796 retval = Array<T, Alloc> (*this, dv);
797 }
798 else
799 {
800 if (i.extent (r) != r)
801 octave::err_index_out_of_range (2, 1, i.extent (r), r, m_dimensions); // throws
802 if (j.extent (c) != c)
803 octave::err_index_out_of_range (2, 2, j.extent (c), c, m_dimensions); // throws
806 octave_idx_type il = i.length (r);
807 octave_idx_type jl = j.length (c);
808
809 octave::idx_vector ii (i);
810
811 if (ii.maybe_reduce (r, j, c))
813 octave_idx_type l, u;
814 if (ii.length () > 0 && ii.is_cont_range (n, l, u))
815 // If suitable, produce a shallow slice.
816 retval = Array<T, Alloc> (*this, dim_vector (il, jl), l, u);
817 else
818 {
819 // Don't use resize to avoid useless initialization for POD types.
820 retval = Array<T, Alloc> (dim_vector (il, jl));
821
822 ii.index (data (), n, retval.rwdata ());
824 }
825 else
826 {
827 // Don't use resize to avoid useless initialization for POD types.
828 retval = Array<T, Alloc> (dim_vector (il, jl));
829
830 const T *src = data ();
831 T *dest = retval.rwdata ();
832
833 for (octave_idx_type k = 0; k < jl; k++)
834 dest += i.index (src + r * j.xelem (k), r, dest);
835 }
836 }
838 return retval;
839}
840
841template <typename T, typename Alloc>
844{
845 int ial = ia.numel ();
846 Array<T, Alloc> retval;
847
848 // FIXME: is this dispatching necessary?
849 if (ial == 1)
850 retval = index (ia(0));
851 else if (ial == 2)
852 retval = index (ia(0), ia(1));
853 else if (ial > 0)
854 {
855 // Get dimensions, allowing Fortran indexing in the last dim.
856 dim_vector dv = m_dimensions.redim (ial);
857
858 // Check for out of bounds conditions.
859 bool all_colons = true;
860 for (int i = 0; i < ial; i++)
861 {
862 if (ia(i).extent (dv(i)) != dv(i))
863 octave::err_index_out_of_range (ial, i+1, ia(i).extent (dv(i)), dv(i),
864 m_dimensions); // throws
865
866 all_colons = all_colons && ia(i).is_colon ();
867 }
868
869 if (all_colons)
870 {
871 // A(:,:,...,:) produces a shallow copy.
873 retval = Array<T, Alloc> (*this, dv);
874 }
875 else
876 {
877 // Form result dimensions.
878 dim_vector rdv = dim_vector::alloc (ial);
879 for (int i = 0; i < ial; i++) rdv(i) = ia(i).length (dv(i));
881
882 // Prepare for recursive indexing
883 rec_index_helper rh (dv, ia);
884
885 octave_idx_type l, u;
886 if (rh.is_cont_range (l, u))
887 // If suitable, produce a shallow slice.
888 retval = Array<T, Alloc> (*this, rdv, l, u);
889 else
890 {
891 // Don't use resize to avoid useless initialization for POD types.
892 retval = Array<T, Alloc> (rdv);
893
894 // Do it.
895 rh.index (data (), retval.rwdata ());
896 }
897 }
898 }
899
900 return retval;
901}
902
903// The default fill value. Override if you want a different one.
904
905template <typename T, typename Alloc>
906T
908{
909 static T zero = T ();
910 return zero;
911}
912
913// Yes, we could do resize using index & assign. However, that would
914// possibly involve a lot more memory traffic than we actually need.
915
916template <typename T, typename Alloc>
917void
919{
920 if (n < 0 || ndims () != 2)
921 octave::err_invalid_resize ();
922
923 dim_vector dv;
924 // This is driven by Matlab's behavior of giving a *row* vector
925 // on some out-of-bounds assignments. Specifically, Matlab
926 // allows a(i) with out-of-bouds i when a is either of 0x0, 1x0,
927 // 1x1, 0xN, and gives a row vector in all cases (yes, even the
928 // last one, search me why). Giving a column vector would make
929 // much more sense (given the way trailing singleton dims are
930 // treated).
931 bool invalid = false;
932 if (rows () == 0 || rows () == 1)
933 dv = dim_vector (1, n);
934 else if (columns () == 1)
935 dv = dim_vector (n, 1);
936 else
937 invalid = true;
938
939 if (invalid)
940 octave::err_invalid_resize ();
941
942 octave_idx_type nx = numel ();
943 if (n == nx - 1 && n > 0)
944 {
945 // Stack "pop" operation.
946 if (m_rep->m_count == 1)
947 m_slice_data[m_slice_len-1] = T ();
948 m_slice_len--;
949 m_dimensions = dv;
950 }
951 else if (n == nx + 1 && nx > 0)
952 {
953 // Stack "push" operation.
954 if (m_rep->m_count == 1
955 && m_slice_data + m_slice_len < m_rep->m_data + m_rep->m_len)
956 {
957 m_slice_data[m_slice_len++] = rfv;
958 m_dimensions = dv;
959 }
960 else
961 {
962 static const octave_idx_type MAX_STACK_CHUNK = 1024;
963 octave_idx_type nn = n + std::min (nx, MAX_STACK_CHUNK);
964 Array<T, Alloc> tmp (Array<T, Alloc> (dim_vector (nn, 1)), dv, 0, n);
965 T *dest = tmp.rwdata ();
966
967 std::copy_n (data (), nx, dest);
968 dest[nx] = rfv;
969
970 *this = tmp;
971 }
972 }
973 else if (n != nx)
974 {
976 T *dest = tmp.rwdata ();
977
978 octave_idx_type n0 = std::min (n, nx);
979 octave_idx_type n1 = n - n0;
980 std::copy_n (data (), n0, dest);
981 std::fill_n (dest + n0, n1, rfv);
982
983 *this = tmp;
984 }
985}
986
987template <typename T, typename Alloc>
988void
990{
991 if (r < 0 || c < 0 || ndims () != 2)
992 octave::err_invalid_resize ();
993
994 octave_idx_type rx = rows ();
995 octave_idx_type cx = columns ();
996 if (r != rx || c != cx)
997 {
999 T *dest = tmp.rwdata ();
1000
1001 octave_idx_type r0 = std::min (r, rx);
1002 octave_idx_type r1 = r - r0;
1003 octave_idx_type c0 = std::min (c, cx);
1004 octave_idx_type c1 = c - c0;
1005 const T *src = data ();
1006 if (r == rx)
1007 {
1008 std::copy_n (src, r * c0, dest);
1009 dest += r * c0;
1010 }
1011 else
1012 {
1013 for (octave_idx_type k = 0; k < c0; k++)
1014 {
1015 std::copy_n (src, r0, dest);
1016 src += rx;
1017 dest += r0;
1018 std::fill_n (dest, r1, rfv);
1019 dest += r1;
1020 }
1021 }
1022
1023 std::fill_n (dest, r * c1, rfv);
1024
1025 *this = tmp;
1026 }
1027}
1028
1029template <typename T, typename Alloc>
1030void
1031Array<T, Alloc>::resize (const dim_vector& dv, const T& rfv)
1032{
1033 int dvl = dv.ndims ();
1034 if (dvl == 2)
1035 resize2 (dv(0), dv(1), rfv);
1036 else if (m_dimensions != dv)
1037 {
1038 if (m_dimensions.ndims () > dvl || dv.any_neg ())
1039 octave::err_invalid_resize ();
1040
1041 Array<T, Alloc> tmp (dv);
1042 // Prepare for recursive resizing.
1043 rec_resize_helper rh (dv, m_dimensions.redim (dvl));
1044
1045 // Do it.
1046 rh.resize_fill (data (), tmp.rwdata (), rfv);
1047 *this = tmp;
1048 }
1049}
1050
1051template <typename T, typename Alloc>
1053Array<T, Alloc>::index (const octave::idx_vector& i, bool resize_ok, const T& rfv) const
1054{
1055 Array<T, Alloc> tmp = *this;
1056 if (resize_ok)
1057 {
1058 octave_idx_type n = numel ();
1059 octave_idx_type nx = i.extent (n);
1060 if (n != nx)
1061 {
1062 if (i.is_scalar ())
1063 return Array<T, Alloc> (dim_vector (1, 1), rfv);
1064 else
1065 tmp.resize1 (nx, rfv);
1066 }
1067
1068 if (tmp.numel () != nx)
1069 return Array<T, Alloc> ();
1070 }
1071
1072 return tmp.index (i);
1073}
1074
1075template <typename T, typename Alloc>
1077Array<T, Alloc>::index (const octave::idx_vector& i, const octave::idx_vector& j,
1078 bool resize_ok, const T& rfv) const
1079{
1080 Array<T, Alloc> tmp = *this;
1081 if (resize_ok)
1082 {
1083 const dim_vector& dv = m_dimensions.redim (2);
1084 octave_idx_type r = dv(0);
1085 octave_idx_type c = dv(1);
1086 octave_idx_type rx = i.extent (r);
1087 octave_idx_type cx = j.extent (c);
1088 if (r != rx || c != cx)
1089 {
1090 if (i.is_scalar () && j.is_scalar ())
1091 return Array<T, Alloc> (dim_vector (1, 1), rfv);
1092 else
1093 tmp.resize2 (rx, cx, rfv);
1094 }
1095
1096 if (tmp.rows () != rx || tmp.columns () != cx)
1097 return Array<T, Alloc> ();
1098 }
1099
1100 return tmp.index (i, j);
1101}
1102
1103template <typename T, typename Alloc>
1106 bool resize_ok, const T& rfv) const
1107{
1108 Array<T, Alloc> tmp = *this;
1109 if (resize_ok)
1110 {
1111 int ial = ia.numel ();
1112 const dim_vector& dv = m_dimensions.redim (ial);
1113 dim_vector dvx = dim_vector::alloc (ial);
1114 for (int i = 0; i < ial; i++)
1115 dvx(i) = ia(i).extent (dv(i));
1116 if (! (dvx == dv))
1117 {
1118 bool all_scalars = true;
1119 for (int i = 0; i < ial; i++)
1120 all_scalars = all_scalars && ia(i).is_scalar ();
1121 if (all_scalars)
1122 return Array<T, Alloc> (dim_vector (1, 1), rfv);
1123 else
1124 tmp.resize (dvx, rfv);
1125
1126 if (tmp.m_dimensions != dvx)
1127 return Array<T, Alloc> ();
1128 }
1129 }
1130
1131 return tmp.index (ia);
1132}
1133
1134template <typename T, typename Alloc>
1136Array<T, Alloc>::index (const Array<octave::idx_vector>& ia, bool resize_ok) const
1137{
1138 return index (ia, resize_ok, resize_fill_value ());
1139}
1140
1141template <typename T, typename Alloc>
1142void
1143Array<T, Alloc>::assign (const octave::idx_vector& i, const Array<T, Alloc>& rhs, const T& rfv)
1144{
1145 octave_idx_type n = numel ();
1146 octave_idx_type rhl = rhs.numel ();
1147
1148 if (rhl != 1 && i.length (n) != rhl)
1149 octave::err_nonconformant ("=", dim_vector(i.length(n), 1), rhs.dims());
1150
1151 octave_idx_type nx = i.extent (n);
1152 bool colon = i.is_colon_equiv (nx);
1153 // Try to resize first if necessary.
1154 if (nx != n)
1155 {
1156 // Optimize case A = []; A(1:n) = X with A empty.
1157 if (m_dimensions.zero_by_zero () && colon)
1158 {
1159 if (rhl == 1)
1160 *this = Array<T, Alloc> (dim_vector (1, nx), rhs(0));
1161 else
1162 *this = Array<T, Alloc> (rhs, dim_vector (1, nx));
1163 return;
1164 }
1165
1166 resize1 (nx, rfv);
1167 n = numel ();
1168 }
1169
1170 if (colon)
1171 {
1172 // A(:) = X makes a full fill or a shallow copy.
1173 if (rhl == 1)
1174 fill (rhs(0));
1175 else
1176 *this = rhs.reshape (m_dimensions);
1177 }
1178 else
1179 {
1180 if (rhl == 1)
1181 i.fill (rhs(0), n, rwdata ());
1182 else
1183 i.assign (rhs.data (), n, rwdata ());
1184 }
1185}
1186
1187// Assignment to a 2-dimensional array
1188template <typename T, typename Alloc>
1189void
1190Array<T, Alloc>::assign (const octave::idx_vector& i, const octave::idx_vector& j,
1191 const Array<T, Alloc>& rhs, const T& rfv)
1192{
1193 bool initial_dims_all_zero = m_dimensions.all_zero ();
1194
1195 // Get RHS extents, discarding singletons.
1196 dim_vector rhdv = rhs.dims ();
1197
1198 // Get LHS extents, allowing Fortran indexing in the second dim.
1199 dim_vector dv = m_dimensions.redim (2);
1200
1201 // Check for out-of-bounds and form resizing m_dimensions.
1202 dim_vector rdv;
1203
1204 // In the special when all dimensions are zero, colons are allowed
1205 // to inquire the shape of RHS. The rules are more obscure, so we
1206 // solve that elsewhere.
1207 if (initial_dims_all_zero)
1208 rdv = zero_dims_inquire (i, j, rhdv);
1209 else
1210 {
1211 rdv(0) = i.extent (dv(0));
1212 rdv(1) = j.extent (dv(1));
1213 }
1214
1215 bool isfill = rhs.numel () == 1;
1216 octave_idx_type il = i.length (rdv(0));
1217 octave_idx_type jl = j.length (rdv(1));
1218 rhdv.chop_all_singletons ();
1219 bool match = (isfill
1220 || (rhdv.ndims () == 2 && il == rhdv(0) && jl == rhdv(1)));
1221 match = match || (il == 1 && jl == rhdv(0) && rhdv(1) == 1);
1222
1223 if (match)
1224 {
1225 bool all_colons = (i.is_colon_equiv (rdv(0))
1226 && j.is_colon_equiv (rdv(1)));
1227 // Resize if requested.
1228 if (rdv != dv)
1229 {
1230 // Optimize case A = []; A(1:m, 1:n) = X
1231 if (dv.zero_by_zero () && all_colons)
1232 {
1233 if (isfill)
1234 *this = Array<T, Alloc> (rdv, rhs(0));
1235 else
1236 *this = Array<T, Alloc> (rhs, rdv);
1237 return;
1238 }
1239
1240 resize (rdv, rfv);
1241 dv = m_dimensions;
1242 }
1243
1244 if (all_colons)
1245 {
1246 // A(:,:) = X makes a full fill or a shallow copy
1247 if (isfill)
1248 fill (rhs(0));
1249 else
1250 *this = rhs.reshape (m_dimensions);
1251 }
1252 else
1253 {
1254 // The actual work.
1255 octave_idx_type n = numel ();
1256 octave_idx_type r = dv(0);
1257 octave_idx_type c = dv(1);
1258 octave::idx_vector ii (i);
1259
1260 const T *src = rhs.data ();
1261 T *dest = rwdata ();
1262
1263 // Try reduction first.
1264 if (ii.maybe_reduce (r, j, c))
1265 {
1266 if (isfill)
1267 ii.fill (*src, n, dest);
1268 else
1269 ii.assign (src, n, dest);
1270 }
1271 else
1272 {
1273 if (isfill)
1274 {
1275 for (octave_idx_type k = 0; k < jl; k++)
1276 i.fill (*src, r, dest + r * j.xelem (k));
1277 }
1278 else
1279 {
1280 for (octave_idx_type k = 0; k < jl; k++)
1281 src += i.assign (src, r, dest + r * j.xelem (k));
1282 }
1283 }
1284 }
1285 }
1286 // any empty RHS can be assigned to an empty LHS
1287 else if ((il != 0 && jl != 0) || (rhdv(0) != 0 && rhdv(1) != 0))
1288 octave::err_nonconformant ("=", il, jl, rhs.dim1 (), rhs.dim2 ());
1289}
1290
1291 // Assignment to a multi-dimensional array
// Assign RHS to the elements of *this selected by the index list IA,
// resizing (padded with RFV) when the indices reach past the current
// bounds.  A scalar RHS fills every selected element.
// NOTE(review): the line carrying the function name was lost in
// extraction; the body is untouched apart from comments.
1292 template <typename T, typename Alloc>
1293 void
1295 const Array<T, Alloc>& rhs, const T& rfv)
1296 {
1297 int ial = ia.numel ();
1298
1299 // FIXME: is this dispatching necessary / desirable?
// Dispatch 1-index and 2-index forms to the specialized overloads.
1300 if (ial == 1)
1301 assign (ia(0), rhs, rfv);
1302 else if (ial == 2)
1303 assign (ia(0), ia(1), rhs, rfv);
1304 else if (ial > 0)
1305 {
1306 bool initial_dims_all_zero = m_dimensions.all_zero ();
1307
1308 // Get RHS extents, discarding singletons.
1309 dim_vector rhdv = rhs.dims ();
1310
1311 // Get LHS extents, allowing Fortran indexing in the second dim.
1312 dim_vector dv = m_dimensions.redim (ial);
1313
1314 // Get the extents forced by indexing.
1315 dim_vector rdv;
1316
1317 // In the special case when all dimensions are zero, colons are
1318 // allowed to inquire the shape of RHS. The rules are more
1319 // obscure, so we solve that elsewhere.
1320 if (initial_dims_all_zero)
1321 rdv = zero_dims_inquire (ia, rhdv);
1322 else
1323 {
1324 rdv = dim_vector::alloc (ial);
1325 for (int i = 0; i < ial; i++)
1326 rdv(i) = ia(i).extent (dv(i));
1327 }
1328
1329 // Check whether LHS and RHS match, up to singleton dims.
1330 bool match = true;
1331 bool all_colons = true;
1332 bool isfill = rhs.numel () == 1;
1333
1334 rhdv.chop_all_singletons ();
1335 int j = 0;
1336 int rhdvl = rhdv.ndims ();
// Compare non-singleton index lengths against RHS dims in order.
1337 for (int i = 0; i < ial; i++)
1338 {
1339 all_colons = all_colons && ia(i).is_colon_equiv (rdv(i));
1340 octave_idx_type l = ia(i).length (rdv(i));
1341 if (l == 1) continue;
1342 match = match && j < rhdvl && l == rhdv(j++);
1343 }
1344
1345 match = match && (j == rhdvl || rhdv(j) == 1);
1346 match = match || isfill;
1347
1348 if (match)
1349 {
1350 // Resize first if necessary.
1351 if (rdv != dv)
1352 {
1353 // Optimize case A = []; A(1:m, 1:n) = X
1354 if (dv.zero_by_zero () && all_colons)
1355 {
1357 if (isfill)
1358 *this = Array<T, Alloc> (rdv, rhs(0));
1359 else
1360 *this = Array<T, Alloc> (rhs, rdv);
1361 return;
1362 }
1363
1364 resize (rdv, rfv);
1365 dv = rdv;
1366 }
1367
1368 if (all_colons)
1369 {
1370 // A(:,:,...,:) = X makes a full fill or a shallow copy.
1371 if (isfill)
1372 fill (rhs(0));
1373 else
1374 *this = rhs.reshape (m_dimensions);
1375 }
1376 else
1377 {
1378 // Do the actual work.
1379
1380 // Prepare for recursive indexing
1381 rec_index_helper rh (dv, ia);
1382
1383 // Do it.
1384 if (isfill)
1385 rh.fill (rhs(0), rwdata ());
1386 else
1387 rh.assign (rhs.data (), rwdata ());
1388 }
1389 }
1390 else
1391 {
1392 // dimension mismatch, unless LHS and RHS both empty
1393 bool lhsempty, rhsempty;
1394 lhsempty = rhsempty = false;
1395 dim_vector lhs_dv = dim_vector::alloc (ial);
1396 for (int i = 0; i < ial; i++)
1397 {
1398 octave_idx_type l = ia(i).length (rdv(i));
1399 lhs_dv(i) = l;
1400 lhsempty = lhsempty || (l == 0);
1401 rhsempty = rhsempty || (rhdv(j++) == 0);
1402 }
1403 if (! lhsempty || ! rhsempty)
1404 {
1405 lhs_dv.chop_trailing_singletons ();
1406 octave::err_nonconformant ("=", lhs_dv, rhdv);
1407 }
1408 }
1409 }
1410 }
1411
// Convenience overload: indexed assignment using the class's default
// resize fill value.
1412 template <typename T, typename Alloc>
1413 void
1415 const Array<T, Alloc>& rhs)
1416 {
1417 assign (ia, rhs, resize_fill_value ());
1418 }
1419
1420/*
1421%!shared a
1422%! a = [1 2; 3 4];
1423%!error <op1 is 1x2, op2 is 1x3> a(1,:) = [1 2 3]
1424%!error <op1 is 2x1, op2 is 1x3> a(:,1) = [1 2 3]
1425%!error <op1 is 2x2, op2 is 2x1> a(1:2,2:3) = [1;2]
1426*/
1427
1428template <typename T, typename Alloc>
1429void
1430Array<T, Alloc>::delete_elements (const octave::idx_vector& i)
1431{
1432 octave_idx_type n = numel ();
1433 if (i.is_colon ())
1434 {
1435 *this = Array<T, Alloc> ();
1436 }
1437 else if (i.length (n) != 0)
1438 {
1439 if (i.extent (n) != n)
1440 octave::err_del_index_out_of_range (true, i.extent (n), n);
1441
1442 octave_idx_type l, u;
1443 bool col_vec = ndims () == 2 && columns () == 1 && rows () != 1;
1444 if (i.is_scalar () && i(0) == n-1 && m_dimensions.isvector ())
1445 {
1446 // Stack "pop" operation.
1447 resize1 (n-1);
1448 }
1449 else if (i.is_cont_range (n, l, u))
1450 {
1451 // Special case deleting a contiguous range.
1452 octave_idx_type m = n + l - u;
1453 Array<T, Alloc> tmp (dim_vector (col_vec ? m : 1, ! col_vec ? m : 1));
1454 const T *src = data ();
1455 T *dest = tmp.rwdata ();
1456 std::copy_n (src, l, dest);
1457 std::copy (src + u, src + n, dest + l);
1458 *this = tmp;
1459 }
1460 else
1461 {
1462 // Use index.
1463 *this = index (i.complement (n));
1464 }
1465 }
1466}
1467
// Remove the slices selected by I along dimension DIM (implements
// e.g. A(:,i,:) = []).  Contiguous ranges are spliced with block
// copies; anything else falls back to indexing by the complement.
1468 template <typename T, typename Alloc>
1469 void
1470 Array<T, Alloc>::delete_elements (int dim, const octave::idx_vector& i)
1471 {
1472 if (dim < 0)
1473 (*current_liboctave_error_handler) ("invalid dimension in delete_elements");
1474
1475 dim_vector dimensions = m_dimensions;
1476
1477 if (dim >= ndims ())
1478 dimensions.resize (dim + 1, 1);
1479
1480 octave_idx_type ndim = dimensions.ndims ();
1481 octave_idx_type n = dimensions(dim);
1482
1483 if (i.is_colon ())
1484 {
1485 *this = Array<T, Alloc> ();
1486 }
1487 else if (i.length (n) != 0)
1488 {
1489 if (i.extent (n) != n)
1490 octave::err_del_index_out_of_range (false, i.extent (n), n);
1491
1492 octave_idx_type l, u;
1493
1494 if (i.is_cont_range (n, l, u))
1495 {
1496 // Special case deleting a contiguous range.
1497 octave_idx_type nd = n + l - u;
1498 octave_idx_type dl = 1;
1499 octave_idx_type du = 1;
1500 dim_vector rdv = dimensions;
1501 rdv(dim) = nd;
// dl = product of dims below DIM, du = product of dims above DIM.
1502 for (int k = 0; k < dim; k++) dl *= dimensions(k);
1503 for (int k = dim + 1; k < ndim; k++) du *= dimensions(k);
1504
// Copy the retained slices block by block in column-major order.
1505 Array<T, Alloc> tmp = Array<T, Alloc> (rdv);
1506 const T *src = data ();
1507 T *dest = tmp.rwdata ();
// Scale range endpoints by the inner block size.
1509 l *= dl; u *= dl; n *= dl;
1510 for (octave_idx_type k = 0; k < du; k++)
1511 {
1512 std::copy_n (src, l, dest);
1513 dest += l;
1514 std::copy (src + u, src + n, dest);
1515 dest += n - u;
1516 src += n;
1517 }
1518
1519 *this = tmp;
1520 }
1521 else
1522 {
1523 // Use index.
1524 Array<octave::idx_vector> ia (dim_vector (ndim, 1), octave::idx_vector::colon);
1525 ia (dim) = i.complement (n);
1526 *this = index (ia);
1527 }
1528 }
1529 }
1530
// Null assignment with a list of indices (A(ia...) = []).  At most one
// index may be non-colon; that index selects the dimension to delete
// along.  NOTE(review): the function name line was lost in extraction.
1531 template <typename T, typename Alloc>
1532 void
1534 {
1535 int ial = ia.numel ();
1536
1537 if (ial == 1)
1538 delete_elements (ia(0));
1539 else
1540 {
// Find the (single) non-colon index, if any.
1541 int k, dim = -1;
1542 for (k = 0; k < ial; k++)
1543 {
1544 if (! ia(k).is_colon ())
1545 {
1546 if (dim < 0)
1547 dim = k;
1548 else
1549 break;
1550 }
1551 }
1552 if (dim < 0)
1553 {
// All colons: A(:,...,:) = [] yields a 0-by-... empty array.
1554 dim_vector dv = m_dimensions;
1555 dv(0) = 0;
1556 *this = Array<T, Alloc> (dv);
1557 }
1558 else if (k == ial)
1559 {
// Exactly one non-colon index: delete along that dimension.
1560 delete_elements (dim, ia(dim));
1561 }
1562 else
1563 {
1564 // Allow the null assignment to succeed if it won't change
1565 // anything because the indices reference an empty slice,
1566 // provided that there is at most one non-colon (or
1567 // equivalent) index. So, we still have the requirement of
1568 // deleting a slice, but it is OK if the slice is empty.
1569
1570 // For compatibility with Matlab, stop checking once we see
1571 // more than one non-colon index or an empty index. Matlab
1572 // considers "[]" to be an empty index but not "false". We
1573 // accept both.
1574
1575 bool empty_assignment = false;
1576
1577 int num_non_colon_indices = 0;
1578
1579 int nd = ndims ();
1580
1581 for (int i = 0; i < ial; i++)
1582 {
1583 octave_idx_type dim_len = (i >= nd ? 1 : m_dimensions(i));
1584
1585 if (ia(i).length (dim_len) == 0)
1586 {
1587 empty_assignment = true;
1588 break;
1589 }
1590
1591 if (! ia(i).is_colon_equiv (dim_len))
1592 {
1593 num_non_colon_indices++;
1594
1595 if (num_non_colon_indices == 2)
1596 break;
1597 }
1598 }
1599
1600 if (! empty_assignment)
1601 (*current_liboctave_error_handler)
1602 ("a null assignment can only have one non-colon index");
1603 }
1604 }
1605
1606 }
1607
// Insert array A into *this with its upper-left corner at (R, C),
// growing *this as needed via indexed assignment.  Returns *this.
// NOTE(review): return-type/name lines were lost in extraction;
// presumably this is Array<T, Alloc>& Array<T, Alloc>::insert — confirm
// against the header.
1608 template <typename T, typename Alloc>
1611 {
1612 octave::idx_vector i (r, r + a.rows ());
1613 octave::idx_vector j (c, c + a.columns ());
1614 if (ndims () == 2 && a.ndims () == 2)
1615 assign (i, j, a);
1616 else
1617 {
// Higher dimensions of A are inserted starting at offset 0.
1619 idx(0) = i;
1620 idx(1) = j;
1621 for (int k = 2; k < a.ndims (); k++)
1622 idx(k) = octave::idx_vector (0, a.m_dimensions(k));
1623 assign (idx, a);
1624 }
1625
1626 return *this;
1627 }
1628
// Insert array A into *this at the multi-dimensional offset RA_IDX.
// NOTE(review): signature and local declarations were lost in
// extraction; body untouched apart from comments.
1629 template <typename T, typename Alloc>
1632 {
1635 const dim_vector dva = a.dims ().redim (n);
// Build one contiguous index range per dimension, starting at ra_idx(k).
1636 for (octave_idx_type k = 0; k < n; k++)
1637 idx(k) = octave::idx_vector (ra_idx(k), ra_idx(k) + dva(k));
1638
1639 assign (idx, a);
1640
1641 return *this;
1642 }
1643
// Return the transpose of a 2-D array.  Large matrices use a blocked
// algorithm (cache-friendly), small ones a plain double loop, and
// vectors/empties are just reshaped (shallow copy).
1644 template <typename T, typename Alloc>
1647 {
1648 liboctave_panic_unless (ndims () == 2);
1649
1650 octave_idx_type nr = dim1 ();
1651 octave_idx_type nc = dim2 ();
1652
1653 if (nr >= 8 && nc >= 8)
1654 {
1655 Array<T, Alloc> result (dim_vector (nc, nr));
1656
1657 // Reuse the implementation used for permuting.
1658
1659 rec_permute_helper::blk_trans (data (), result.rwdata (), nr, nc);
1660
1661 return result;
1662 }
1663 else if (nr > 1 && nc > 1)
1664 {
1665 Array<T, Alloc> result (dim_vector (nc, nr));
1666
1667 for (octave_idx_type j = 0; j < nc; j++)
1668 for (octave_idx_type i = 0; i < nr; i++)
1669 result.xelem (j, i) = xelem (i, j);
1670
1671 return result;
1672 }
1673 else
1674 {
1675 // Fast transpose for vectors and empty matrices.
1676 return Array<T, Alloc> (*this, dim_vector (nc, nr));
1677 }
1678 }
1679
// Identity mapper: returns its argument unchanged.  Used as the
// default element transform in Array<T>::hermitian when no
// conjugation function is supplied.
template <typename T>
static T
no_op_fcn (const T& val)
{
  return val;
}
1686
// Return the transpose of a 2-D array with FCN applied to every
// element (e.g. conj for a complex conjugate transpose).  A null FCN
// defaults to the identity.  Large matrices use an 8x8 blocked
// transpose through a small buffer to reduce cache misses.
1687 template <typename T, typename Alloc>
1689 Array<T, Alloc>::hermitian (T (*fcn) (const T&)) const
1690 {
1691 liboctave_panic_unless (ndims () == 2);
1692
1693 if (! fcn)
1694 fcn = no_op_fcn<T>;
1695
1696 octave_idx_type nr = dim1 ();
1697 octave_idx_type nc = dim2 ();
1698
1699 if (nr >= 8 && nc >= 8)
1700 {
1701 Array<T, Alloc> result (dim_vector (nc, nr));
1702
1703 // Blocked transpose to attempt to avoid cache misses.
1704
1705 T buf[64];
1706
1707 octave_idx_type jj;
1708 for (jj = 0; jj < (nc - 8 + 1); jj += 8)
1709 {
1710 octave_idx_type ii;
1711 for (ii = 0; ii < (nr - 8 + 1); ii += 8)
1712 {
1713 // Copy to buffer
1714 for (octave_idx_type j = jj, k = 0, idxj = jj * nr;
1715 j < jj + 8; j++, idxj += nr)
1716 for (octave_idx_type i = ii; i < ii + 8; i++)
1717 buf[k++] = xelem (i + idxj);
1718
1719 // Copy from buffer
1720 for (octave_idx_type i = ii, idxi = ii * nc; i < ii + 8;
1721 i++, idxi += nc)
1722 for (octave_idx_type j = jj, k = i - ii; j < jj + 8;
1723 j++, k+=8)
1724 result.xelem (j + idxi) = fcn (buf[k]);
1725 }
1726
// Remaining rows that do not fill a complete 8x8 tile.
1727 if (ii < nr)
1728 for (octave_idx_type j = jj; j < jj + 8; j++)
1729 for (octave_idx_type i = ii; i < nr; i++)
1730 result.xelem (j, i) = fcn (xelem (i, j));
1731 }
1732
// Remaining columns that do not fill a complete tile.
1733 for (octave_idx_type j = jj; j < nc; j++)
1734 for (octave_idx_type i = 0; i < nr; i++)
1735 result.xelem (j, i) = fcn (xelem (i, j));
1736
1737 return result;
1738 }
1739 else
1740 {
1741 Array<T, Alloc> result (dim_vector (nc, nr));
1742
1743 for (octave_idx_type j = 0; j < nc; j++)
1744 for (octave_idx_type i = 0; i < nr; i++)
1745 result.xelem (j, i) = fcn (xelem (i, j));
1746
1747 return result;
1748 }
1749 }
1750
1751/*
1752
1753## Transpose tests for matrices of the tile size and plus or minus a row
1754## and with four tiles.
1755
1756%!shared m7, mt7, m8, mt8, m9, mt9
1757%! m7 = reshape (1 : 7*8, 8, 7);
1758%! mt7 = [1:8; 9:16; 17:24; 25:32; 33:40; 41:48; 49:56];
1759%! m8 = reshape (1 : 8*8, 8, 8);
1760 %! mt8 = [mt7; 57:64];
1761%! m9 = reshape (1 : 9*8, 8, 9);
1762%! mt9 = [mt8; 65:72];
1763
1764%!assert (m7', mt7)
1765%!assert ((1i*m7).', 1i * mt7)
1766%!assert ((1i*m7)', conj (1i * mt7))
1767%!assert (m8', mt8)
1768%!assert ((1i*m8).', 1i * mt8)
1769%!assert ((1i*m8)', conj (1i * mt8))
1770%!assert (m9', mt9)
1771%!assert ((1i*m9).', 1i * mt9)
1772%!assert ((1i*m9)', conj (1i * mt9))
1773%!assert ([m7, m8; m7, m8]', [mt7, mt7; mt8, mt8])
1774%!assert ((1i*[m7, m8; m7, m8]).', 1i * [mt7, mt7; mt8, mt8])
1775%!assert ((1i*[m7, m8; m7, m8])', conj (1i * [mt7, mt7; mt8, mt8]))
1776%!assert ([m8, m8; m8, m8]', [mt8, mt8; mt8, mt8])
1777%!assert ((1i*[m8, m8; m8, m8]).', 1i * [mt8, mt8; mt8, mt8])
1778%!assert ((1i*[m8, m8; m8, m8])', conj (1i * [mt8, mt8; mt8, mt8]))
1779%!assert ([m9, m8; m9, m8]', [mt9, mt9; mt8, mt8])
1780%!assert ((1i*[m9, m8; m9, m8]).', 1i * [mt9, mt9; mt8, mt8])
1781%!assert ((1i*[m9, m8; m9, m8])', conj (1i * [mt9, mt9; mt8, mt8]))
1782
1783*/
1784
// Return a writable pointer to the array data, detaching from any
// shared representation first (copy-on-write).
// NOTE(review): the function name line was lost in extraction.
1785 template <typename T, typename Alloc>
1786 T *
1788 {
1789 make_unique ();
1790
1791 return m_slice_data;
1792 }
1793
1794 // Non-real types don't have NaNs.
// Generic fallback used by the sort routines; specializations for
// floating-point element types are expected to exist elsewhere.
1795 template <typename T>
1796 inline bool
1798 {
1799 return false;
1800 }
1801
// Return a copy of *this sorted along dimension DIM.  NaNs are
// partitioned out before sorting and placed at the end (ascending) or
// the beginning (descending).  MODE == UNSORTED returns an
// uninitialized result (callers pass ASCENDING/DESCENDING).
1802 template <typename T, typename Alloc>
1804 Array<T, Alloc>::sort (int dim, sortmode mode) const
1805 {
1806 if (dim < 0)
1807 (*current_liboctave_error_handler) ("sort: invalid dimension");
1808
1809 if (numel () < 1) // input array is empty,
1810 return *this;
1811
1812 if (dim >= ndims ())
1813 {
1814 // The dimension to sort along exceeds the array's dimensions,
1815 // ==> array is already trivially sorted in such higher dimensions,
1816 // ==> return early.
1817
1818 return *this;
1819 }
1820
1821 Array<T, Alloc> m (dims ());
1822
1823 dim_vector dv = m.dims ();
1824
1825 octave_idx_type ns = dv(dim);
1826 octave_idx_type iter = dv.numel () / ns;
1827 octave_idx_type stride = 1;
1828
1829 for (int i = 0; i < dim; i++)
1830 stride *= dv(i);
1831
1832 T *v = m.rwdata ();
1833 const T *ov = data ();
1834
1835 octave_sort<T> lsort;
1836
1837 if (mode != UNSORTED)
1838 lsort.set_compare (mode);
1839 else
1840 return m;
1841
1842 if (stride == 1)
1843 {
1844 // Special case along first dimension avoids gather/scatter AND directly
1845 // sorts into destination buffer for an 11% performance boost.
1846 for (octave_idx_type j = 0; j < iter; j++)
1847 {
1848 // Copy and partition out NaNs.
1849 // No need to special case integer types <T> from floating point
1850 // types <T> to avoid sort_isnan() test as it makes no discernible
1851 // performance impact.
1852 octave_idx_type kl = 0;
1853 octave_idx_type ku = ns;
1854 for (octave_idx_type i = 0; i < ns; i++)
1855 {
1856 T tmp = ov[i];
1857 if (sort_isnan<T> (tmp))
1858 v[--ku] = tmp;
1859 else
1860 v[kl++] = tmp;
1861 }
1862
1863 // sort.
1864 lsort.sort (v, kl);
1865
1866 if (ku < ns)
1867 {
1868 // NaNs are in reverse order
1869 std::reverse (v + ku, v + ns);
1870 if (mode == DESCENDING)
1871 std::rotate (v, v + ku, v + ns);
1872 }
1873
1874 v += ns;
1875 ov += ns;
1876 }
1877 }
1878 else
1879 {
// General stride: gather each 1-D slice into a scratch buffer,
// sort it, and scatter it back.
1880 OCTAVE_LOCAL_BUFFER (T, buf, ns);
1881
1882 for (octave_idx_type j = 0; j < iter; j++)
1883 {
1884 octave_idx_type offset = j;
1885 octave_idx_type n_strides = j / stride;
1886 offset += n_strides * stride * (ns - 1);
1887
1888 // gather and partition out NaNs.
1889 octave_idx_type kl = 0;
1890 octave_idx_type ku = ns;
1891 for (octave_idx_type i = 0; i < ns; i++)
1892 {
1893 T tmp = ov[i*stride + offset];
1894 if (sort_isnan<T> (tmp))
1895 buf[--ku] = tmp;
1896 else
1897 buf[kl++] = tmp;
1898 }
1899
1900 // sort.
1901 lsort.sort (buf, kl);
1902
1903 if (ku < ns)
1904 {
1905 // NaNs are in reverse order
1906 std::reverse (buf + ku, buf + ns);
1907 if (mode == DESCENDING)
1908 std::rotate (buf, buf + ku, buf + ns);
1909 }
1910
1911 // scatter.
1912 for (octave_idx_type i = 0; i < ns; i++)
1913 v[i*stride + offset] = buf[i];
1914 }
1915 }
1916
1917 return m;
1918 }
1919
// Sort along dimension DIM, also returning in SIDX the permutation of
// original positions.  Same NaN handling as the index-less overload.
// NOTE(review): the name line and the declaration of the index
// scratch buffer (presumably OCTAVE_LOCAL_BUFFER for bufi) were lost
// in extraction; body untouched apart from comments.
1920 template <typename T, typename Alloc>
1923 sortmode mode) const
1924 {
1925 if (dim < 0)
1926 (*current_liboctave_error_handler) ("sort: invalid dimension");
1927
1928 const dim_vector& dv = dims ();
1929
1930 sidx = Array<octave_idx_type> (dv);
1931
1932 if (numel () < 1 || dim >= ndims ())
1933 return *this;
1934
1935 Array<T, Alloc> m (dims ());
1936
1937 octave_idx_type ns = dv(dim);
1938 octave_idx_type iter = dv.numel () / ns;
1939 octave_idx_type stride = 1;
1940
1941 for (int i = 0; i < dim; i++)
1942 stride *= dv(i);
1943
1944 T *v = m.rwdata ();
1945 const T *ov = data ();
1946
1947 octave_sort<T> lsort;
1948
1949 octave_idx_type *vi = sidx.rwdata ();
1950
1951 if (mode != UNSORTED)
1952 lsort.set_compare (mode);
1953 else
1954 return m;
1955
1956 if (stride == 1)
1957 {
1958 // Special case for dim 1 avoids gather/scatter for performance boost.
1959 // See comments in Array::sort (dim, mode).
1960 for (octave_idx_type j = 0; j < iter; j++)
1961 {
1962 // copy and partition out NaNs.
1963 octave_idx_type kl = 0;
1964 octave_idx_type ku = ns;
1965 for (octave_idx_type i = 0; i < ns; i++)
1966 {
1967 T tmp = ov[i];
1968 if (sort_isnan<T> (tmp))
1969 {
1970 --ku;
1971 v[ku] = tmp;
1972 vi[ku] = i;
1973 }
1974 else
1975 {
1976 v[kl] = tmp;
1977 vi[kl] = i;
1978 kl++;
1979 }
1980 }
1981
1982 // sort.
1983 lsort.sort (v, vi, kl);
1984
1985 if (ku < ns)
1986 {
1987 // NaNs are in reverse order
1988 std::reverse (v + ku, v + ns);
1989 std::reverse (vi + ku, vi + ns);
1990 if (mode == DESCENDING)
1991 {
1992 std::rotate (v, v + ku, v + ns);
1993 std::rotate (vi, vi + ku, vi + ns);
1994 }
1995 }
1996
1997 v += ns;
1998 vi += ns;
1999 ov += ns;
2000 }
2001 }
2002 else
2003 {
2004 OCTAVE_LOCAL_BUFFER (T, buf, ns);
2006
2007 for (octave_idx_type j = 0; j < iter; j++)
2008 {
2009 octave_idx_type offset = j;
2010 octave_idx_type n_strides = j / stride;
2011 offset += n_strides * stride * (ns - 1);
2012
2013 // gather and partition out NaNs.
2014 octave_idx_type kl = 0;
2015 octave_idx_type ku = ns;
2016 for (octave_idx_type i = 0; i < ns; i++)
2017 {
2018 T tmp = ov[i*stride + offset];
2019 if (sort_isnan<T> (tmp))
2020 {
2021 --ku;
2022 buf[ku] = tmp;
2023 bufi[ku] = i;
2024 }
2025 else
2026 {
2027 buf[kl] = tmp;
2028 bufi[kl] = i;
2029 kl++;
2030 }
2031 }
2032
2033 // sort.
2034 lsort.sort (buf, bufi, kl);
2035
2036 if (ku < ns)
2037 {
2038 // NaNs are in reverse order
2039 std::reverse (buf + ku, buf + ns);
2040 std::reverse (bufi + ku, bufi + ns);
2041 if (mode == DESCENDING)
2042 {
2043 std::rotate (buf, buf + ku, buf + ns);
2044 std::rotate (bufi, bufi + ku, bufi + ns);
2045 }
2046 }
2047
2048 // scatter.
2049 for (octave_idx_type i = 0; i < ns; i++)
2050 v[i*stride + offset] = buf[i];
2051 for (octave_idx_type i = 0; i < ns; i++)
2052 vi[i*stride + offset] = bufi[i];
2053 }
2054 }
2055
2056 return m;
2057 }
2058
// Map a sort mode to a comparison function; UNSORTED yields nullptr.
// NOTE(review): the name line and the two return expressions were
// lost in extraction (presumably they return the ascending/descending
// comparators) — confirm against the repository source.
2059 template <typename T, typename Alloc>
2062 bool /* allow_chk */)
2063 {
2064 if (mode == ASCENDING)
2066 else if (mode == DESCENDING)
2068 else
2069 return nullptr;
2070 }
2071
// Determine whether the array is sorted.  With MODE == UNSORTED the
// direction is auto-detected from the first and last elements; the
// result is the detected/confirmed mode, or UNSORTED if unsorted.
1972 template <typename T, typename Alloc>
2075 {
2076 octave_sort<T> lsort;
2077
2078 octave_idx_type n = numel ();
2079
// Zero or one element is trivially sorted.
2080 if (n <= 1)
2081 return (mode == UNSORTED) ? ASCENDING : mode;
2082
2083 if (mode == UNSORTED)
2084 {
2085 // Auto-detect mode.
2086 compare_fcn_type compare
2087 = safe_comparator (ASCENDING, *this, false);
2088
2089 if (compare (elem (n-1), elem (0)))
2090 mode = DESCENDING;
2091 else
2092 mode = ASCENDING;
2093 }
2094
2095 lsort.set_compare (safe_comparator (mode, *this, false));
2096
2097 if (! lsort.issorted (data (), n))
2098 mode = UNSORTED;
2099
2100 return mode;
2101
2102 }
2103
// Return the row permutation that sorts the rows of a 2-D array.
// NOTE(review): the name line and the declaration of idx (presumably
// Array<octave_idx_type> idx;) were lost in extraction.
2104 template <typename T, typename Alloc>
2107 {
2109
2110 octave_sort<T> lsort (safe_comparator (mode, *this, true));
2111
2112 octave_idx_type r = rows ();
2113 octave_idx_type c = cols ();
2114
2115 idx = Array<octave_idx_type> (dim_vector (r, 1));
2116
2117 lsort.sort_rows (data (), idx.rwdata (), r, c);
2118
2119 return idx;
2120 }
2121
// Determine whether the rows are sorted lexicographically.  With
// MODE == UNSORTED the direction is auto-detected by comparing first
// and last rows column by column.  Returns the confirmed mode or
// UNSORTED.  NOTE(review): the name line and the declaration of the
// loop variable i were lost in extraction.
2122 template <typename T, typename Alloc>
2125 {
2126 octave_sort<T> lsort;
2127
2128 octave_idx_type r = rows ();
2129 octave_idx_type c = cols ();
2130
2131 if (r <= 1 || c == 0)
2132 return (mode == UNSORTED) ? ASCENDING : mode;
2133
2134 if (mode == UNSORTED)
2135 {
2136 // Auto-detect mode.
2137 compare_fcn_type compare
2138 = safe_comparator (ASCENDING, *this, false);
2139
2141 for (i = 0; i < cols (); i++)
2142 {
2143 T l = elem (0, i);
2144 T u = elem (rows () - 1, i);
2145 if (compare (l, u))
2146 {
2147 if (mode == DESCENDING)
2148 {
2149 mode = UNSORTED;
2150 break;
2151 }
2152 else
2153 mode = ASCENDING;
2154 }
2155 else if (compare (u, l))
2156 {
2157 if (mode == ASCENDING)
2158 {
2159 mode = UNSORTED;
2160 break;
2161 }
2162 else
2163 mode = DESCENDING;
2164 }
2165 }
// All columns equal between first and last row: treat as ascending.
2166 if (mode == UNSORTED && i == cols ())
2167 mode = ASCENDING;
2168 }
2169
2170 if (mode != UNSORTED)
2171 {
2172 lsort.set_compare (safe_comparator (mode, *this, false));
2173
2174 if (! lsort.is_sorted_rows (data (), r, c))
2175 mode = UNSORTED;
2176 }
2177
2178 return mode;
2179
2180 }
2181
2182 // Do a binary lookup in a sorted array.
// Returns the insertion position of VALUE; the sort direction is
// auto-detected when MODE == UNSORTED.
2183 template <typename T, typename Alloc>
2185 Array<T, Alloc>::lookup (const T& value, sortmode mode) const
2186 {
2187 octave_idx_type n = numel ();
2188 octave_sort<T> lsort;
2189
2190 if (mode == UNSORTED)
2191 {
2192 // auto-detect mode
2193 if (n > 1 && lsort.descending_compare (elem (0), elem (n-1)))
2194 mode = DESCENDING;
2195 else
2196 mode = ASCENDING;
2197 }
2198
2199 lsort.set_compare (mode);
2200
2201 return lsort.lookup (data (), n, value);
2202 }
2203
// Vector lookup: return, for each entry of VALUES, its position in
// this sorted table.  Chooses between a binary search per value
// (O(M*log N)) and a merge-style pass (O(M+N)) when VALUES is itself
// sorted and large enough.
2204 template <typename T, typename Alloc>
2207 {
2208 octave_idx_type n = numel ();
2209 octave_idx_type nval = values.numel ();
2210 octave_sort<T> lsort;
2211 Array<octave_idx_type> idx (values.dims ());
2212
2213 if (mode == UNSORTED)
2214 {
2215 // auto-detect mode
2216 if (n > 1 && lsort.descending_compare (elem (0), elem (n-1)))
2217 mode = DESCENDING;
2218 else
2219 mode = ASCENDING;
2220 }
2221
2222 lsort.set_compare (mode);
2223
2224 // This determines the split ratio between the O(M*log2(N)) and O(M+N)
2225 // algorithms.
2226 static const double RATIO = 1.0;
2227 sortmode vmode = UNSORTED;
2228
2229 // Attempt the O(M+N) algorithm if M is large enough.
2230 if (nval > RATIO * n / octave::math::log2 (n + 1.0))
2231 {
2232 vmode = values.issorted ();
2233 // The table must not contain a NaN.
2234 if ((vmode == ASCENDING && sort_isnan<T> (values(nval-1)))
2235 || (vmode == DESCENDING && sort_isnan<T> (values(0))))
2236 vmode = UNSORTED;
2237 }
2238
2239 if (vmode != UNSORTED)
2240 lsort.lookup_sorted (data (), n, values.data (), nval,
2241 idx.rwdata (), vmode != mode);
2242 else
2243 lsort.lookup (data (), n, values.data (), nval, idx.rwdata ());
2244
2245 return idx;
2246 }
2247
// Count the elements that differ from the default-constructed T
// ("zero").  NOTE(review): the function name line was lost in
// extraction.
2248 template <typename T, typename Alloc>
2251 {
2252 const T *src = data ();
2253 octave_idx_type nel = numel ();
2254 octave_idx_type retval = 0;
2255 const T zero = T ();
2256 for (octave_idx_type i = 0; i < nel; i++)
2257 if (src[i] != zero)
2258 retval++;
2259
2260 return retval;
2261 }
2262
// Return the linear indices of nonzero elements.  N limits the number
// of results (N < 0 means all); BACKWARD searches from the end.
// NOTE(review): the name line and the declaration of retval were lost
// in extraction.
2263 template <typename T, typename Alloc>
2266 {
2268 const T *src = data ();
2269 octave_idx_type nel = numel ();
2270 const T zero = T ();
2271 if (n < 0 || n >= nel)
2272 {
2273 // We want all elements, which means we'll almost surely need
2274 // to resize. So count first, then allocate array of exact size.
2275 octave_idx_type cnt = 0;
2276 for (octave_idx_type i = 0; i < nel; i++)
2277 cnt += src[i] != zero;
2278
2279 retval.clear (cnt, 1);
2280 octave_idx_type *dest = retval.rwdata ();
2281 for (octave_idx_type i = 0; i < nel; i++)
2282 if (src[i] != zero) *dest++ = i;
2283 }
2284 else
2285 {
2286 // We want a fixed max number of elements, usually small. So be
2287 // optimistic, alloc the array in advance, and then resize if
2288 // needed.
2289 retval.clear (n, 1);
2290 if (backward)
2291 {
2292 // Do the search as a series of successive single-element searches.
2293 octave_idx_type k = 0;
2294 octave_idx_type l = nel - 1;
2295 for (; k < n; k++)
2296 {
2297 for (; l >= 0 && src[l] == zero; l--) ;
2298 if (l >= 0)
2299 retval(k) = l--;
2300 else
2301 break;
2302 }
2303 if (k < n)
2304 retval.resize2 (k, 1);
// Results were collected back-to-front; restore ascending order.
2305 octave_idx_type *rdata = retval.rwdata ();
2306 std::reverse (rdata, rdata + k);
2307 }
2308 else
2309 {
2310 // Do the search as a series of successive single-element searches.
2311 octave_idx_type k = 0;
2312 octave_idx_type l = 0;
2313 for (; k < n; k++)
2314 {
2315 for (; l != nel && src[l] == zero; l++) ;
2316 if (l != nel)
2317 retval(k) = l++;
2318 else
2319 break;
2320 }
2321 if (k < n)
2322 retval.resize2 (k, 1);
2323 }
2324 }
2325
2326 // Fixup return dimensions, for Matlab compatibility.
2327 // find (zeros (0,0)) -> zeros (0,0)
2328 // find (zeros (1,0)) -> zeros (1,0)
2329 // find (zeros (0,1)) -> zeros (0,1)
2330 // find (zeros (0,X)) -> zeros (0,1)
2331 // find (zeros (1,1)) -> zeros (0,0) !!!! WHY?
2332 // find (zeros (0,1,0)) -> zeros (0,0)
2333 // find (zeros (0,1,0,1)) -> zeros (0,0) etc
2334
2335 if ((numel () == 1 && retval.isempty ())
2336 || (rows () == 0 && dims ().numel (1) == 0))
2337 retval.m_dimensions = dim_vector ();
2338 else if (rows () == 1 && ndims () == 2)
2339 retval.m_dimensions = dim_vector (1, retval.numel ());
2340
2341 return retval;
2342 }
2343
// Partial sort: return the elements whose sorted positions are given
// by N (a scalar, or a contiguous ascending/descending range) along
// dimension DIM, without fully sorting each slice.  NaNs are
// partitioned out before selection.
2344 template <typename T, typename Alloc>
2346 Array<T, Alloc>::nth_element (const octave::idx_vector& n, int dim) const
2347 {
2348 if (dim < 0)
2349 (*current_liboctave_error_handler) ("nth_element: invalid dimension");
2350
2351 dim_vector dv = dims ();
2352 if (dim >= dv.ndims ())
2353 dv.resize (dim+1, 1);
2354
2355 octave_idx_type ns = dv(dim);
2356
2357 octave_idx_type nn = n.length (ns);
2358
2359 dv(dim) = std::min (nn, ns);
2360 dv.chop_trailing_singletons ();
2361 dim = std::min (dv.ndims (), static_cast<octave_idx_type> (dim));
2362
2363 Array<T, Alloc> m (dv);
2364
2365 if (m.isempty ())
2366 return m;
2367
2368 sortmode mode = UNSORTED;
2369 octave_idx_type lo = 0;
2370
// Classify the requested index set; only scalars and contiguous
// unit-step ranges (either direction) are supported.
2371 switch (n.idx_class ())
2372 {
2373 case octave::idx_vector::class_scalar:
2374 mode = ASCENDING;
2375 lo = n(0);
2376 break;
2377 case octave::idx_vector::class_range:
2378 {
2379 octave_idx_type inc = n.increment ();
2380 if (inc == 1)
2381 {
2382 mode = ASCENDING;
2383 lo = n(0);
2384 }
2385 else if (inc == -1)
2386 {
2387 mode = DESCENDING;
2388 lo = ns - 1 - n(0);
2389 }
2390 }
2391 break;
2392 case octave::idx_vector::class_vector:
2393 // This case resolves bug #51329, a fallback to allow the given index
2394 // to be a sequential vector instead of the typical scalar or range
2395 if (n(1) - n(0) == 1)
2396 {
2397 mode = ASCENDING;
2398 lo = n(0);
2399 }
2400 else if (n(1) - n(0) == -1)
2401 {
2402 mode = DESCENDING;
2403 lo = ns - 1 - n(0);
2404 }
2405 // Ensure that the vector is actually an arithmetic contiguous sequence
2406 for (octave_idx_type i = 2; i < n.length () && mode != UNSORTED; i++)
2407 if ((mode == ASCENDING && n(i) - n(i-1) != 1)
2408 || (mode == DESCENDING && n(i) - n(i-1) != -1))
2409 mode = UNSORTED;
2410 break;
2411 default:
2412 break;
2413 }
2414
2415 if (mode == UNSORTED)
2416 (*current_liboctave_error_handler)
2417 ("nth_element: n must be a scalar or a contiguous range");
2418
2419 octave_idx_type up = lo + nn;
2420
2421 if (lo < 0 || up > ns)
2422 (*current_liboctave_error_handler) ("nth_element: invalid element index");
2423
2424 octave_idx_type iter = numel () / ns;
2425 octave_idx_type stride = 1;
2426
2427 for (int i = 0; i < dim; i++)
2428 stride *= dv(i);
2429
2430 T *v = m.rwdata ();
2431 const T *ov = data ();
2432
2433 OCTAVE_LOCAL_BUFFER (T, buf, ns);
2434
2435 octave_sort<T> lsort;
2436 lsort.set_compare (mode);
2437
2438 for (octave_idx_type j = 0; j < iter; j++)
2439 {
2440 octave_idx_type kl = 0;
2441 octave_idx_type ku = ns;
2442
2443 if (stride == 1)
2444 {
2445 // copy without NaNs.
2446 for (octave_idx_type i = 0; i < ns; i++)
2447 {
2448 T tmp = ov[i];
2449 if (sort_isnan<T> (tmp))
2450 buf[--ku] = tmp;
2451 else
2452 buf[kl++] = tmp;
2453 }
2454
2455 ov += ns;
2456 }
2457 else
2458 {
2459 octave_idx_type offset = j % stride;
2460 // copy without NaNs.
2461 for (octave_idx_type i = 0; i < ns; i++)
2462 {
2463 T tmp = ov[offset + i*stride];
2464 if (sort_isnan<T> (tmp))
2465 buf[--ku] = tmp;
2466 else
2467 buf[kl++] = tmp;
2468 }
2469
2470 if (offset == stride-1)
2471 ov += ns*stride;
2472 }
2473
// Select the requested order statistics; NaNs (if any) shift the
// effective window for descending mode.
2474 if (ku == ns)
2475 lsort.nth_element (buf, ns, lo, up);
2476 else if (mode == ASCENDING)
2477 lsort.nth_element (buf, ku, lo, std::min (ku, up));
2478 else
2479 {
2480 octave_idx_type nnan = ns - ku;
2481 octave_idx_type zero = 0;
2482 lsort.nth_element (buf, ku, std::max (lo - nnan, zero),
2483 std::max (up - nnan, zero));
2484 std::rotate (buf, buf + ku, buf + ns);
2485 }
2486
2487 if (stride == 1)
2488 {
2489 for (octave_idx_type i = 0; i < nn; i++)
2490 v[i] = buf[lo + i];
2491
2492 v += nn;
2493 }
2494 else
2495 {
2496 octave_idx_type offset = j % stride;
2497 for (octave_idx_type i = 0; i < nn; i++)
2498 v[offset + stride * i] = buf[lo + i];
2499 if (offset == stride-1)
2500 v += nn*stride;
2501 }
2502 }
2503
2504 return m;
2505 }
2506
// Provide trivial stub specializations of the sort-related API for
// element types T that cannot be sorted/compared, so that Array<T>
// can still be instantiated.  The API argument carries an optional
// export/visibility attribute.
2507 #define NO_INSTANTIATE_ARRAY_SORT_API(T, API) \
2508 template <> API Array<T> \
2509 Array<T>::sort (int, sortmode) const \
2510 { \
2511 return *this; \
2512 } \
2513 template <> API Array<T> \
2514 Array<T>::sort (Array<octave_idx_type> &sidx, int, sortmode) const \
2515 { \
2516 sidx = Array<octave_idx_type> (); \
2517 return *this; \
2518 } \
2519 template <> API sortmode \
2520 Array<T>::issorted (sortmode) const \
2521 { \
2522 return UNSORTED; \
2523 } \
2524 API Array<T>::compare_fcn_type \
2525 safe_comparator (sortmode, const Array<T>&, bool) \
2526 { \
2527 return nullptr; \
2528 } \
2529 template <> API Array<octave_idx_type> \
2530 Array<T>::sort_rows_idx (sortmode) const \
2531 { \
2532 return Array<octave_idx_type> (); \
2533 } \
2534 template <> API sortmode \
2535 Array<T>::is_sorted_rows (sortmode) const \
2536 { \
2537 return UNSORTED; \
2538 } \
2539 template <> API octave_idx_type \
2540 Array<T>::lookup (T const &, sortmode) const \
2541 { \
2542 return 0; \
2543 } \
2544 template <> API Array<octave_idx_type> \
2545 Array<T>::lookup (const Array<T>&, sortmode) const \
2546 { \
2547 return Array<octave_idx_type> (); \
2548 } \
2549 template <> API octave_idx_type \
2550 Array<T>::nnz () const \
2551 { \
2552 return 0; \
2553 } \
2554 template <> API Array<octave_idx_type> \
2555 Array<T>::find (octave_idx_type, bool) const \
2556 { \
2557 return Array<octave_idx_type> (); \
2558 } \
2559 template <> API Array<T> \
2560 Array<T>::nth_element (const octave::idx_vector&, int) const { \
2561 return Array<T> (); \
2562 }
2563
2564 #define NO_INSTANTIATE_ARRAY_SORT(T) NO_INSTANTIATE_ARRAY_SORT_API (T,)
2565
// Matlab-style diag: for a matrix argument, extract the K-th
// diagonal as a column vector; for a vector argument, build a square
// matrix with the vector on the K-th diagonal.  NOTE(review): the
// name line and the declaration of the result d were lost in
// extraction.
2566 template <typename T, typename Alloc>
2569 {
2570 const dim_vector& dv = dims ();
2571 octave_idx_type nd = dv.ndims ();
2573
2574 if (nd > 2)
2575 (*current_liboctave_error_handler) ("Matrix must be 2-dimensional");
2576
2577 octave_idx_type nnr = dv(0);
2578 octave_idx_type nnc = dv(1);
2579
2580 if (nnr == 0 && nnc == 0)
2581 ; // do nothing for empty matrix
2582 else if (nnr != 1 && nnc != 1)
2583 {
2584 // Extract diag from matrix
2585 if (k > 0)
2586 nnc -= k;
2587 else if (k < 0)
2588 nnr += k;
2589
2590 if (nnr > 0 && nnc > 0)
2591 {
2592 octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;
2593
2594 d.resize (dim_vector (ndiag, 1));
2595
2596 if (k > 0)
2597 {
2598 for (octave_idx_type i = 0; i < ndiag; i++)
2599 d.xelem (i) = elem (i, i+k);
2600 }
2601 else if (k < 0)
2602 {
2603 for (octave_idx_type i = 0; i < ndiag; i++)
2604 d.xelem (i) = elem (i-k, i);
2605 }
2606 else
2607 {
2608 for (octave_idx_type i = 0; i < ndiag; i++)
2609 d.xelem (i) = elem (i, i);
2610 }
2611 }
2612 else // Matlab returns [] 0x1 for out-of-range diagonal
2613 d.resize (dim_vector (0, 1));
2614 }
2615 else
2616 {
2617 // Create diag matrix from vector
2618 octave_idx_type roff = 0;
2619 octave_idx_type coff = 0;
2620 if (k > 0)
2621 {
2622 roff = 0;
2623 coff = k;
2624 }
2625 else if (k < 0)
2626 {
2627 roff = -k;
2628 coff = 0;
2629 }
2630
2631 if (nnr == 1)
2632 {
2633 octave_idx_type n = nnc + std::abs (k);
2634 d = Array<T, Alloc> (dim_vector (n, n), resize_fill_value ());
2635
2636 for (octave_idx_type i = 0; i < nnc; i++)
2637 d.xelem (i+roff, i+coff) = elem (0, i);
2638 }
2639 else
2640 {
2641 octave_idx_type n = nnr + std::abs (k);
2642 d = Array<T, Alloc> (dim_vector (n, n), resize_fill_value ());
2643
2644 for (octave_idx_type i = 0; i < nnr; i++)
2645 d.xelem (i+roff, i+coff) = elem (i, 0);
2646 }
2647 }
2648
2649 return d;
2650 }
2651
// Array<T, Alloc>::diag (octave_idx_type m, octave_idx_type n) -- build an
// m-by-n (generally rectangular) matrix whose main diagonal is taken from
// *this, which must be a 2-D row or column vector (the diag (v, m, n) form).
// NOTE(review): the doc renderer elided the signature (orig. lines
// 2653-2654).
2652template <typename T, typename Alloc>
2655{
 // Only vectors may be scattered onto a diagonal.
 // NOTE(review): the message "cat: invalid dimension" reads like a
 // copy/paste from cat() below -- one would expect it to mention diag.
 // Confirm upstream before changing; callers may match on the text.
2656 if (ndims () != 2 || (rows () != 1 && cols () != 1))
2657 (*current_liboctave_error_handler) ("cat: invalid dimension");
2658
 // Start from an m-by-n result filled with the default fill value.
2659 Array<T, Alloc> retval (dim_vector (m, n), resize_fill_value ());
2660
 // Copy at most min (numel (), m, n) elements onto the main diagonal;
 // a vector longer than the diagonal is silently truncated.
2661 octave_idx_type nel = std::min (numel (), std::min (m, n));
2662 for (octave_idx_type i = 0; i < nel; i++)
2663 retval.xelem (i, i) = xelem (i);
2664
2665 return retval;
2666}
2667
// Array<T, Alloc>::cat -- static helper implementing concatenation of the
// n arrays in array_list along (0-based) dimension dim, as used by cat().
// dim == -1 / -2 request the special [,] / [;] bracket-concatenation
// (hvcat) dimension rules instead of the plain cat rules.
// NOTE(review): the doc renderer elided the signature (orig. lines
// 2669-2670) and the declaration of the local 'u' (orig. line 2756).
2668template <typename T, typename Alloc>
2671{
2672 // Default concatenation.
 // concat_rule is a pointer-to-member so the compatibility check below
 // can use either dim_vector::concat or dim_vector::hvcat.
2673 bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
2674
2675 if (dim == -1 || dim == -2)
2676 {
2677 concat_rule = &dim_vector::hvcat;
2678 dim = -dim - 1;
2679 }
2680 else if (dim < 0)
2681 (*current_liboctave_error_handler) ("cat: invalid dimension");
2682
 // Trivial cases: one operand is returned as-is, zero operands give an
 // empty array.
2683 if (n == 1)
2684 return array_list[0];
2685 else if (n == 0)
2686 return Array<T, Alloc> ();
2687
2688 // Special case:
2689 //
2690 // cat (dim, [], ..., [], A, ...)
2691 //
2692 // with dim > 2, A not 0x0, and at least three arguments to
2693 // concatenate is equivalent to
2694 //
2695 // cat (dim, A, ...)
2696 //
2697 // Note that this check must be performed here because for full-on
2698 // braindead Matlab compatibility, we need to have things like
2699 //
2700 // cat (3, [], [], A)
2701 //
2702 // succeed, but to have things like
2703 //
2704 // cat (3, cat (3, [], []), A)
2705 // cat (3, zeros (0, 0, 2), A)
2706 //
2707 // fail. See also bug report #31615.
2708
2709 octave_idx_type istart = 0;
2710
2711 if (n > 2 && dim > 1)
2712 {
 // Count the leading literal 0x0 operands so they can be skipped.
2713 for (octave_idx_type i = 0; i < n; i++)
2714 {
2715 const dim_vector& dv = array_list[i].dims ();
2716
2717 if (dv.zero_by_zero ())
2718 istart++;
2719 else
2720 break;
2721 }
2722
2723 // Don't skip any initial arguments if they are all empty.
2724 if (istart >= n)
2725 istart = 0;
2726 }
2727
 // Fold the operands' dimensions together to obtain the result shape,
 // erroring out on any incompatibility.
2728 dim_vector dv = array_list[istart++].dims ();
2729
2730 for (octave_idx_type i = istart; i < n; i++)
2731 if (! (dv.*concat_rule) (array_list[i].dims (), dim))
2732 (*current_liboctave_error_handler) ("cat: dimension mismatch");
2733
2734 Array<T, Alloc> retval (dv);
2735
2736 if (retval.isempty ())
2737 return retval;
2738
 // Assign each operand into retval through an index array that is colon
 // (':') everywhere except along the concatenation dimension, where the
 // half-open range [l, u) selects the destination slab.
2739 int nidx = std::max (dv.ndims (), static_cast<octave_idx_type> (dim + 1));
2740 Array<octave::idx_vector> idxa (dim_vector (nidx, 1), octave::idx_vector::colon);
2741 octave_idx_type l = 0;
2742
2743 for (octave_idx_type i = 0; i < n; i++)
2744 {
2745 // NOTE: This takes some thinking, but no matter what the above rules
2746 // are, an empty array can always be skipped at this point, because
2747 // the result dimensions are already determined, and there is no way
2748 // an empty array may contribute a nonzero piece along the dimension
2749 // at this point, unless an empty array can be promoted to a non-empty
2750 // one (which makes no sense). I repeat, *no way*, think about it.
2751 if (array_list[i].isempty ())
2752 continue;
2753
 // Allow user interrupt during a long concatenation.
2754 octave_quit ();
2755
 // Upper bound of this operand's slab ('u' is declared on the elided
 // line 2756); operands with fewer dimensions contribute extent 1.
2757 if (dim < array_list[i].ndims ())
2758 u = l + array_list[i].dims ()(dim);
2759 else
2760 u = l + 1;
2761
2762 idxa(dim) = octave::idx_vector (l, u);
2763
2764 retval.assign (idxa, array_list[i]);
2765
2766 l = u;
2767 }
2768
2769 return retval;
2770}
2771
2772template <typename T, typename Alloc>
2773void
2774Array<T, Alloc>::print_info (std::ostream& os, const std::string& prefix) const
2775{
2776 os << prefix << "m_rep address: " << m_rep << '\n'
2777 << prefix << "m_rep->m_len: " << m_rep->m_len << '\n'
2778 << prefix << "m_rep->m_data: " << static_cast<void *> (m_rep->m_data) << '\n'
2779 << prefix << "m_rep->m_count: " << m_rep->m_count << '\n'
2780 << prefix << "m_slice_data: " << static_cast<void *> (m_slice_data) << '\n'
2781 << prefix << "m_slice_len: " << m_slice_len << '\n';
2782
2783 // 2D info:
2784 //
2785 // << pefix << "rows: " << rows () << "\n"
2786 // << prefix << "cols: " << cols () << "\n";
2787}
2788
// Array<T, Alloc>::optimize_dimensions -- if this array's dimensions
// compare equal to dv, replace m_dimensions by a shallow copy of dv and
// return true; otherwise leave the array untouched and return false.
// The re-assignment of an equal value is deliberate: it lets arrays of
// equal shape share one dim_vector representation.
// NOTE(review): the doc renderer elided the signature (orig. line 2791):
// bool Array<T, Alloc>::optimize_dimensions (const dim_vector& dv).
2789template <typename T, typename Alloc>
2790bool
2792{
2793 bool retval = m_dimensions == dv;
2794 if (retval)
2795 m_dimensions = dv;
2796
2797 return retval;
2798}
2799
// Array<T, Alloc>::instantiation_guard -- deliberately references the
// nonexistent member T::__xXxXx__ so that any accidental implicit
// instantiation of Array<T> fails to compile.  INSTANTIATE_ARRAY
// (below) disarms the guard by providing an empty explicit
// specialization for each officially supported element type.
// NOTE(review): the doc renderer elided the signature (orig. line 2802).
2800template <typename T, typename Alloc>
2801void
2803{
2804 // This guards against accidental implicit instantiations.
2805 // Array<T, Alloc> instances should always be explicit and use INSTANTIATE_ARRAY.
2806 T::__xXxXx__ ();
2807}
2808
// INSTANTIATE_ARRAY -- explicitly instantiate Array<T> with API as the
// export/import decoration.  It first specializes instantiation_guard
// with an empty body (disarming the implicit-instantiation guard above)
// and then emits the explicit instantiation itself.
2809#define INSTANTIATE_ARRAY(T, API) \
2810 template <> API void \
2811 Array<T>::instantiation_guard () { } \
2812 \
2813 template class API Array<T>
2814
2815// FIXME: is this used?
2816
2817template <typename T, typename Alloc>
2818std::ostream&
2819operator << (std::ostream& os, const Array<T, Alloc>& a)
2820{
2821 const dim_vector& a_dims = a.dims ();
2822
2823 int n_dims = a_dims.ndims ();
2824
2825 os << n_dims << "-dimensional array";
2826
2827 if (n_dims)
2828 os << " (" << a_dims.str () << ')';
2829
2830 os <<"\n\n";
2831
2832 if (n_dims)
2833 {
2834 os << "data:";
2835
2836 Array<octave_idx_type> ra_idx (dim_vector (n_dims, 1), 0);
2837
2838 // Number of times the first 2d-array is to be displayed.
2839
2840 octave_idx_type m = 1;
2841 for (int i = 2; i < n_dims; i++)
2842 m *= a_dims(i);
2843
2844 if (m == 1)
2845 {
2846 octave_idx_type rows = 0;
2847 octave_idx_type cols = 0;
2848
2849 switch (n_dims)
2850 {
2851 case 2:
2852 rows = a_dims(0);
2853 cols = a_dims(1);
2854
2855 for (octave_idx_type j = 0; j < rows; j++)
2856 {
2857 ra_idx(0) = j;
2858 for (octave_idx_type k = 0; k < cols; k++)
2859 {
2860 ra_idx(1) = k;
2861 os << ' ' << a.elem (ra_idx);
2862 }
2863 os << "\n";
2864 }
2865 break;
2866
2867 default:
2868 rows = a_dims(0);
2869
2870 for (octave_idx_type k = 0; k < rows; k++)
2871 {
2872 ra_idx(0) = k;
2873 os << ' ' << a.elem (ra_idx);
2874 }
2875 break;
2876 }
2877
2878 os << "\n";
2879 }
2880 else
2881 {
2882 octave_idx_type rows = a_dims(0);
2883 octave_idx_type cols = a_dims(1);
2884
2885 for (int i = 0; i < m; i++)
2886 {
2887 os << "\n(:,:,";
2888
2889 for (int j = 2; j < n_dims - 1; j++)
2890 os << ra_idx(j) + 1 << ',';
2891
2892 os << ra_idx(n_dims - 1) + 1 << ") = \n";
2893
2894 for (octave_idx_type j = 0; j < rows; j++)
2895 {
2896 ra_idx(0) = j;
2897
2898 for (octave_idx_type k = 0; k < cols; k++)
2899 {
2900 ra_idx(1) = k;
2901 os << ' ' << a.elem (ra_idx);
2902 }
2903
2904 os << "\n";
2905 }
2906
2907 os << "\n";
2908
2909 if (i != m - 1)
2910 increment_index (ra_idx, a_dims, 2);
2911 }
2912 }
2913 }
2914
2915 return os;
2916}
bool sort_isnan(typename ref_param< T >::type)
Array< T, Alloc >::compare_fcn_type safe_comparator(sortmode mode, const Array< T, Alloc > &, bool)
std::ostream & operator<<(std::ostream &os, const Array< T, Alloc > &a)
octave_idx_type compute_index(octave_idx_type n, const dim_vector &dims)
dim_vector zero_dims_inquire(const Array< octave::idx_vector > &ia, const dim_vector &rhdv)
void increment_index(Array< octave_idx_type > &ra_idx, const dim_vector &dimensions, int start_dimension)
Definition Array-util.cc:60
The real representation of all arrays.
Definition Array.h:135
octave::refcount< octave_idx_type > m_count
Definition Array.h:145
N Dimensional Array with copy-on-write semantics.
Definition Array.h:130
const dim_vector & dims() const
Return a const-reference so that dims ()(i) works efficiently.
Definition Array.h:507
Array< T, Alloc >::ArrayRep * m_rep
Definition Array.h:251
T & xelem(octave_idx_type n)
Size of the specified dimension.
Definition Array.h:525
Array< T, Alloc > index(const octave::idx_vector &i) const
Indexing without resizing.
friend class Array
Size of the specified dimension.
Definition Array.h:943
void assign(const octave::idx_vector &i, const Array< T, Alloc > &rhs, const T &rfv)
Indexed assignment (always with resize & fill).
Array< T, Alloc > transpose() const
Size of the specified dimension.
Array< T, Alloc > nth_element(const octave::idx_vector &n, int dim=0) const
Returns the n-th element in increasing order, using the same ordering as used for sort.
void clear()
octave_idx_type compute_index_unchecked(const Array< octave_idx_type > &ra_idx) const
Size of the specified dimension.
Array< T, Alloc > hermitian(T(*fcn)(const T &)=nullptr) const
Size of the specified dimension.
void resize1(octave_idx_type n, const T &rfv)
Size of the specified dimension.
T & elem(octave_idx_type n)
Size of the specified dimension.
Definition Array.h:563
int ndims() const
Size of the specified dimension.
Definition Array.h:679
ref_param< T >::type crefT
Definition Array.h:242
Array< T, Alloc > column(octave_idx_type k) const
Extract column: A(:,k+1).
Array< T, Alloc > linear_slice(octave_idx_type lo, octave_idx_type up) const
Extract a slice from this array as a column vector: A(:)(lo+1:up).
bool optimize_dimensions(const dim_vector &dv)
Returns true if this->dims () == dv, and if so, replaces this->m_dimensions by a shallow copy of dv.
octave_idx_type nnz() const
Count nonzero elements.
octave_idx_type rows() const
Definition Array.h:463
Array< T, Alloc > page(octave_idx_type k) const
Extract page: A(:,:,k+1).
void resize(const dim_vector &dv, const T &rfv)
Size of the specified dimension.
Array< T, Alloc > & insert(const Array< T, Alloc > &a, const Array< octave_idx_type > &idx)
Insert an array into another at a specified position.
Array< T, Alloc > squeeze() const
Chop off leading singleton dimensions.
octave_idx_type dim2() const
Definition Array.h:471
octave_idx_type columns() const
Definition Array.h:475
void delete_elements(const octave::idx_vector &i)
Deleting elements.
Array< octave_idx_type > sort_rows_idx(sortmode mode=ASCENDING) const
Sort by rows returns only indices.
Array< octave_idx_type > find(octave_idx_type n=-1, bool backward=false) const
Find indices of (at most n) nonzero elements.
octave_idx_type dim1() const
Definition Array.h:460
dim_vector m_dimensions
Definition Array.h:249
Array< T, Alloc > permute(const Array< octave_idx_type > &vec, bool inv=false) const
Size of the specified dimension.
Array< T, Alloc > sort(int dim=0, sortmode mode=ASCENDING) const
Size of the specified dimension.
Array< T, Alloc > reshape(octave_idx_type nr, octave_idx_type nc) const
Size of the specified dimension.
Definition Array.h:636
bool isempty() const
Size of the specified dimension.
Definition Array.h:652
const T * data() const
Size of the specified dimension.
Definition Array.h:665
void print_info(std::ostream &os, const std::string &prefix) const
Size of the specified dimension.
sortmode issorted(sortmode mode=UNSORTED) const
Ordering is auto-detected or can be specified.
T * rwdata()
Size of the specified dimension.
T & checkelem(octave_idx_type n)
Size of the specified dimension.
sortmode is_sorted_rows(sortmode mode=UNSORTED) const
Ordering is auto-detected or can be specified.
void fill(const T &val)
Definition Array-base.cc:96
Array< T, Alloc > diag(octave_idx_type k=0) const
Get the kth super or subdiagonal.
virtual T resize_fill_value() const
Size of the specified dimension.
octave_idx_type compute_index(octave_idx_type i, octave_idx_type j) const
Size of the specified dimension.
octave_idx_type lookup(const T &value, sortmode mode=UNSORTED) const
Do a binary lookup in a sorted array.
void resize2(octave_idx_type nr, octave_idx_type nc, const T &rfv)
Resizing (with fill).
static Array< T, Alloc > cat(int dim, octave_idx_type n, const Array< T, Alloc > *array_list)
Concatenation along a specified (0-based) dimension, equivalent to cat().
octave_idx_type numel() const
Number of elements in the array.
Definition Array.h:418
Vector representing the dimensions (size) of an Array.
Definition dim-vector.h:90
bool concat(const dim_vector &dvb, int dim)
This corresponds to cat().
octave_idx_type safe_numel() const
The following function will throw a std::bad_alloc () exception if the requested size is larger than ...
Definition dim-vector.cc:98
std::string str(char sep='x') const
Definition dim-vector.cc:68
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition dim-vector.h:331
void chop_trailing_singletons()
Definition dim-vector.h:160
void resize(int n, int fill_value=0)
Definition dim-vector.h:268
static dim_vector alloc(int n)
Definition dim-vector.h:198
bool is_nd_vector() const
Definition dim-vector.h:396
bool any_neg() const
Definition dim-vector.h:353
octave_idx_type ndims() const
Number of dimensions.
Definition dim-vector.h:253
void chop_all_singletons()
Definition dim-vector.cc:50
bool zero_by_zero() const
Definition dim-vector.h:307
bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
dim_vector redim(int n) const
Force certain dimensionality, preserving numel ().
dim_vector make_nd_vector(octave_idx_type n) const
Definition dim-vector.h:417
virtual octave_idx_type numel() const
Definition ov-base.h:401
void set_compare(const compare_fcn_type &comp)
Definition oct-sort.h:115
void sort_rows(const T *data, octave_idx_type *idx, octave_idx_type rows, octave_idx_type cols)
Definition oct-sort.cc:1666
static bool descending_compare(typename ref_param< T >::type, typename ref_param< T >::type)
Definition oct-sort.cc:1976
bool issorted(const T *data, octave_idx_type nel)
Definition oct-sort.cc:1578
octave_idx_type lookup(const T *data, octave_idx_type nel, const T &value)
Definition oct-sort.cc:1788
void sort(T *data, octave_idx_type nel)
Definition oct-sort.cc:1521
void nth_element(T *data, octave_idx_type nel, octave_idx_type lo, octave_idx_type up=-1)
Definition oct-sort.cc:1946
bool is_sorted_rows(const T *data, octave_idx_type rows, octave_idx_type cols)
Definition oct-sort.cc:1743
void lookup_sorted(const T *data, octave_idx_type nel, const T *values, octave_idx_type nvalues, octave_idx_type *idx, bool rev=false)
Definition oct-sort.cc:1899
if_then_else< is_class_type< T >::no, T, Tconst & >::result type
Definition lo-traits.h:121
#define liboctave_panic_unless(cond)
Definition lo-error.h:102
F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE * d
F77_RET_T const F77_DBLE * x
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition oct-locbuf.h:44
#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value)
Definition oct-locbuf.h:50
sortmode
Definition oct-sort.h:97
@ UNSORTED
Definition oct-sort.h:97
@ ASCENDING
Definition oct-sort.h:97
@ DESCENDING
Definition oct-sort.h:97
T::size_type numel(const T &str)
Definition oct-string.cc:74
const octave_base_value const Array< octave_idx_type > & ra_idx
F77_RET_T len
Definition xerbla.cc:61