// GNU Octave 7.1.0 -- Array.cc
// A high-level interpreted language, primarily intended for numerical
// computations, mostly compatible with Matlab.
// (This text was extracted from the Doxygen documentation of this file.)
1////////////////////////////////////////////////////////////////////////
2//
3// Copyright (C) 1993-2022 The Octave Project Developers
4//
5// See the file COPYRIGHT.md in the top-level directory of this
6// distribution or <https://octave.org/copyright/>.
7//
8// This file is part of Octave.
9//
10// Octave is free software: you can redistribute it and/or modify it
11// under the terms of the GNU General Public License as published by
12// the Free Software Foundation, either version 3 of the License, or
13// (at your option) any later version.
14//
15// Octave is distributed in the hope that it will be useful, but
16// WITHOUT ANY WARRANTY; without even the implied warranty of
17// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18// GNU General Public License for more details.
19//
20// You should have received a copy of the GNU General Public License
21// along with Octave; see the file COPYING. If not, see
22// <https://www.gnu.org/licenses/>.
23//
24////////////////////////////////////////////////////////////////////////
25
26// This file should not include config.h. It is only included in other
27// C++ source files that should have included config.h before including
28// this file.
29
30#include <ostream>
31
32#include "Array-util.h"
33#include "Array.h"
34#include "lo-mappers.h"
35#include "oct-locbuf.h"
36
37// One dimensional array class. Handles the reference counting for
38// all the derived classes.
39
40template <typename T, typename Alloc>
43{
44 static ArrayRep nr;
45 return &nr;
46}
47
48// This is similar to the template for containers but specialized for Array.
49// Note we can't specialize a member without also specializing the class.
50template <typename T, typename Alloc>
52 : m_dimensions (dv), m_rep (a.m_rep),
53 m_slice_data (a.m_slice_data), m_slice_len (a.m_slice_len)
54{
55 if (m_dimensions.safe_numel () != a.numel ())
56 {
57 std::string dimensions_str = a.m_dimensions.str ();
58 std::string new_dims_str = m_dimensions.str ();
59
60 (*current_liboctave_error_handler)
61 ("reshape: can't reshape %s array to %s array",
62 dimensions_str.c_str (), new_dims_str.c_str ());
63 }
64
65 // This goes here because if an exception is thrown by the above,
66 // destructor will be never called.
67 m_rep->m_count++;
69}
70
71template <typename T, typename Alloc>
72void
74{
75 if (m_rep->m_count > 1)
76 {
77 --m_rep->m_count;
78 m_rep = new ArrayRep (numel (), val);
79 m_slice_data = m_rep->m_data;
80 }
81 else
82 std::fill_n (m_slice_data, m_slice_len, val);
83}
84
85template <typename T, typename Alloc>
86void
88{
89 if (--m_rep->m_count == 0)
90 delete m_rep;
91
92 m_rep = nil_rep ();
93 m_rep->m_count++;
94 m_slice_data = m_rep->m_data;
95 m_slice_len = m_rep->m_len;
96
97 m_dimensions = dim_vector ();
98}
99
100template <typename T, typename Alloc>
101void
103{
104 if (--m_rep->m_count == 0)
105 delete m_rep;
106
107 m_rep = new ArrayRep (dv.safe_numel ());
108 m_slice_data = m_rep->m_data;
109 m_slice_len = m_rep->m_len;
110
111 m_dimensions = dv;
112 m_dimensions.chop_trailing_singletons ();
113}
114
115template <typename T, typename Alloc>
118{
119 Array<T, Alloc> retval = *this;
120
121 if (ndims () > 2)
122 {
123 bool dims_changed = false;
124
125 dim_vector new_dimensions = m_dimensions;
126
127 int k = 0;
128
129 for (int i = 0; i < ndims (); i++)
130 {
131 if (m_dimensions(i) == 1)
132 dims_changed = true;
133 else
134 new_dimensions(k++) = m_dimensions(i);
135 }
136
137 if (dims_changed)
138 {
139 switch (k)
140 {
141 case 0:
142 new_dimensions = dim_vector (1, 1);
143 break;
144
145 case 1:
146 {
147 octave_idx_type tmp = new_dimensions(0);
148
149 new_dimensions.resize (2);
150
151 new_dimensions(0) = tmp;
152 new_dimensions(1) = 1;
153 }
154 break;
155
156 default:
157 new_dimensions.resize (k);
158 break;
159 }
160 }
161
162 retval = Array<T, Alloc> (*this, new_dimensions);
163 }
164
165 return retval;
166}
167
168template <typename T, typename Alloc>
171{
172 return ::compute_index (i, j, m_dimensions);
173}
174
175template <typename T, typename Alloc>
178 octave_idx_type k) const
179{
180 return ::compute_index (i, j, k, m_dimensions);
181}
182
183template <typename T, typename Alloc>
186{
187 return ::compute_index (ra_idx, m_dimensions);
188}
189
190template <typename T, typename Alloc>
191T&
193{
194 // Do checks directly to avoid recomputing m_slice_len.
195 if (n < 0)
197 if (n >= m_slice_len)
198 octave::err_index_out_of_range (1, 1, n+1, m_slice_len, m_dimensions);
199
200 return elem (n);
201}
202
203template <typename T, typename Alloc>
204T&
206{
207 return elem (compute_index (i, j));
208}
209
210template <typename T, typename Alloc>
211T&
213{
214 return elem (compute_index (i, j, k));
215}
216
217template <typename T, typename Alloc>
218T&
220{
221 return elem (compute_index (ra_idx));
222}
223
224template <typename T, typename Alloc>
227{
228 // Do checks directly to avoid recomputing m_slice_len.
229 if (n < 0)
231 if (n >= m_slice_len)
232 octave::err_index_out_of_range (1, 1, n+1, m_slice_len, m_dimensions);
233
234 return elem (n);
235}
236
237template <typename T, typename Alloc>
240{
241 return elem (compute_index (i, j));
242}
243
244template <typename T, typename Alloc>
247 octave_idx_type k) const
248{
249 return elem (compute_index (i, j, k));
250}
251
252template <typename T, typename Alloc>
255{
256 return elem (compute_index (ra_idx));
257}
258
259template <typename T, typename Alloc>
262{
263 octave_idx_type r = m_dimensions(0);
264
265 return Array<T, Alloc> (*this, dim_vector (r, 1), k*r, k*r + r);
266}
267
268template <typename T, typename Alloc>
271{
272 octave_idx_type r = m_dimensions(0);
273 octave_idx_type c = m_dimensions(1);
274 octave_idx_type p = r*c;
275
276 return Array<T, Alloc> (*this, dim_vector (r, c), k*p, k*p + p);
277}
278
279template <typename T, typename Alloc>
282{
283 if (up < lo)
284 up = lo;
285
286 return Array<T, Alloc> (*this, dim_vector (up - lo, 1), lo, up);
287}
288
289// Helper class for multi-d dimension permuting (generalized transpose).
291{
292public:
294
295 : m_n (dv.ndims ()), m_top (0), m_dim (new octave_idx_type [2*m_n]),
296 m_stride (m_dim + m_n), m_use_blk (false)
297 {
298 assert (m_n == perm.numel ());
299
300 // Get cumulative dimensions.
302 cdim[0] = 1;
303 for (int i = 1; i < m_n+1; i++) cdim[i] = cdim[i-1] * dv(i-1);
304
305 // Setup the permuted strides.
306 for (int k = 0; k < m_n; k++)
307 {
308 int kk = perm(k);
309 m_dim[k] = dv(kk);
310 m_stride[k] = cdim[kk];
311 }
312
313 // Reduce contiguous runs.
314 for (int k = 1; k < m_n; k++)
315 {
317 m_dim[m_top] *= m_dim[k];
318 else
319 {
320 m_top++;
321 m_dim[m_top] = m_dim[k];
322 m_stride[m_top] = m_stride[k];
323 }
324 }
325
326 // Determine whether we can use block transposes.
327 m_use_blk = m_top >= 1 && m_stride[1] == 1 && m_stride[0] == m_dim[1];
328
329 }
330
331 // No copying!
332
334
336
337 ~rec_permute_helper (void) { delete [] m_dim; }
338
339 template <typename T>
340 void permute (const T *src, T *dest) const { do_permute (src, dest, m_top); }
341
342 // Helper method for fast blocked transpose.
343 template <typename T>
344 static T *
345 blk_trans (const T *src, T *dest, octave_idx_type nr, octave_idx_type nc)
346 {
347 static const octave_idx_type m = 8;
348 OCTAVE_LOCAL_BUFFER (T, blk, m*m);
349 for (octave_idx_type kr = 0; kr < nr; kr += m)
350 for (octave_idx_type kc = 0; kc < nc; kc += m)
351 {
352 octave_idx_type lr = std::min (m, nr - kr);
353 octave_idx_type lc = std::min (m, nc - kc);
354 if (lr == m && lc == m)
355 {
356 const T *ss = src + kc * nr + kr;
357 for (octave_idx_type j = 0; j < m; j++)
358 for (octave_idx_type i = 0; i < m; i++)
359 blk[j*m+i] = ss[j*nr + i];
360 T *dd = dest + kr * nc + kc;
361 for (octave_idx_type j = 0; j < m; j++)
362 for (octave_idx_type i = 0; i < m; i++)
363 dd[j*nc+i] = blk[i*m+j];
364 }
365 else
366 {
367 const T *ss = src + kc * nr + kr;
368 for (octave_idx_type j = 0; j < lc; j++)
369 for (octave_idx_type i = 0; i < lr; i++)
370 blk[j*m+i] = ss[j*nr + i];
371 T *dd = dest + kr * nc + kc;
372 for (octave_idx_type j = 0; j < lr; j++)
373 for (octave_idx_type i = 0; i < lc; i++)
374 dd[j*nc+i] = blk[i*m+j];
375 }
376 }
377
378 return dest + nr*nc;
379 }
380
381private:
382
383 // Recursive N-D generalized transpose
384 template <typename T>
385 T * do_permute (const T *src, T *dest, int lev) const
386 {
387 if (lev == 0)
388 {
389 octave_idx_type step = m_stride[0];
391 if (step == 1)
392 {
393 std::copy_n (src, len, dest);
394 dest += len;
395 }
396 else
397 {
398 for (octave_idx_type i = 0, j = 0; i < len; i++, j += step)
399 dest[i] = src[j];
400
401 dest += len;
403 }
404 else if (m_use_blk && lev == 1)
405 dest = blk_trans (src, dest, m_dim[1], m_dim[0]);
406 else
407 {
408 octave_idx_type step = m_stride[lev];
410 for (octave_idx_type i = 0, j = 0; i < len; i++, j+= step)
411 dest = do_permute (src + i * step, dest, lev-1);
412 }
413
414 return dest;
415 }
416
417 //--------
418
419 // STRIDE occupies the last half of the space allocated for dim to
420 // avoid a double allocation.
421
422 int m_n;
423 int m_top;
427};
428
429template <typename T, typename Alloc>
431Array<T, Alloc>::permute (const Array<octave_idx_type>& perm_vec_arg, bool inv) const
432{
433 Array<T, Alloc> retval;
434
435 Array<octave_idx_type> perm_vec = perm_vec_arg;
436
437 dim_vector dv = dims ();
438
439 int perm_vec_len = perm_vec_arg.numel ();
440
441 if (perm_vec_len < dv.ndims ())
442 (*current_liboctave_error_handler)
443 ("%s: invalid permutation vector", inv ? "ipermute" : "permute");
444
445 dim_vector dv_new = dim_vector::alloc (perm_vec_len);
446
447 // Append singleton dimensions as needed.
448 dv.resize (perm_vec_len, 1);
449
450 // Need this array to check for identical elements in permutation array.
451 OCTAVE_LOCAL_BUFFER_INIT (bool, checked, perm_vec_len, false);
452
453 bool identity = true;
454
455 // Find dimension vector of permuted array.
456 for (int i = 0; i < perm_vec_len; i++)
457 {
458 octave_idx_type perm_elt = perm_vec.elem (i);
459 if (perm_elt >= perm_vec_len || perm_elt < 0)
460 (*current_liboctave_error_handler)
461 ("%s: permutation vector contains an invalid element",
462 inv ? "ipermute" : "permute");
463
464 if (checked[perm_elt])
465 (*current_liboctave_error_handler)
466 ("%s: permutation vector cannot contain identical elements",
467 inv ? "ipermute" : "permute");
468 else
469 {
470 checked[perm_elt] = true;
471 identity = identity && perm_elt == i;
472 }
473 }
474
475 if (identity)
476 return *this;
477
478 if (inv)
479 {
480 for (int i = 0; i < perm_vec_len; i++)
481 perm_vec(perm_vec_arg(i)) = i;
482 }
483
484 for (int i = 0; i < perm_vec_len; i++)
485 dv_new(i) = dv(perm_vec(i));
486
487 retval = Array<T, Alloc> (dv_new);
488
489 if (numel () > 0)
491 rec_permute_helper rh (dv, perm_vec);
492 rh.permute (data (), retval.fortran_vec ());
494
495 return retval;
496}
497
498// Helper class for multi-d index reduction and recursive
499// indexing/indexed assignment. Rationale: we could avoid recursion
500// using a state machine instead. However, using recursion is much
501// more amenable to possible parallelization in the future.
502// Also, the recursion solution is cleaner and more understandable.
503
505{
506public:
508 : m_n (ia.numel ()), m_top (0), m_dim (new octave_idx_type [2*m_n]),
510 {
511 assert (m_n > 0 && (dv.ndims () == std::max (m_n, 2)));
512
513 m_dim[0] = dv(0);
514 m_cdim[0] = 1;
515 m_idx[0] = ia(0);
516
517 for (int i = 1; i < m_n; i++)
518 {
519 // Try reduction...
520 if (m_idx[m_top].maybe_reduce (m_dim[m_top], ia(i), dv(i)))
521 {
522 // Reduction successful, fold dimensions.
523 m_dim[m_top] *= dv(i);
524 }
525 else
527 // Unsuccessful, store index & cumulative m_dim.
529 m_idx[m_top] = ia(i);
530 m_dim[m_top] = dv(i);
531 m_cdim[m_top] = m_cdim[m_top-1] * m_dim[m_top-1];
533 }
534 }
535
536 // No copying!
537
539
541
542 ~rec_index_helper (void) { delete [] m_idx; delete [] m_dim; }
543
544 template <typename T>
545 void index (const T *src, T *dest) const { do_index (src, dest, m_top); }
546
547 template <typename T>
548 void assign (const T *src, T *dest) const { do_assign (src, dest, m_top); }
549
550 template <typename T>
551 void fill (const T& val, T *dest) const { do_fill (val, dest, m_top); }
552
554 {
555 return m_top == 0 && m_idx[0].is_cont_range (m_dim[0], l, u);
556 }
558private:
560 // Recursive N-D indexing
561 template <typename T>
562 T * do_index (const T *src, T *dest, int lev) const
563 {
564 if (lev == 0)
565 dest += m_idx[0].index (src, m_dim[0], dest);
566 else
567 {
568 octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
569 octave_idx_type d = m_cdim[lev];
570 for (octave_idx_type i = 0; i < nn; i++)
571 dest = do_index (src + d*m_idx[lev].xelem (i), dest, lev-1);
572 }
573
574 return dest;
575 }
576
577 // Recursive N-D indexed assignment
578 template <typename T>
579 const T * do_assign (const T *src, T *dest, int lev) const
580 {
581 if (lev == 0)
582 src += m_idx[0].assign (src, m_dim[0], dest);
583 else
584 {
585 octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
586 octave_idx_type d = m_cdim[lev];
587 for (octave_idx_type i = 0; i < nn; i++)
588 src = do_assign (src, dest + d*m_idx[lev].xelem (i), lev-1);
590
591 return src;
592 }
594 // Recursive N-D indexed assignment
595 template <typename T>
596 void do_fill (const T& val, T *dest, int lev) const
597 {
598 if (lev == 0)
599 m_idx[0].fill (val, m_dim[0], dest);
600 else
602 octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
603 octave_idx_type d = m_cdim[lev];
604 for (octave_idx_type i = 0; i < nn; i++)
605 do_fill (val, dest + d*m_idx[lev].xelem (i), lev-1);
606 }
607 }
608
609 //--------
610
611 // CDIM occupies the last half of the space allocated for dim to
612 // avoid a double allocation.
614 int m_n;
615 int m_top;
619};
620
621// Helper class for multi-d recursive resizing
622// This handles resize () in an efficient manner, touching memory only
623// once (apart from reinitialization)
625{
626public:
627 rec_resize_helper (const dim_vector& ndv, const dim_vector& odv)
628 : m_cext (nullptr), m_sext (nullptr), m_dext (nullptr), m_n (0)
629 {
630 int l = ndv.ndims ();
631 assert (odv.ndims () == l);
632 octave_idx_type ld = 1;
633 int i = 0;
634 for (; i < l-1 && ndv(i) == odv(i); i++) ld *= ndv(i);
635 m_n = l - i;
636 m_cext = new octave_idx_type [3*m_n];
637 // Trick to avoid three allocations
639 m_dext = m_sext + m_n;
640
641 octave_idx_type sld = ld;
643 for (int j = 0; j < m_n; j++)
644 {
645 m_cext[j] = std::min (ndv(i+j), odv(i+j));
646 m_sext[j] = sld *= odv(i+j);
647 m_dext[j] = dld *= ndv(i+j);
649 m_cext[0] *= ld;
650 }
652 // No copying!
653
655
657
658 ~rec_resize_helper (void) { delete [] m_cext; }
659
660 template <typename T>
661 void resize_fill (const T *src, T *dest, const T& rfv) const
662 { do_resize_fill (src, dest, rfv, m_n-1); }
663
664private:
665
666 // recursive resizing
667 template <typename T>
668 void do_resize_fill (const T *src, T *dest, const T& rfv, int lev) const
669 {
670 if (lev == 0)
671 {
672 std::copy_n (src, m_cext[0], dest);
673 std::fill_n (dest + m_cext[0], m_dext[0] - m_cext[0], rfv);
674 }
675 else
677 octave_idx_type sd, dd, k;
678 sd = m_sext[lev-1];
679 dd = m_dext[lev-1];
680 for (k = 0; k < m_cext[lev]; k++)
681 do_resize_fill (src + k * sd, dest + k * dd, rfv, lev - 1);
682
683 std::fill_n (dest + k * dd, m_dext[lev] - k * dd, rfv);
684 }
685 }
687 //--------
688
692 int m_n;
693};
694
695template <typename T, typename Alloc>
698{
699 // Colon:
700 //
701 // object | index | result orientation
702 // ---------+----------+-------------------
703 // anything | colon | column vector
704 //
705 // Numeric array or logical mask:
706 //
707 // Note that logical mask indices are always transformed to vectors
708 // before we see them.
709 //
710 // object | index | result orientation
711 // ---------+----------+-------------------
712 // vector | vector | indexed object
713 // | other | same size as index
714 // ---------+----------+-------------------
715 // array | anything | same size as index
716
718 Array<T, Alloc> retval;
719
720 if (i.is_colon ())
721 {
722 // A(:) produces a shallow copy as a column vector.
723 retval = Array<T, Alloc> (*this, dim_vector (n, 1));
725 else
726 {
727 if (i.extent (n) != n)
728 octave::err_index_out_of_range (1, 1, i.extent (n), n, m_dimensions);
729
730 dim_vector result_dims = i.orig_dimensions ();
731 octave_idx_type idx_len = i.length ();
732
733 if (n != 1 && is_nd_vector () && idx_len != 1
734 && result_dims.is_nd_vector ())
735 {
736 // Indexed object and index are both vectors. Set result size
737 // and orientation as above.
738
739 dim_vector dv = dims ();
741 result_dims = dv.make_nd_vector (idx_len);
744 octave_idx_type l, u;
745 if (idx_len != 0 && i.is_cont_range (n, l, u))
746 // If suitable, produce a shallow slice.
747 retval = Array<T, Alloc> (*this, result_dims, l, u);
748 else
749 {
750 // Don't use resize here to avoid useless initialization for POD
751 // types.
752 retval = Array<T, Alloc> (result_dims);
754 if (idx_len != 0)
755 i.index (data (), n, retval.fortran_vec ());
756 }
758
759 return retval;
760}
762template <typename T, typename Alloc>
766 // Get dimensions, allowing Fortran indexing in the 2nd dim.
767 dim_vector dv = m_dimensions.redim (2);
768 octave_idx_type r = dv(0);
770 Array<T, Alloc> retval;
771
772 if (i.is_colon () && j.is_colon ())
773 {
774 // A(:,:) produces a shallow copy.
775 retval = Array<T, Alloc> (*this, dv);
776 }
777 else
778 {
779 if (i.extent (r) != r)
780 octave::err_index_out_of_range (2, 1, i.extent (r), r, m_dimensions); // throws
781 if (j.extent (c) != c)
782 octave::err_index_out_of_range (2, 2, j.extent (c), c, m_dimensions); // throws
783
784 octave_idx_type n = numel ();
785 octave_idx_type il = i.length (r);
786 octave_idx_type jl = j.length (c);
787
789
790 if (ii.maybe_reduce (r, j, c))
791 {
792 octave_idx_type l, u;
793 if (ii.length () > 0 && ii.is_cont_range (n, l, u))
794 // If suitable, produce a shallow slice.
795 retval = Array<T, Alloc> (*this, dim_vector (il, jl), l, u);
796 else
797 {
798 // Don't use resize to avoid useless initialization for POD types.
799 retval = Array<T, Alloc> (dim_vector (il, jl));
800
801 ii.index (data (), n, retval.fortran_vec ());
802 }
803 }
804 else
805 {
806 // Don't use resize to avoid useless initialization for POD types.
807 retval = Array<T, Alloc> (dim_vector (il, jl));
808
809 const T *src = data ();
810 T *dest = retval.fortran_vec ();
811
812 for (octave_idx_type k = 0; k < jl; k++)
813 dest += i.index (src + r * j.xelem (k), r, dest);
814 }
815 }
816
817 return retval;
818}
819
820template <typename T, typename Alloc>
823{
824 int ial = ia.numel ();
825 Array<T, Alloc> retval;
826
827 // FIXME: is this dispatching necessary?
828 if (ial == 1)
829 retval = index (ia(0));
830 else if (ial == 2)
831 retval = index (ia(0), ia(1));
832 else if (ial > 0)
833 {
834 // Get dimensions, allowing Fortran indexing in the last dim.
835 dim_vector dv = m_dimensions.redim (ial);
836
837 // Check for out of bounds conditions.
838 bool all_colons = true;
839 for (int i = 0; i < ial; i++)
840 {
841 if (ia(i).extent (dv(i)) != dv(i))
842 octave::err_index_out_of_range (ial, i+1, ia(i).extent (dv(i)), dv(i),
843 m_dimensions); // throws
844
845 all_colons = all_colons && ia(i).is_colon ();
846 }
847
848 if (all_colons)
849 {
850 // A(:,:,...,:) produces a shallow copy.
852 retval = Array<T, Alloc> (*this, dv);
853 }
854 else
855 {
856 // Form result dimensions.
857 dim_vector rdv = dim_vector::alloc (ial);
858 for (int i = 0; i < ial; i++) rdv(i) = ia(i).length (dv(i));
860
861 // Prepare for recursive indexing
862 rec_index_helper rh (dv, ia);
863
864 octave_idx_type l, u;
865 if (rh.is_cont_range (l, u))
866 // If suitable, produce a shallow slice.
867 retval = Array<T, Alloc> (*this, rdv, l, u);
868 else
869 {
870 // Don't use resize to avoid useless initialization for POD types.
871 retval = Array<T, Alloc> (rdv);
872
873 // Do it.
874 rh.index (data (), retval.fortran_vec ());
875 }
876 }
877 }
878
879 return retval;
880}
881
882// The default fill value. Override if you want a different one.
883
884template <typename T, typename Alloc>
885T
887{
888 static T zero = T ();
889 return zero;
890}
891
892// Yes, we could do resize using index & assign. However, that would
893// possibly involve a lot more memory traffic than we actually need.
894
895template <typename T, typename Alloc>
896void
898{
899 if (n < 0 || ndims () != 2)
901
902 dim_vector dv;
903 // This is driven by Matlab's behavior of giving a *row* vector
904 // on some out-of-bounds assignments. Specifically, Matlab
905 // allows a(i) with out-of-bouds i when a is either of 0x0, 1x0,
906 // 1x1, 0xN, and gives a row vector in all cases (yes, even the
907 // last one, search me why). Giving a column vector would make
908 // much more sense (given the way trailing singleton dims are
909 // treated).
910 bool invalid = false;
911 if (rows () == 0 || rows () == 1)
912 dv = dim_vector (1, n);
913 else if (columns () == 1)
914 dv = dim_vector (n, 1);
915 else
916 invalid = true;
917
918 if (invalid)
920
921 octave_idx_type nx = numel ();
922 if (n == nx - 1 && n > 0)
923 {
924 // Stack "pop" operation.
925 if (m_rep->m_count == 1)
926 m_slice_data[m_slice_len-1] = T ();
927 m_slice_len--;
928 m_dimensions = dv;
929 }
930 else if (n == nx + 1 && nx > 0)
931 {
932 // Stack "push" operation.
933 if (m_rep->m_count == 1
934 && m_slice_data + m_slice_len < m_rep->m_data + m_rep->m_len)
935 {
936 m_slice_data[m_slice_len++] = rfv;
937 m_dimensions = dv;
938 }
939 else
940 {
941 static const octave_idx_type max_stack_chunk = 1024;
942 octave_idx_type nn = n + std::min (nx, max_stack_chunk);
943 Array<T, Alloc> tmp (Array<T, Alloc> (dim_vector (nn, 1)), dv, 0, n);
944 T *dest = tmp.fortran_vec ();
945
946 std::copy_n (data (), nx, dest);
947 dest[nx] = rfv;
948
949 *this = tmp;
950 }
951 }
952 else if (n != nx)
953 {
955 T *dest = tmp.fortran_vec ();
956
957 octave_idx_type n0 = std::min (n, nx);
958 octave_idx_type n1 = n - n0;
959 std::copy_n (data (), n0, dest);
960 std::fill_n (dest + n0, n1, rfv);
961
962 *this = tmp;
963 }
964}
965
966template <typename T, typename Alloc>
967void
969{
970 if (r < 0 || c < 0 || ndims () != 2)
972
973 octave_idx_type rx = rows ();
974 octave_idx_type cx = columns ();
975 if (r != rx || c != cx)
976 {
978 T *dest = tmp.fortran_vec ();
979
980 octave_idx_type r0 = std::min (r, rx);
981 octave_idx_type r1 = r - r0;
982 octave_idx_type c0 = std::min (c, cx);
983 octave_idx_type c1 = c - c0;
984 const T *src = data ();
985 if (r == rx)
986 {
987 std::copy_n (src, r * c0, dest);
988 dest += r * c0;
989 }
990 else
991 {
992 for (octave_idx_type k = 0; k < c0; k++)
993 {
994 std::copy_n (src, r0, dest);
995 src += rx;
996 dest += r0;
997 std::fill_n (dest, r1, rfv);
998 dest += r1;
999 }
1000 }
1001
1002 std::fill_n (dest, r * c1, rfv);
1003
1004 *this = tmp;
1005 }
1006}
1007
1008template <typename T, typename Alloc>
1009void
1010Array<T, Alloc>::resize (const dim_vector& dv, const T& rfv)
1011{
1012 int dvl = dv.ndims ();
1013 if (dvl == 2)
1014 resize2 (dv(0), dv(1), rfv);
1015 else if (m_dimensions != dv)
1016 {
1017 if (m_dimensions.ndims () > dvl || dv.any_neg ())
1019
1020 Array<T, Alloc> tmp (dv);
1021 // Prepare for recursive resizing.
1022 rec_resize_helper rh (dv, m_dimensions.redim (dvl));
1023
1024 // Do it.
1025 rh.resize_fill (data (), tmp.fortran_vec (), rfv);
1026 *this = tmp;
1027 }
1028}
1029
1030template <typename T, typename Alloc>
1032Array<T, Alloc>::index (const octave::idx_vector& i, bool resize_ok, const T& rfv) const
1033{
1034 Array<T, Alloc> tmp = *this;
1035 if (resize_ok)
1036 {
1037 octave_idx_type n = numel ();
1038 octave_idx_type nx = i.extent (n);
1039 if (n != nx)
1040 {
1041 if (i.is_scalar ())
1042 return Array<T, Alloc> (dim_vector (1, 1), rfv);
1043 else
1044 tmp.resize1 (nx, rfv);
1045 }
1046
1047 if (tmp.numel () != nx)
1048 return Array<T, Alloc> ();
1049 }
1050
1051 return tmp.index (i);
1052}
1053
1054template <typename T, typename Alloc>
1057 bool resize_ok, const T& rfv) const
1058{
1059 Array<T, Alloc> tmp = *this;
1060 if (resize_ok)
1061 {
1062 dim_vector dv = m_dimensions.redim (2);
1063 octave_idx_type r = dv(0);
1064 octave_idx_type c = dv(1);
1065 octave_idx_type rx = i.extent (r);
1066 octave_idx_type cx = j.extent (c);
1067 if (r != rx || c != cx)
1068 {
1069 if (i.is_scalar () && j.is_scalar ())
1070 return Array<T, Alloc> (dim_vector (1, 1), rfv);
1071 else
1072 tmp.resize2 (rx, cx, rfv);
1073 }
1074
1075 if (tmp.rows () != rx || tmp.columns () != cx)
1076 return Array<T, Alloc> ();
1077 }
1078
1079 return tmp.index (i, j);
1080}
1081
1082template <typename T, typename Alloc>
1085 bool resize_ok, const T& rfv) const
1086{
1087 Array<T, Alloc> tmp = *this;
1088 if (resize_ok)
1089 {
1090 int ial = ia.numel ();
1091 dim_vector dv = m_dimensions.redim (ial);
1092 dim_vector dvx = dim_vector::alloc (ial);
1093 for (int i = 0; i < ial; i++)
1094 dvx(i) = ia(i).extent (dv(i));
1095 if (! (dvx == dv))
1096 {
1097 bool all_scalars = true;
1098 for (int i = 0; i < ial; i++)
1099 all_scalars = all_scalars && ia(i).is_scalar ();
1100 if (all_scalars)
1101 return Array<T, Alloc> (dim_vector (1, 1), rfv);
1102 else
1103 tmp.resize (dvx, rfv);
1104
1105 if (tmp.m_dimensions != dvx)
1106 return Array<T, Alloc> ();
1107 }
1108 }
1109
1110 return tmp.index (ia);
1111}
1112
1113template <typename T, typename Alloc>
1114void
1116{
1117 octave_idx_type n = numel ();
1118 octave_idx_type rhl = rhs.numel ();
1119
1120 if (rhl != 1 && i.length (n) != rhl)
1121 octave::err_nonconformant ("=", dim_vector(i.length(n), 1), rhs.dims());
1122
1123 octave_idx_type nx = i.extent (n);
1124 bool colon = i.is_colon_equiv (nx);
1125 // Try to resize first if necessary.
1126 if (nx != n)
1127 {
1128 // Optimize case A = []; A(1:n) = X with A empty.
1129 if (m_dimensions.zero_by_zero () && colon)
1130 {
1131 if (rhl == 1)
1132 *this = Array<T, Alloc> (dim_vector (1, nx), rhs(0));
1133 else
1134 *this = Array<T, Alloc> (rhs, dim_vector (1, nx));
1135 return;
1136 }
1137
1138 resize1 (nx, rfv);
1139 n = numel ();
1140 }
1141
1142 if (colon)
1143 {
1144 // A(:) = X makes a full fill or a shallow copy.
1145 if (rhl == 1)
1146 fill (rhs(0));
1147 else
1148 *this = rhs.reshape (m_dimensions);
1149 }
1150 else
1151 {
1152 if (rhl == 1)
1153 i.fill (rhs(0), n, fortran_vec ());
1154 else
1155 i.assign (rhs.data (), n, fortran_vec ());
1156 }
1157}
1158
1159// Assignment to a 2-dimensional array
1160template <typename T, typename Alloc>
1161void
1163 const Array<T, Alloc>& rhs, const T& rfv)
1164{
1165 bool initial_dims_all_zero = m_dimensions.all_zero ();
1166
1167 // Get RHS extents, discarding singletons.
1168 dim_vector rhdv = rhs.dims ();
1169
1170 // Get LHS extents, allowing Fortran indexing in the second dim.
1171 dim_vector dv = m_dimensions.redim (2);
1172
1173 // Check for out-of-bounds and form resizing m_dimensions.
1174 dim_vector rdv;
1175
1176 // In the special when all dimensions are zero, colons are allowed
1177 // to inquire the shape of RHS. The rules are more obscure, so we
1178 // solve that elsewhere.
1179 if (initial_dims_all_zero)
1180 rdv = zero_dims_inquire (i, j, rhdv);
1181 else
1182 {
1183 rdv(0) = i.extent (dv(0));
1184 rdv(1) = j.extent (dv(1));
1185 }
1186
1187 bool isfill = rhs.numel () == 1;
1188 octave_idx_type il = i.length (rdv(0));
1189 octave_idx_type jl = j.length (rdv(1));
1190 rhdv.chop_all_singletons ();
1191 bool match = (isfill
1192 || (rhdv.ndims () == 2 && il == rhdv(0) && jl == rhdv(1)));
1193 match = match || (il == 1 && jl == rhdv(0) && rhdv(1) == 1);
1194
1195 if (match)
1196 {
1197 bool all_colons = (i.is_colon_equiv (rdv(0))
1198 && j.is_colon_equiv (rdv(1)));
1199 // Resize if requested.
1200 if (rdv != dv)
1201 {
1202 // Optimize case A = []; A(1:m, 1:n) = X
1203 if (dv.zero_by_zero () && all_colons)
1204 {
1205 if (isfill)
1206 *this = Array<T, Alloc> (rdv, rhs(0));
1207 else
1208 *this = Array<T, Alloc> (rhs, rdv);
1209 return;
1210 }
1211
1212 resize (rdv, rfv);
1213 dv = m_dimensions;
1214 }
1215
1216 if (all_colons)
1217 {
1218 // A(:,:) = X makes a full fill or a shallow copy
1219 if (isfill)
1220 fill (rhs(0));
1221 else
1222 *this = rhs.reshape (m_dimensions);
1223 }
1224 else
1225 {
1226 // The actual work.
1227 octave_idx_type n = numel ();
1228 octave_idx_type r = dv(0);
1229 octave_idx_type c = dv(1);
1230 octave::idx_vector ii (i);
1231
1232 const T *src = rhs.data ();
1233 T *dest = fortran_vec ();
1234
1235 // Try reduction first.
1236 if (ii.maybe_reduce (r, j, c))
1237 {
1238 if (isfill)
1239 ii.fill (*src, n, dest);
1240 else
1241 ii.assign (src, n, dest);
1242 }
1243 else
1244 {
1245 if (isfill)
1246 {
1247 for (octave_idx_type k = 0; k < jl; k++)
1248 i.fill (*src, r, dest + r * j.xelem (k));
1249 }
1250 else
1251 {
1252 for (octave_idx_type k = 0; k < jl; k++)
1253 src += i.assign (src, r, dest + r * j.xelem (k));
1254 }
1255 }
1256 }
1257 }
1258 // any empty RHS can be assigned to an empty LHS
1259 else if ((il != 0 && jl != 0) || (rhdv(0) != 0 && rhdv(1) != 0))
1260 octave::err_nonconformant ("=", il, jl, rhs.dim1 (), rhs.dim2 ());
1261}
1262
1263// Assignment to a multi-dimensional array
1264template <typename T, typename Alloc>
1265void
1267 const Array<T, Alloc>& rhs, const T& rfv)
1268{
1269 int ial = ia.numel ();
1270
1271 // FIXME: is this dispatching necessary / desirable?
1272 if (ial == 1)
1273 assign (ia(0), rhs, rfv);
1274 else if (ial == 2)
1275 assign (ia(0), ia(1), rhs, rfv);
1276 else if (ial > 0)
1277 {
1278 bool initial_dims_all_zero = m_dimensions.all_zero ();
1279
1280 // Get RHS extents, discarding singletons.
1281 dim_vector rhdv = rhs.dims ();
1282
1283 // Get LHS extents, allowing Fortran indexing in the second dim.
1284 dim_vector dv = m_dimensions.redim (ial);
1285
1286 // Get the extents forced by indexing.
1287 dim_vector rdv;
1288
1289 // In the special when all dimensions are zero, colons are
1290 // allowed to inquire the shape of RHS. The rules are more
1291 // obscure, so we solve that elsewhere.
1292 if (initial_dims_all_zero)
1293 rdv = zero_dims_inquire (ia, rhdv);
1294 else
1295 {
1296 rdv = dim_vector::alloc (ial);
1297 for (int i = 0; i < ial; i++)
1298 rdv(i) = ia(i).extent (dv(i));
1299 }
1300
1301 // Check whether LHS and RHS match, up to singleton dims.
1302 bool match = true;
1303 bool all_colons = true;
1304 bool isfill = rhs.numel () == 1;
1305
1306 rhdv.chop_all_singletons ();
1307 int j = 0;
1308 int rhdvl = rhdv.ndims ();
1309 for (int i = 0; i < ial; i++)
1310 {
1311 all_colons = all_colons && ia(i).is_colon_equiv (rdv(i));
1312 octave_idx_type l = ia(i).length (rdv(i));
1313 if (l == 1) continue;
1314 match = match && j < rhdvl && l == rhdv(j++);
1315 }
1316
1317 match = match && (j == rhdvl || rhdv(j) == 1);
1318 match = match || isfill;
1319
1320 if (match)
1321 {
1322 // Resize first if necessary.
1323 if (rdv != dv)
1324 {
1325 // Optimize case A = []; A(1:m, 1:n) = X
1326 if (dv.zero_by_zero () && all_colons)
1327 {
1329 if (isfill)
1330 *this = Array<T, Alloc> (rdv, rhs(0));
1331 else
1332 *this = Array<T, Alloc> (rhs, rdv);
1333 return;
1334 }
1335
1336 resize (rdv, rfv);
1337 dv = rdv;
1338 }
1339
1340 if (all_colons)
1341 {
1342 // A(:,:,...,:) = X makes a full fill or a shallow copy.
1343 if (isfill)
1344 fill (rhs(0));
1345 else
1346 *this = rhs.reshape (m_dimensions);
1347 }
1348 else
1349 {
1350 // Do the actual work.
1351
1352 // Prepare for recursive indexing
1353 rec_index_helper rh (dv, ia);
1354
1355 // Do it.
1356 if (isfill)
1357 rh.fill (rhs(0), fortran_vec ());
1358 else
1359 rh.assign (rhs.data (), fortran_vec ());
1360 }
1361 }
1362 else
1363 {
1364 // dimension mismatch, unless LHS and RHS both empty
1365 bool lhsempty, rhsempty;
1366 lhsempty = rhsempty = false;
1367 dim_vector lhs_dv = dim_vector::alloc (ial);
1368 for (int i = 0; i < ial; i++)
1369 {
1370 octave_idx_type l = ia(i).length (rdv(i));
1371 lhs_dv(i) = l;
1372 lhsempty = lhsempty || (l == 0);
1373 rhsempty = rhsempty || (rhdv(j++) == 0);
1374 }
1375 if (! lhsempty || ! rhsempty)
1376 {
1377 lhs_dv.chop_trailing_singletons ();
1378 octave::err_nonconformant ("=", lhs_dv, rhdv);
1379 }
1380 }
1381 }
1382}
1383
1384/*
1385%!shared a
1386%! a = [1 2; 3 4];
1387%!error <op1 is 1x2, op2 is 1x3> a(1,:) = [1 2 3]
1388%!error <op1 is 2x1, op2 is 1x3> a(:,1) = [1 2 3]
1389%!error <op1 is 2x2, op2 is 2x1> a(1:2,2:3) = [1;2]
1390*/
1391
1392template <typename T, typename Alloc>
1393void
1395{
1396 octave_idx_type n = numel ();
1397 if (i.is_colon ())
1398 {
1399 *this = Array<T, Alloc> ();
1400 }
1401 else if (i.length (n) != 0)
1402 {
1403 if (i.extent (n) != n)
1405
1406 octave_idx_type l, u;
1407 bool col_vec = ndims () == 2 && columns () == 1 && rows () != 1;
1408 if (i.is_scalar () && i(0) == n-1 && m_dimensions.isvector ())
1409 {
1410 // Stack "pop" operation.
1411 resize1 (n-1);
1412 }
1413 else if (i.is_cont_range (n, l, u))
1414 {
1415 // Special case deleting a contiguous range.
1416 octave_idx_type m = n + l - u;
1417 Array<T, Alloc> tmp (dim_vector (col_vec ? m : 1, ! col_vec ? m : 1));
1418 const T *src = data ();
1419 T *dest = tmp.fortran_vec ();
1420 std::copy_n (src, l, dest);
1421 std::copy (src + u, src + n, dest + l);
1422 *this = tmp;
1423 }
1424 else
1425 {
1426 // Use index.
1427 *this = index (i.complement (n));
1428 }
1429 }
1430}
1431
1432template <typename T, typename Alloc>
1433void
1435{
1436 if (dim < 0 || dim >= ndims ())
1437 (*current_liboctave_error_handler) ("invalid dimension in delete_elements");
1438
1439 octave_idx_type n = m_dimensions(dim);
1440 if (i.is_colon ())
1441 {
1442 *this = Array<T, Alloc> ();
1443 }
1444 else if (i.length (n) != 0)
1445 {
1446 if (i.extent (n) != n)
1448
1449 octave_idx_type l, u;
1450
1451 if (i.is_cont_range (n, l, u))
1452 {
1453 // Special case deleting a contiguous range.
1454 octave_idx_type nd = n + l - u;
1455 octave_idx_type dl = 1;
1456 octave_idx_type du = 1;
1457 dim_vector rdv = m_dimensions;
1458 rdv(dim) = nd;
1459 for (int k = 0; k < dim; k++) dl *= m_dimensions(k);
1460 for (int k = dim + 1; k < ndims (); k++) du *= m_dimensions(k);
1461
1462 // Special case deleting a contiguous range.
1463 Array<T, Alloc> tmp = Array<T, Alloc> (rdv);
1464 const T *src = data ();
1465 T *dest = tmp.fortran_vec ();
1466 l *= dl; u *= dl; n *= dl;
1467 for (octave_idx_type k = 0; k < du; k++)
1468 {
1469 std::copy_n (src, l, dest);
1470 dest += l;
1471 std::copy (src + u, src + n, dest);
1472 dest += n - u;
1473 src += n;
1474 }
1475
1476 *this = tmp;
1477 }
1478 else
1479 {
1480 // Use index.
1482 ia (dim) = i.complement (n);
1483 *this = index (ia);
1484 }
1485 }
1486}
1487
1488template <typename T, typename Alloc>
1489void
1491{
1492 int ial = ia.numel ();
1493
1494 if (ial == 1)
1495 delete_elements (ia(0));
1496 else
1497 {
1498 int k, dim = -1;
1499 for (k = 0; k < ial; k++)
1500 {
1501 if (! ia(k).is_colon ())
1502 {
1503 if (dim < 0)
1504 dim = k;
1505 else
1506 break;
1507 }
1508 }
1509 if (dim < 0)
1510 {
1511 dim_vector dv = m_dimensions;
1512 dv(0) = 0;
1513 *this = Array<T, Alloc> (dv);
1514 }
1515 else if (k == ial)
1516 {
1517 delete_elements (dim, ia(dim));
1518 }
1519 else
1520 {
1521 // Allow the null assignment to succeed if it won't change
1522 // anything because the indices reference an empty slice,
1523 // provided that there is at most one non-colon (or
1524 // equivalent) index. So, we still have the requirement of
1525 // deleting a slice, but it is OK if the slice is empty.
1526
1527 // For compatibility with Matlab, stop checking once we see
1528 // more than one non-colon index or an empty index. Matlab
1529 // considers "[]" to be an empty index but not "false". We
1530 // accept both.
1531
1532 bool empty_assignment = false;
1533
1534 int num_non_colon_indices = 0;
1535
1536 int nd = ndims ();
1537
1538 for (int i = 0; i < ial; i++)
1539 {
1540 octave_idx_type dim_len = (i >= nd ? 1 : m_dimensions(i));
1541
1542 if (ia(i).length (dim_len) == 0)
1543 {
1544 empty_assignment = true;
1545 break;
1546 }
1547
1548 if (! ia(i).is_colon_equiv (dim_len))
1549 {
1550 num_non_colon_indices++;
1551
1552 if (num_non_colon_indices == 2)
1553 break;
1554 }
1555 }
1556
1557 if (! empty_assignment)
1558 (*current_liboctave_error_handler)
1559 ("a null assignment can only have one non-colon index");
1560 }
1561 }
1562
1563}
1564
1565template <typename T, typename Alloc>
1568{
1569 octave::idx_vector i (r, r + a.rows ());
1570 octave::idx_vector j (c, c + a.columns ());
1571 if (ndims () == 2 && a.ndims () == 2)
1572 assign (i, j, a);
1573 else
1574 {
1576 idx(0) = i;
1577 idx(1) = j;
1578 for (int k = 2; k < a.ndims (); k++)
1579 idx(k) = octave::idx_vector (0, a.m_dimensions(k));
1580 assign (idx, a);
1581 }
1582
1583 return *this;
1584}
1585
1586template <typename T, typename Alloc>
1589{
1592 const dim_vector dva = a.dims ().redim (n);
1593 for (octave_idx_type k = 0; k < n; k++)
1594 idx(k) = octave::idx_vector (ra_idx(k), ra_idx(k) + dva(k));
1595
1596 assign (idx, a);
1597
1598 return *this;
1599}
1600
1601template <typename T, typename Alloc>
1604{
1605 assert (ndims () == 2);
1606
1607 octave_idx_type nr = dim1 ();
1608 octave_idx_type nc = dim2 ();
1609
1610 if (nr >= 8 && nc >= 8)
1611 {
1612 Array<T, Alloc> result (dim_vector (nc, nr));
1613
1614 // Reuse the implementation used for permuting.
1615
1616 rec_permute_helper::blk_trans (data (), result.fortran_vec (), nr, nc);
1617
1618 return result;
1619 }
1620 else if (nr > 1 && nc > 1)
1621 {
1622 Array<T, Alloc> result (dim_vector (nc, nr));
1623
1624 for (octave_idx_type j = 0; j < nc; j++)
1625 for (octave_idx_type i = 0; i < nr; i++)
1626 result.xelem (j, i) = xelem (i, j);
1627
1628 return result;
1629 }
1630 else
1631 {
1632 // Fast transpose for vectors and empty matrices.
1633 return Array<T, Alloc> (*this, dim_vector (nc, nr));
1634 }
1635}
1636
// Identity transform: returns its argument unchanged.  Used as the
// default element function in Array<T>::hermitian when no conjugation
// function is supplied.
template <typename T>
static T
no_op_fcn (const T& value)
{
  return value;
}
1643
1644template <typename T, typename Alloc>
1646Array<T, Alloc>::hermitian (T (*fcn) (const T&)) const
1647{
1648 assert (ndims () == 2);
1649
1650 if (! fcn)
1651 fcn = no_op_fcn<T>;
1652
1653 octave_idx_type nr = dim1 ();
1654 octave_idx_type nc = dim2 ();
1655
1656 if (nr >= 8 && nc >= 8)
1657 {
1658 Array<T, Alloc> result (dim_vector (nc, nr));
1659
1660 // Blocked transpose to attempt to avoid cache misses.
1661
1662 T buf[64];
1663
1664 octave_idx_type jj;
1665 for (jj = 0; jj < (nc - 8 + 1); jj += 8)
1666 {
1667 octave_idx_type ii;
1668 for (ii = 0; ii < (nr - 8 + 1); ii += 8)
1669 {
1670 // Copy to buffer
1671 for (octave_idx_type j = jj, k = 0, idxj = jj * nr;
1672 j < jj + 8; j++, idxj += nr)
1673 for (octave_idx_type i = ii; i < ii + 8; i++)
1674 buf[k++] = xelem (i + idxj);
1675
1676 // Copy from buffer
1677 for (octave_idx_type i = ii, idxi = ii * nc; i < ii + 8;
1678 i++, idxi += nc)
1679 for (octave_idx_type j = jj, k = i - ii; j < jj + 8;
1680 j++, k+=8)
1681 result.xelem (j + idxi) = fcn (buf[k]);
1682 }
1683
1684 if (ii < nr)
1685 for (octave_idx_type j = jj; j < jj + 8; j++)
1686 for (octave_idx_type i = ii; i < nr; i++)
1687 result.xelem (j, i) = fcn (xelem (i, j));
1688 }
1689
1690 for (octave_idx_type j = jj; j < nc; j++)
1691 for (octave_idx_type i = 0; i < nr; i++)
1692 result.xelem (j, i) = fcn (xelem (i, j));
1693
1694 return result;
1695 }
1696 else
1697 {
1698 Array<T, Alloc> result (dim_vector (nc, nr));
1699
1700 for (octave_idx_type j = 0; j < nc; j++)
1701 for (octave_idx_type i = 0; i < nr; i++)
1702 result.xelem (j, i) = fcn (xelem (i, j));
1703
1704 return result;
1705 }
1706}
1707
1708/*
1709
1710## Transpose tests for matrices of the tile size and plus or minus a row
1711## and with four tiles.
1712
1713%!shared m7, mt7, m8, mt8, m9, mt9
1714%! m7 = reshape (1 : 7*8, 8, 7);
1715%! mt7 = [1:8; 9:16; 17:24; 25:32; 33:40; 41:48; 49:56];
1716%! m8 = reshape (1 : 8*8, 8, 8);
%! mt8 = [mt7; 57:64];
1718%! m9 = reshape (1 : 9*8, 8, 9);
1719%! mt9 = [mt8; 65:72];
1720
1721%!assert (m7', mt7)
1722%!assert ((1i*m7).', 1i * mt7)
1723%!assert ((1i*m7)', conj (1i * mt7))
1724%!assert (m8', mt8)
1725%!assert ((1i*m8).', 1i * mt8)
1726%!assert ((1i*m8)', conj (1i * mt8))
1727%!assert (m9', mt9)
1728%!assert ((1i*m9).', 1i * mt9)
1729%!assert ((1i*m9)', conj (1i * mt9))
1730%!assert ([m7, m8; m7, m8]', [mt7, mt7; mt8, mt8])
1731%!assert ((1i*[m7, m8; m7, m8]).', 1i * [mt7, mt7; mt8, mt8])
1732%!assert ((1i*[m7, m8; m7, m8])', conj (1i * [mt7, mt7; mt8, mt8]))
1733%!assert ([m8, m8; m8, m8]', [mt8, mt8; mt8, mt8])
1734%!assert ((1i*[m8, m8; m8, m8]).', 1i * [mt8, mt8; mt8, mt8])
1735%!assert ((1i*[m8, m8; m8, m8])', conj (1i * [mt8, mt8; mt8, mt8]))
1736%!assert ([m9, m8; m9, m8]', [mt9, mt9; mt8, mt8])
1737%!assert ((1i*[m9, m8; m9, m8]).', 1i * [mt9, mt9; mt8, mt8])
1738%!assert ((1i*[m9, m8; m9, m8])', conj (1i * [mt9, mt9; mt8, mt8]))
1739
1740*/
1741
1742template <typename T, typename Alloc>
1743T *
1745{
1746 make_unique ();
1747
1748 return m_slice_data;
1749}
1750
1751// Non-real types don't have NaNs.
1752template <typename T>
1753inline bool
1755{
1756 return false;
1757}
1758
1759template <typename T, typename Alloc>
1761Array<T, Alloc>::sort (int dim, sortmode mode) const
1762{
1763 if (dim < 0)
1764 (*current_liboctave_error_handler) ("sort: invalid dimension");
1765
1766 Array<T, Alloc> m (dims ());
1767
1768 dim_vector dv = m.dims ();
1769
1770 if (m.numel () < 1)
1771 return m;
1772
1773 if (dim >= dv.ndims ())
1774 dv.resize (dim+1, 1);
1775
1776 octave_idx_type ns = dv(dim);
1777 octave_idx_type iter = dv.numel () / ns;
1778 octave_idx_type stride = 1;
1779
1780 for (int i = 0; i < dim; i++)
1781 stride *= dv(i);
1782
1783 T *v = m.fortran_vec ();
1784 const T *ov = data ();
1785
1786 octave_sort<T> lsort;
1787
1788 if (mode != UNSORTED)
1789 lsort.set_compare (mode);
1790 else
1791 return m;
1792
1793 if (stride == 1)
1794 {
1795 // Special case along first dimension avoids gather/scatter AND directly
1796 // sorts into destination buffer for an 11% performance boost.
1797 for (octave_idx_type j = 0; j < iter; j++)
1798 {
1799 // Copy and partition out NaNs.
1800 // No need to special case integer types <T> from floating point
1801 // types <T> to avoid sort_isnan() test as it makes no discernible
1802 // performance impact.
1803 octave_idx_type kl = 0;
1804 octave_idx_type ku = ns;
1805 for (octave_idx_type i = 0; i < ns; i++)
1806 {
1807 T tmp = ov[i];
1808 if (sort_isnan<T> (tmp))
1809 v[--ku] = tmp;
1810 else
1811 v[kl++] = tmp;
1812 }
1813
1814 // sort.
1815 lsort.sort (v, kl);
1816
1817 if (ku < ns)
1818 {
1819 // NaNs are in reverse order
1820 std::reverse (v + ku, v + ns);
1821 if (mode == DESCENDING)
1822 std::rotate (v, v + ku, v + ns);
1823 }
1824
1825 v += ns;
1826 ov += ns;
1827 }
1828 }
1829 else
1830 {
1831 OCTAVE_LOCAL_BUFFER (T, buf, ns);
1832
1833 for (octave_idx_type j = 0; j < iter; j++)
1834 {
1835 octave_idx_type offset = j;
1836 octave_idx_type n_strides = j / stride;
1837 offset += n_strides * stride * (ns - 1);
1838
1839 // gather and partition out NaNs.
1840 octave_idx_type kl = 0;
1841 octave_idx_type ku = ns;
1842 for (octave_idx_type i = 0; i < ns; i++)
1843 {
1844 T tmp = ov[i*stride + offset];
1845 if (sort_isnan<T> (tmp))
1846 buf[--ku] = tmp;
1847 else
1848 buf[kl++] = tmp;
1849 }
1850
1851 // sort.
1852 lsort.sort (buf, kl);
1853
1854 if (ku < ns)
1855 {
1856 // NaNs are in reverse order
1857 std::reverse (buf + ku, buf + ns);
1858 if (mode == DESCENDING)
1859 std::rotate (buf, buf + ku, buf + ns);
1860 }
1861
1862 // scatter.
1863 for (octave_idx_type i = 0; i < ns; i++)
1864 v[i*stride + offset] = buf[i];
1865 }
1866 }
1867
1868 return m;
1869}
1870
1871template <typename T, typename Alloc>
1874 sortmode mode) const
1875{
1876 if (dim < 0 || dim >= ndims ())
1877 (*current_liboctave_error_handler) ("sort: invalid dimension");
1878
1879 Array<T, Alloc> m (dims ());
1880
1881 dim_vector dv = m.dims ();
1882
1883 if (m.numel () < 1)
1884 {
1885 sidx = Array<octave_idx_type> (dv);
1886 return m;
1887 }
1888
1889 octave_idx_type ns = dv(dim);
1890 octave_idx_type iter = dv.numel () / ns;
1891 octave_idx_type stride = 1;
1892
1893 for (int i = 0; i < dim; i++)
1894 stride *= dv(i);
1895
1896 T *v = m.fortran_vec ();
1897 const T *ov = data ();
1898
1899 octave_sort<T> lsort;
1900
1901 sidx = Array<octave_idx_type> (dv);
1902 octave_idx_type *vi = sidx.fortran_vec ();
1903
1904 if (mode != UNSORTED)
1905 lsort.set_compare (mode);
1906 else
1907 return m;
1908
1909 if (stride == 1)
1910 {
1911 // Special case for dim 1 avoids gather/scatter for performance boost.
1912 // See comments in Array::sort (dim, mode).
1913 for (octave_idx_type j = 0; j < iter; j++)
1914 {
1915 // copy and partition out NaNs.
1916 octave_idx_type kl = 0;
1917 octave_idx_type ku = ns;
1918 for (octave_idx_type i = 0; i < ns; i++)
1919 {
1920 T tmp = ov[i];
1921 if (sort_isnan<T> (tmp))
1922 {
1923 --ku;
1924 v[ku] = tmp;
1925 vi[ku] = i;
1926 }
1927 else
1928 {
1929 v[kl] = tmp;
1930 vi[kl] = i;
1931 kl++;
1932 }
1933 }
1934
1935 // sort.
1936 lsort.sort (v, vi, kl);
1937
1938 if (ku < ns)
1939 {
1940 // NaNs are in reverse order
1941 std::reverse (v + ku, v + ns);
1942 std::reverse (vi + ku, vi + ns);
1943 if (mode == DESCENDING)
1944 {
1945 std::rotate (v, v + ku, v + ns);
1946 std::rotate (vi, vi + ku, vi + ns);
1947 }
1948 }
1949
1950 v += ns;
1951 vi += ns;
1952 ov += ns;
1953 }
1954 }
1955 else
1956 {
1957 OCTAVE_LOCAL_BUFFER (T, buf, ns);
1959
1960 for (octave_idx_type j = 0; j < iter; j++)
1961 {
1962 octave_idx_type offset = j;
1963 octave_idx_type n_strides = j / stride;
1964 offset += n_strides * stride * (ns - 1);
1965
1966 // gather and partition out NaNs.
1967 octave_idx_type kl = 0;
1968 octave_idx_type ku = ns;
1969 for (octave_idx_type i = 0; i < ns; i++)
1970 {
1971 T tmp = ov[i*stride + offset];
1972 if (sort_isnan<T> (tmp))
1973 {
1974 --ku;
1975 buf[ku] = tmp;
1976 bufi[ku] = i;
1977 }
1978 else
1979 {
1980 buf[kl] = tmp;
1981 bufi[kl] = i;
1982 kl++;
1983 }
1984 }
1985
1986 // sort.
1987 lsort.sort (buf, bufi, kl);
1988
1989 if (ku < ns)
1990 {
1991 // NaNs are in reverse order
1992 std::reverse (buf + ku, buf + ns);
1993 std::reverse (bufi + ku, bufi + ns);
1994 if (mode == DESCENDING)
1995 {
1996 std::rotate (buf, buf + ku, buf + ns);
1997 std::rotate (bufi, bufi + ku, bufi + ns);
1998 }
1999 }
2000
2001 // scatter.
2002 for (octave_idx_type i = 0; i < ns; i++)
2003 v[i*stride + offset] = buf[i];
2004 for (octave_idx_type i = 0; i < ns; i++)
2005 vi[i*stride + offset] = bufi[i];
2006 }
2007 }
2008
2009 return m;
2010}
2011
2012template <typename T, typename Alloc>
2015 bool /* allow_chk */)
2016{
2017 if (mode == ASCENDING)
2019 else if (mode == DESCENDING)
2021 else
2022 return nullptr;
2023}
2024
2025template <typename T, typename Alloc>
2028{
2029 octave_sort<T> lsort;
2030
2031 octave_idx_type n = numel ();
2032
2033 if (n <= 1)
2034 return (mode == UNSORTED) ? ASCENDING : mode;
2035
2036 if (mode == UNSORTED)
2037 {
2038 // Auto-detect mode.
2039 compare_fcn_type compare
2040 = safe_comparator (ASCENDING, *this, false);
2041
2042 if (compare (elem (n-1), elem (0)))
2043 mode = DESCENDING;
2044 else
2045 mode = ASCENDING;
2046 }
2047
2048 lsort.set_compare (safe_comparator (mode, *this, false));
2049
2050 if (! lsort.issorted (data (), n))
2051 mode = UNSORTED;
2052
2053 return mode;
2054
2055}
2056
2057template <typename T, typename Alloc>
2060{
2062
2063 octave_sort<T> lsort (safe_comparator (mode, *this, true));
2064
2065 octave_idx_type r = rows ();
2066 octave_idx_type c = cols ();
2067
2068 idx = Array<octave_idx_type> (dim_vector (r, 1));
2069
2070 lsort.sort_rows (data (), idx.fortran_vec (), r, c);
2071
2072 return idx;
2073}
2074
2075template <typename T, typename Alloc>
2078{
2079 octave_sort<T> lsort;
2080
2081 octave_idx_type r = rows ();
2082 octave_idx_type c = cols ();
2083
2084 if (r <= 1 || c == 0)
2085 return (mode == UNSORTED) ? ASCENDING : mode;
2086
2087 if (mode == UNSORTED)
2088 {
2089 // Auto-detect mode.
2090 compare_fcn_type compare
2091 = safe_comparator (ASCENDING, *this, false);
2092
2094 for (i = 0; i < cols (); i++)
2095 {
2096 T l = elem (0, i);
2097 T u = elem (rows () - 1, i);
2098 if (compare (l, u))
2099 {
2100 if (mode == DESCENDING)
2101 {
2102 mode = UNSORTED;
2103 break;
2104 }
2105 else
2106 mode = ASCENDING;
2107 }
2108 else if (compare (u, l))
2109 {
2110 if (mode == ASCENDING)
2111 {
2112 mode = UNSORTED;
2113 break;
2114 }
2115 else
2116 mode = DESCENDING;
2117 }
2118 }
2119 if (mode == UNSORTED && i == cols ())
2120 mode = ASCENDING;
2121 }
2122
2123 if (mode != UNSORTED)
2124 {
2125 lsort.set_compare (safe_comparator (mode, *this, false));
2126
2127 if (! lsort.is_sorted_rows (data (), r, c))
2128 mode = UNSORTED;
2129 }
2130
2131 return mode;
2132
2133}
2134
2135// Do a binary lookup in a sorted array.
2136template <typename T, typename Alloc>
2138Array<T, Alloc>::lookup (const T& value, sortmode mode) const
2139{
2140 octave_idx_type n = numel ();
2141 octave_sort<T> lsort;
2142
2143 if (mode == UNSORTED)
2144 {
2145 // auto-detect mode
2146 if (n > 1 && lsort.descending_compare (elem (0), elem (n-1)))
2147 mode = DESCENDING;
2148 else
2149 mode = ASCENDING;
2150 }
2151
2152 lsort.set_compare (mode);
2153
2154 return lsort.lookup (data (), n, value);
2155}
2156
2157template <typename T, typename Alloc>
2160{
2161 octave_idx_type n = numel ();
2162 octave_idx_type nval = values.numel ();
2163 octave_sort<T> lsort;
2164 Array<octave_idx_type> idx (values.dims ());
2165
2166 if (mode == UNSORTED)
2167 {
2168 // auto-detect mode
2169 if (n > 1 && lsort.descending_compare (elem (0), elem (n-1)))
2170 mode = DESCENDING;
2171 else
2172 mode = ASCENDING;
2173 }
2174
2175 lsort.set_compare (mode);
2176
2177 // This determines the split ratio between the O(M*log2(N)) and O(M+N)
2178 // algorithms.
2179 static const double ratio = 1.0;
2180 sortmode vmode = UNSORTED;
2181
2182 // Attempt the O(M+N) algorithm if M is large enough.
2183 if (nval > ratio * n / octave::math::log2 (n + 1.0))
2184 {
2185 vmode = values.issorted ();
2186 // The table must not contain a NaN.
2187 if ((vmode == ASCENDING && sort_isnan<T> (values(nval-1)))
2188 || (vmode == DESCENDING && sort_isnan<T> (values(0))))
2189 vmode = UNSORTED;
2190 }
2191
2192 if (vmode != UNSORTED)
2193 lsort.lookup_sorted (data (), n, values.data (), nval,
2194 idx.fortran_vec (), vmode != mode);
2195 else
2196 lsort.lookup (data (), n, values.data (), nval, idx.fortran_vec ());
2197
2198 return idx;
2199}
2200
2201template <typename T, typename Alloc>
2204{
2205 const T *src = data ();
2206 octave_idx_type nel = numel ();
2207 octave_idx_type retval = 0;
2208 const T zero = T ();
2209 for (octave_idx_type i = 0; i < nel; i++)
2210 if (src[i] != zero)
2211 retval++;
2212
2213 return retval;
2214}
2215
2216template <typename T, typename Alloc>
2219{
2221 const T *src = data ();
2222 octave_idx_type nel = numel ();
2223 const T zero = T ();
2224 if (n < 0 || n >= nel)
2225 {
2226 // We want all elements, which means we'll almost surely need
2227 // to resize. So count first, then allocate array of exact size.
2228 octave_idx_type cnt = 0;
2229 for (octave_idx_type i = 0; i < nel; i++)
2230 cnt += src[i] != zero;
2231
2232 retval.clear (cnt, 1);
2233 octave_idx_type *dest = retval.fortran_vec ();
2234 for (octave_idx_type i = 0; i < nel; i++)
2235 if (src[i] != zero) *dest++ = i;
2236 }
2237 else
2238 {
2239 // We want a fixed max number of elements, usually small. So be
2240 // optimistic, alloc the array in advance, and then resize if
2241 // needed.
2242 retval.clear (n, 1);
2243 if (backward)
2244 {
2245 // Do the search as a series of successive single-element searches.
2246 octave_idx_type k = 0;
2247 octave_idx_type l = nel - 1;
2248 for (; k < n; k++)
2249 {
2250 for (; l >= 0 && src[l] == zero; l--) ;
2251 if (l >= 0)
2252 retval(k) = l--;
2253 else
2254 break;
2255 }
2256 if (k < n)
2257 retval.resize2 (k, 1);
2258 octave_idx_type *rdata = retval.fortran_vec ();
2259 std::reverse (rdata, rdata + k);
2260 }
2261 else
2262 {
2263 // Do the search as a series of successive single-element searches.
2264 octave_idx_type k = 0;
2265 octave_idx_type l = 0;
2266 for (; k < n; k++)
2267 {
2268 for (; l != nel && src[l] == zero; l++) ;
2269 if (l != nel)
2270 retval(k) = l++;
2271 else
2272 break;
2273 }
2274 if (k < n)
2275 retval.resize2 (k, 1);
2276 }
2277 }
2278
2279 // Fixup return dimensions, for Matlab compatibility.
2280 // find (zeros (0,0)) -> zeros (0,0)
2281 // find (zeros (1,0)) -> zeros (1,0)
2282 // find (zeros (0,1)) -> zeros (0,1)
2283 // find (zeros (0,X)) -> zeros (0,1)
2284 // find (zeros (1,1)) -> zeros (0,0) !!!! WHY?
2285 // find (zeros (0,1,0)) -> zeros (0,0)
2286 // find (zeros (0,1,0,1)) -> zeros (0,0) etc
2287
2288 if ((numel () == 1 && retval.isempty ())
2289 || (rows () == 0 && dims ().numel (1) == 0))
2290 retval.m_dimensions = dim_vector ();
2291 else if (rows () == 1 && ndims () == 2)
2292 retval.m_dimensions = dim_vector (1, retval.numel ());
2293
2294 return retval;
2295}
2296
2297template <typename T, typename Alloc>
2300{
2301 if (dim < 0)
2302 (*current_liboctave_error_handler) ("nth_element: invalid dimension");
2303
2304 dim_vector dv = dims ();
2305 if (dim >= dv.ndims ())
2306 dv.resize (dim+1, 1);
2307
2308 octave_idx_type ns = dv(dim);
2309
2310 octave_idx_type nn = n.length (ns);
2311
2312 dv(dim) = std::min (nn, ns);
2313 dv.chop_trailing_singletons ();
2314 dim = std::min (dv.ndims (), static_cast<octave_idx_type> (dim));
2315
2316 Array<T, Alloc> m (dv);
2317
2318 if (m.isempty ())
2319 return m;
2320
2321 sortmode mode = UNSORTED;
2322 octave_idx_type lo = 0;
2323
2324 switch (n.idx_class ())
2325 {
2327 mode = ASCENDING;
2328 lo = n(0);
2329 break;
2331 {
2332 octave_idx_type inc = n.increment ();
2333 if (inc == 1)
2334 {
2335 mode = ASCENDING;
2336 lo = n(0);
2337 }
2338 else if (inc == -1)
2339 {
2340 mode = DESCENDING;
2341 lo = ns - 1 - n(0);
2342 }
2343 }
2344 break;
2346 // This case resolves bug #51329, a fallback to allow the given index
2347 // to be a sequential vector instead of the typical scalar or range
2348 if (n(1) - n(0) == 1)
2349 {
2350 mode = ASCENDING;
2351 lo = n(0);
2352 }
2353 else if (n(1) - n(0) == -1)
2354 {
2355 mode = DESCENDING;
2356 lo = ns - 1 - n(0);
2357 }
2358 // Ensure that the vector is actually an arithmetic contiguous sequence
2359 for (octave_idx_type i = 2; i < n.length () && mode != UNSORTED; i++)
2360 if ((mode == ASCENDING && n(i) - n(i-1) != 1)
2361 || (mode == DESCENDING && n(i) - n(i-1) != -1))
2362 mode = UNSORTED;
2363 break;
2364 default:
2365 break;
2366 }
2367
2368 if (mode == UNSORTED)
2369 (*current_liboctave_error_handler)
2370 ("nth_element: n must be a scalar or a contiguous range");
2371
2372 octave_idx_type up = lo + nn;
2373
2374 if (lo < 0 || up > ns)
2375 (*current_liboctave_error_handler) ("nth_element: invalid element index");
2376
2377 octave_idx_type iter = numel () / ns;
2378 octave_idx_type stride = 1;
2379
2380 for (int i = 0; i < dim; i++)
2381 stride *= dv(i);
2382
2383 T *v = m.fortran_vec ();
2384 const T *ov = data ();
2385
2386 OCTAVE_LOCAL_BUFFER (T, buf, ns);
2387
2388 octave_sort<T> lsort;
2389 lsort.set_compare (mode);
2390
2391 for (octave_idx_type j = 0; j < iter; j++)
2392 {
2393 octave_idx_type kl = 0;
2394 octave_idx_type ku = ns;
2395
2396 if (stride == 1)
2397 {
2398 // copy without NaNs.
2399 for (octave_idx_type i = 0; i < ns; i++)
2400 {
2401 T tmp = ov[i];
2402 if (sort_isnan<T> (tmp))
2403 buf[--ku] = tmp;
2404 else
2405 buf[kl++] = tmp;
2406 }
2407
2408 ov += ns;
2409 }
2410 else
2411 {
2412 octave_idx_type offset = j % stride;
2413 // copy without NaNs.
2414 for (octave_idx_type i = 0; i < ns; i++)
2415 {
2416 T tmp = ov[offset + i*stride];
2417 if (sort_isnan<T> (tmp))
2418 buf[--ku] = tmp;
2419 else
2420 buf[kl++] = tmp;
2421 }
2422
2423 if (offset == stride-1)
2424 ov += ns*stride;
2425 }
2426
2427 if (ku == ns)
2428 lsort.nth_element (buf, ns, lo, up);
2429 else if (mode == ASCENDING)
2430 lsort.nth_element (buf, ku, lo, std::min (ku, up));
2431 else
2432 {
2433 octave_idx_type nnan = ns - ku;
2434 octave_idx_type zero = 0;
2435 lsort.nth_element (buf, ku, std::max (lo - nnan, zero),
2436 std::max (up - nnan, zero));
2437 std::rotate (buf, buf + ku, buf + ns);
2438 }
2439
2440 if (stride == 1)
2441 {
2442 for (octave_idx_type i = 0; i < nn; i++)
2443 v[i] = buf[lo + i];
2444
2445 v += nn;
2446 }
2447 else
2448 {
2449 octave_idx_type offset = j % stride;
2450 for (octave_idx_type i = 0; i < nn; i++)
2451 v[offset + stride * i] = buf[lo + i];
2452 if (offset == stride-1)
2453 v += nn*stride;
2454 }
2455 }
2456
2457 return m;
2458}
2459
// Define stub specializations of the sorting/searching API for element
// types that cannot be meaningfully ordered.  Each stub returns a
// neutral value (the array itself, an empty array, UNSORTED, 0, or
// nullptr) instead of attempting any comparison.
#define NO_INSTANTIATE_ARRAY_SORT_API(T, API) \
  template <> API Array<T> \
  Array<T>::sort (int, sortmode) const \
  { \
    return *this; \
  } \
  template <> API Array<T> \
  Array<T>::sort (Array<octave_idx_type> &sidx, int, sortmode) const \
  { \
    sidx = Array<octave_idx_type> (); \
    return *this; \
  } \
  template <> API sortmode \
  Array<T>::issorted (sortmode) const \
  { \
    return UNSORTED; \
  } \
  API Array<T>::compare_fcn_type \
  safe_comparator (sortmode, const Array<T>&, bool) \
  { \
    return nullptr; \
  } \
  template <> API Array<octave_idx_type> \
  Array<T>::sort_rows_idx (sortmode) const \
  { \
    return Array<octave_idx_type> (); \
  } \
  template <> API sortmode \
  Array<T>::is_sorted_rows (sortmode) const \
  { \
    return UNSORTED; \
  } \
  template <> API octave_idx_type \
  Array<T>::lookup (T const &, sortmode) const \
  { \
    return 0; \
  } \
  template <> API Array<octave_idx_type> \
  Array<T>::lookup (const Array<T>&, sortmode) const \
  { \
    return Array<octave_idx_type> (); \
  } \
  template <> API octave_idx_type \
  Array<T>::nnz (void) const \
  { \
    return 0; \
  } \
  template <> API Array<octave_idx_type> \
  Array<T>::find (octave_idx_type, bool) const \
  { \
    return Array<octave_idx_type> (); \
  } \
  template <> API Array<T> \
  Array<T>::nth_element (const octave::idx_vector&, int) const { \
    return Array<T> (); \
  }

// Convenience form with an empty API (export) decoration.
#define NO_INSTANTIATE_ARRAY_SORT(T) NO_INSTANTIATE_ARRAY_SORT_API (T,)
2518
2519template <typename T, typename Alloc>
2522{
2523 dim_vector dv = dims ();
2524 octave_idx_type nd = dv.ndims ();
2526
2527 if (nd > 2)
2528 (*current_liboctave_error_handler) ("Matrix must be 2-dimensional");
2529
2530 octave_idx_type nnr = dv(0);
2531 octave_idx_type nnc = dv(1);
2532
2533 if (nnr == 0 && nnc == 0)
2534 ; // do nothing for empty matrix
2535 else if (nnr != 1 && nnc != 1)
2536 {
2537 // Extract diag from matrix
2538 if (k > 0)
2539 nnc -= k;
2540 else if (k < 0)
2541 nnr += k;
2542
2543 if (nnr > 0 && nnc > 0)
2544 {
2545 octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;
2546
2547 d.resize (dim_vector (ndiag, 1));
2548
2549 if (k > 0)
2550 {
2551 for (octave_idx_type i = 0; i < ndiag; i++)
2552 d.xelem (i) = elem (i, i+k);
2553 }
2554 else if (k < 0)
2555 {
2556 for (octave_idx_type i = 0; i < ndiag; i++)
2557 d.xelem (i) = elem (i-k, i);
2558 }
2559 else
2560 {
2561 for (octave_idx_type i = 0; i < ndiag; i++)
2562 d.xelem (i) = elem (i, i);
2563 }
2564 }
2565 else // Matlab returns [] 0x1 for out-of-range diagonal
2566 d.resize (dim_vector (0, 1));
2567 }
2568 else
2569 {
2570 // Create diag matrix from vector
2571 octave_idx_type roff = 0;
2572 octave_idx_type coff = 0;
2573 if (k > 0)
2574 {
2575 roff = 0;
2576 coff = k;
2577 }
2578 else if (k < 0)
2579 {
2580 roff = -k;
2581 coff = 0;
2582 }
2583
2584 if (nnr == 1)
2585 {
2586 octave_idx_type n = nnc + std::abs (k);
2587 d = Array<T, Alloc> (dim_vector (n, n), resize_fill_value ());
2588
2589 for (octave_idx_type i = 0; i < nnc; i++)
2590 d.xelem (i+roff, i+coff) = elem (0, i);
2591 }
2592 else
2593 {
2594 octave_idx_type n = nnr + std::abs (k);
2595 d = Array<T, Alloc> (dim_vector (n, n), resize_fill_value ());
2596
2597 for (octave_idx_type i = 0; i < nnr; i++)
2598 d.xelem (i+roff, i+coff) = elem (i, 0);
2599 }
2600 }
2601
2602 return d;
2603}
2604
2605template <typename T, typename Alloc>
2608{
2609 if (ndims () != 2 || (rows () != 1 && cols () != 1))
2610 (*current_liboctave_error_handler) ("cat: invalid dimension");
2611
2612 Array<T, Alloc> retval (dim_vector (m, n), resize_fill_value ());
2613
2614 octave_idx_type nel = std::min (numel (), std::min (m, n));
2615 for (octave_idx_type i = 0; i < nel; i++)
2616 retval.xelem (i, i) = xelem (i);
2617
2618 return retval;
2619}
2620
2621template <typename T, typename Alloc>
2624{
2625 // Default concatenation.
2626 bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
2627
2628 if (dim == -1 || dim == -2)
2629 {
2630 concat_rule = &dim_vector::hvcat;
2631 dim = -dim - 1;
2632 }
2633 else if (dim < 0)
2634 (*current_liboctave_error_handler) ("cat: invalid dimension");
2635
2636 if (n == 1)
2637 return array_list[0];
2638 else if (n == 0)
2639 return Array<T, Alloc> ();
2640
2641 // Special case:
2642 //
2643 // cat (dim, [], ..., [], A, ...)
2644 //
2645 // with dim > 2, A not 0x0, and at least three arguments to
2646 // concatenate is equivalent to
2647 //
2648 // cat (dim, A, ...)
2649 //
2650 // Note that this check must be performed here because for full-on
2651 // braindead Matlab compatibility, we need to have things like
2652 //
2653 // cat (3, [], [], A)
2654 //
2655 // succeed, but to have things like
2656 //
2657 // cat (3, cat (3, [], []), A)
2658 // cat (3, zeros (0, 0, 2), A)
2659 //
2660 // fail. See also bug report #31615.
2661
2662 octave_idx_type istart = 0;
2663
2664 if (n > 2 && dim > 1)
2665 {
2666 for (octave_idx_type i = 0; i < n; i++)
2667 {
2668 dim_vector dv = array_list[i].dims ();
2669
2670 if (dv.zero_by_zero ())
2671 istart++;
2672 else
2673 break;
2674 }
2675
2676 // Don't skip any initial arguments if they are all empty.
2677 if (istart >= n)
2678 istart = 0;
2679 }
2680
2681 dim_vector dv = array_list[istart++].dims ();
2682
2683 for (octave_idx_type i = istart; i < n; i++)
2684 if (! (dv.*concat_rule) (array_list[i].dims (), dim))
2685 (*current_liboctave_error_handler) ("cat: dimension mismatch");
2686
2687 Array<T, Alloc> retval (dv);
2688
2689 if (retval.isempty ())
2690 return retval;
2691
2692 int nidx = std::max (dv.ndims (), static_cast<octave_idx_type> (dim + 1));
2694 octave_idx_type l = 0;
2695
2696 for (octave_idx_type i = 0; i < n; i++)
2697 {
2698 // NOTE: This takes some thinking, but no matter what the above rules
2699 // are, an empty array can always be skipped at this point, because
2700 // the result dimensions are already determined, and there is no way
2701 // an empty array may contribute a nonzero piece along the dimension
2702 // at this point, unless an empty array can be promoted to a non-empty
2703 // one (which makes no sense). I repeat, *no way*, think about it.
2704 if (array_list[i].isempty ())
2705 continue;
2706
2707 octave_quit ();
2708
2710 if (dim < array_list[i].ndims ())
2711 u = l + array_list[i].dims ()(dim);
2712 else
2713 u = l + 1;
2714
2715 idxa(dim) = octave::idx_vector (l, u);
2716
2717 retval.assign (idxa, array_list[i]);
2718
2719 l = u;
2720 }
2721
2722 return retval;
2723}
2724
// Dump internal representation details (rep address, length, data
// pointer, reference count, slice pointer/length) to OS, prefixing each
// line with PREFIX.  Intended for debugging only.
template <typename T, typename Alloc>
void
Array<T, Alloc>::print_info (std::ostream& os, const std::string& prefix) const
{
  os << prefix << "m_rep address: " << m_rep << '\n'
     << prefix << "m_rep->m_len: " << m_rep->m_len << '\n'
     << prefix << "m_rep->m_data: " << static_cast<void *> (m_rep->m_data) << '\n'
     << prefix << "m_rep->m_count: " << m_rep->m_count << '\n'
     << prefix << "m_slice_data: " << static_cast<void *> (m_slice_data) << '\n'
     << prefix << "m_slice_len: " << m_slice_len << '\n';

  // 2D info:
  //
  //     << prefix << "rows: " << rows () << "\n"
  //     << prefix << "cols: " << cols () << "\n";
}
2741
2742template <typename T, typename Alloc>
2744{
2745 bool retval = m_dimensions == dv;
2746 if (retval)
2747 m_dimensions = dv;
2748
2749 return retval;
2750}
2751
2752template <typename T, typename Alloc>
2754{
2755 // This guards against accidental implicit instantiations.
2756 // Array<T, Alloc> instances should always be explicit and use INSTANTIATE_ARRAY.
2757 T::__xXxXx__ ();
2758}
2759
// Explicitly instantiate Array<T> and disarm the instantiation guard
// above.  Clang requires the API (export) decoration on the explicit
// instantiation; other compilers reject it there, hence the two forms.
#if defined (__clang__)
# define INSTANTIATE_ARRAY(T, API) \
  template <> API void \
  Array<T>::instantiation_guard () { } \
  \
  template class API Array<T>
#else
# define INSTANTIATE_ARRAY(T, API) \
  template <> API void \
  Array<T>::instantiation_guard () { } \
  \
  template class Array<T>
#endif
2773
2774// FIXME: is this used?
2775
// Debug-style stream output: print the dimensionality and shape of A,
// then its elements.  2-D (and degenerate N-D with one page) arrays are
// printed as a single row-by-column table; higher-dimensional arrays
// are printed page by page with "(:,:,k,...) =" headers.
template <typename T, typename Alloc>
std::ostream&
operator << (std::ostream& os, const Array<T, Alloc>& a)
{
  dim_vector a_dims = a.dims ();

  int n_dims = a_dims.ndims ();

  os << n_dims << "-dimensional array";

  if (n_dims)
    os << " (" << a_dims.str () << ')';

  os <<"\n\n";

  if (n_dims)
    {
      os << "data:";

      // Current subscript, advanced page by page below.
      Array<octave_idx_type> ra_idx (dim_vector (n_dims, 1), 0);

      // Number of times the first 2d-array is to be displayed.

      octave_idx_type m = 1;
      for (int i = 2; i < n_dims; i++)
        m *= a_dims(i);

      if (m == 1)
        {
          // Single page: print as a table (2-D) or a single row (1-D).
          octave_idx_type rows = 0;
          octave_idx_type cols = 0;

          switch (n_dims)
            {
            case 2:
              rows = a_dims(0);
              cols = a_dims(1);

              for (octave_idx_type j = 0; j < rows; j++)
                {
                  ra_idx(0) = j;
                  for (octave_idx_type k = 0; k < cols; k++)
                    {
                      ra_idx(1) = k;
                      os << ' ' << a.elem (ra_idx);
                    }
                  os << "\n";
                }
              break;

            default:
              rows = a_dims(0);

              for (octave_idx_type k = 0; k < rows; k++)
                {
                  ra_idx(0) = k;
                  os << ' ' << a.elem (ra_idx);
                }
              break;
            }

          os << "\n";
        }
      else
        {
          // Multiple pages: print each 2-D page with its higher-dim
          // subscripts (1-based) in the header.
          octave_idx_type rows = a_dims(0);
          octave_idx_type cols = a_dims(1);

          for (int i = 0; i < m; i++)
            {
              os << "\n(:,:,";

              for (int j = 2; j < n_dims - 1; j++)
                os << ra_idx(j) + 1 << ',';

              os << ra_idx(n_dims - 1) + 1 << ") = \n";

              for (octave_idx_type j = 0; j < rows; j++)
                {
                  ra_idx(0) = j;

                  for (octave_idx_type k = 0; k < cols; k++)
                    {
                      ra_idx(1) = k;
                      os << ' ' << a.elem (ra_idx);
                    }

                  os << "\n";
                }

              os << "\n";

              // Advance the page subscripts (dimensions 2 and up).
              if (i != m - 1)
                increment_index (ra_idx, a_dims, 2);
            }
        }
    }

  return os;
}
octave_idx_type compute_index(octave_idx_type n, const dim_vector &dims)
Definition: Array-util.cc:177
dim_vector zero_dims_inquire(const Array< octave::idx_vector > &ia, const dim_vector &rhdv)
Definition: Array-util.cc:429
void increment_index(Array< octave_idx_type > &ra_idx, const dim_vector &dimensions, int start_dimension)
Definition: Array-util.cc:60
bool sort_isnan(typename ref_param< T >::type)
Definition: Array.cc:1754
Array< T, Alloc >::compare_fcn_type safe_comparator(sortmode mode, const Array< T, Alloc > &, bool)
Definition: Array.cc:2014
std::ostream & operator<<(std::ostream &os, const Array< T, Alloc > &a)
Definition: Array.cc:2778
static T no_op_fcn(const T &x)
Definition: Array.cc:1639
static F77_INT nn
Definition: DASPK.cc:66
static int elem
Definition: __contourc__.cc:54
charNDArray max(char d, const charNDArray &m)
Definition: chNDArray.cc:230
charNDArray min(char d, const charNDArray &m)
Definition: chNDArray.cc:207
The real representation of all arrays.
Definition: Array.h:134
octave::refcount< octave_idx_type > m_count
Definition: Array.h:144
N Dimensional Array with copy-on-write semantics.
Definition: Array.h:129
Array< T, Alloc >::ArrayRep * m_rep
Definition: Array.h:246
OCTARRAY_API Array< T, Alloc > transpose(void) const
Size of the specified dimension.
Definition: Array.cc:1603
T & xelem(octave_idx_type n)
Size of the specified dimension.
Definition: Array.h:504
OCTARRAY_API void clear(void)
Definition: Array.cc:87
OCTARRAY_API Array< T, Alloc > squeeze(void) const
Chop off leading singleton dimensions.
Definition: Array.cc:117
OCTARRAY_API Array< T, Alloc > index(const octave::idx_vector &i) const
Indexing without resizing.
Definition: Array.cc:697
octave_idx_type numel(void) const
Number of elements in the array.
Definition: Array.h:411
friend class Array
Size of the specified dimension.
Definition: Array.h:881
OCTARRAY_API void assign(const octave::idx_vector &i, const Array< T, Alloc > &rhs, const T &rfv)
Indexed assignment (always with resize & fill).
Definition: Array.cc:1115
const dim_vector & dims(void) const
Return a const-reference so that dims ()(i) works efficiently.
Definition: Array.h:487
OCTARRAY_API Array< T, Alloc > nth_element(const octave::idx_vector &n, int dim=0) const
Returns the n-th element in increasing order, using the same ordering as used for sort.
Definition: Array.cc:2299
OCTARRAY_API octave_idx_type nnz(void) const
Count nonzero elements.
Definition: Array.cc:2203
OCTARRAY_API Array< T, Alloc > hermitian(T(*fcn)(const T &)=nullptr) const
Size of the specified dimension.
Definition: Array.cc:1646
octave_idx_type dim2(void) const
Definition: Array.h:456
OCTARRAY_API void resize1(octave_idx_type n, const T &rfv)
Size of the specified dimension.
Definition: Array.cc:897
T & elem(octave_idx_type n)
Size of the specified dimension.
Definition: Array.h:534
ref_param< T >::type crefT
Definition: Array.h:237
octave_idx_type rows(void) const
Definition: Array.h:449
static OCTARRAY_API void instantiation_guard()
Size of the specified dimension.
Definition: Array.cc:2753
OCTARRAY_API Array< T, Alloc > column(octave_idx_type k) const
Extract column: A(:,k+1).
Definition: Array.cc:261
bool isempty(void) const
Size of the specified dimension.
Definition: Array.h:607
OCTARRAY_API Array< T, Alloc > linear_slice(octave_idx_type lo, octave_idx_type up) const
Extract a slice from this array as a column vector: A(:)(lo+1:up).
Definition: Array.cc:281
OCTARRAY_API bool optimize_dimensions(const dim_vector &dv)
Returns true if this->dims () == dv, and if so, replaces this->m_dimensions by a shallow copy of dv.
Definition: Array.cc:2743
OCTARRAY_API Array< T, Alloc > page(octave_idx_type k) const
Extract page: A(:,:,k+1).
Definition: Array.cc:270
OCTARRAY_API void resize(const dim_vector &dv, const T &rfv)
Size of the specified dimension.
Definition: Array.cc:1010
OCTARRAY_API Array< T, Alloc > & insert(const Array< T, Alloc > &a, const Array< octave_idx_type > &idx)
Insert an array into another at a specified position.
Definition: Array.cc:1588
octave_idx_type columns(void) const
Definition: Array.h:458
static OCTARRAY_API Array< T, Alloc >::ArrayRep * nil_rep(void)
Definition: Array.cc:42
const T * data(void) const
Size of the specified dimension.
Definition: Array.h:616
octave_idx_type dim1(void) const
Definition: Array.h:448
OCTARRAY_API T * fortran_vec(void)
Size of the specified dimension.
Definition: Array.cc:1744
OCTARRAY_API void delete_elements(const octave::idx_vector &i)
Deleting elements.
Definition: Array.cc:1394
OCTARRAY_API Array< octave_idx_type > sort_rows_idx(sortmode mode=ASCENDING) const
Sort by rows returns only indices.
Definition: Array.cc:2059
OCTARRAY_API Array< octave_idx_type > find(octave_idx_type n=-1, bool backward=false) const
Find indices of (at most n) nonzero elements.
Definition: Array.cc:2218
dim_vector m_dimensions
Definition: Array.h:244
OCTARRAY_API Array< T, Alloc > permute(const Array< octave_idx_type > &vec, bool inv=false) const
Size of the specified dimension.
Definition: Array.cc:431
OCTARRAY_API Array< T, Alloc > sort(int dim=0, sortmode mode=ASCENDING) const
Size of the specified dimension.
Definition: Array.cc:1761
Array< T, Alloc > reshape(octave_idx_type nr, octave_idx_type nc) const
Size of the specified dimension.
Definition: Array.h:595
OCTARRAY_API void print_info(std::ostream &os, const std::string &prefix) const
Size of the specified dimension.
Definition: Array.cc:2727
int ndims(void) const
Size of the specified dimension.
Definition: Array.h:627
OCTARRAY_API sortmode issorted(sortmode mode=UNSORTED) const
Ordering is auto-detected or can be specified.
Definition: Array.cc:2027
virtual OCTARRAY_API T resize_fill_value(void) const
Size of the specified dimension.
Definition: Array.cc:886
OCTARRAY_API T & checkelem(octave_idx_type n)
Size of the specified dimension.
Definition: Array.cc:192
OCTARRAY_API sortmode is_sorted_rows(sortmode mode=UNSORTED) const
Ordering is auto-detected or can be specified.
Definition: Array.cc:2077
OCTARRAY_API void fill(const T &val)
Definition: Array.cc:73
OCTARRAY_API Array< T, Alloc > diag(octave_idx_type k=0) const
Get the kth super or subdiagonal.
Definition: Array.cc:2521
OCTARRAY_API octave_idx_type compute_index(octave_idx_type i, octave_idx_type j) const
Size of the specified dimension.
Definition: Array.cc:170
OCTARRAY_API octave_idx_type lookup(const T &value, sortmode mode=UNSORTED) const
Do a binary lookup in a sorted array.
Definition: Array.cc:2138
OCTARRAY_API void resize2(octave_idx_type nr, octave_idx_type nc, const T &rfv)
Resizing (with fill).
Definition: Array.cc:968
static OCTARRAY_API Array< T, Alloc > cat(int dim, octave_idx_type n, const Array< T, Alloc > *array_list)
Concatenation along a specified (0-based) dimension, equivalent to cat().
Definition: Array.cc:2623
Vector representing the dimensions (size) of an Array.
Definition: dim-vector.h:94
OCTAVE_API bool concat(const dim_vector &dvb, int dim)
This corresponds to cat().
Definition: dim-vector.cc:140
OCTAVE_API void chop_all_singletons(void)
Definition: dim-vector.cc:50
OCTAVE_API std::string str(char sep='x') const
Definition: dim-vector.cc:68
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition: dim-vector.h:335
bool zero_by_zero(void) const
Definition: dim-vector.h:311
void resize(int n, int fill_value=0)
Definition: dim-vector.h:272
static dim_vector alloc(int n)
Definition: dim-vector.h:202
OCTAVE_API bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
Definition: dim-vector.cc:201
void chop_trailing_singletons(void)
Definition: dim-vector.h:164
bool any_neg(void) const
Definition: dim-vector.h:357
octave_idx_type ndims(void) const
Number of dimensions.
Definition: dim-vector.h:257
bool is_nd_vector(void) const
Definition: dim-vector.h:400
OCTAVE_API dim_vector redim(int n) const
Force certain dimensionality, preserving numel ().
Definition: dim-vector.cc:226
OCTAVE_API octave_idx_type safe_numel(void) const
The following function will throw a std::bad_alloc () exception if the requested size is larger than ...
Definition: dim-vector.cc:98
dim_vector make_nd_vector(octave_idx_type n) const
Definition: dim-vector.h:421
bool is_colon(void) const
Definition: idx-vector.h:556
octave_idx_type extent(octave_idx_type n) const
Definition: idx-vector.h:540
octave_idx_type xelem(octave_idx_type n) const
Definition: idx-vector.h:543
idx_class_type idx_class(void) const
Definition: idx-vector.h:535
OCTAVE_API idx_vector complement(octave_idx_type n) const
Definition: idx-vector.cc:1066
octave_idx_type assign(const T *src, octave_idx_type n, T *dest) const
Definition: idx-vector.h:676
bool is_scalar(void) const
Definition: idx-vector.h:559
OCTAVE_API bool maybe_reduce(octave_idx_type n, const idx_vector &j, octave_idx_type nj)
Definition: idx-vector.cc:747
octave_idx_type index(const T *src, octave_idx_type n, T *dest) const
Definition: idx-vector.h:602
OCTAVE_API bool is_cont_range(octave_idx_type n, octave_idx_type &l, octave_idx_type &u) const
Definition: idx-vector.cc:915
static const idx_vector colon
Definition: idx-vector.h:483
dim_vector orig_dimensions(void) const
Definition: idx-vector.h:574
octave_idx_type fill(const T &val, octave_idx_type n, T *dest) const
Definition: idx-vector.h:748
OCTAVE_API octave_idx_type increment(void) const
Definition: idx-vector.cc:968
octave_idx_type length(octave_idx_type n=0) const
Definition: idx-vector.h:537
bool is_colon_equiv(octave_idx_type n) const
Definition: idx-vector.h:565
virtual octave_idx_type numel(void) const
Definition: ov-base.h:365
void set_compare(const compare_fcn_type &comp)
Definition: oct-sort.h:121
void sort_rows(const T *data, octave_idx_type *idx, octave_idx_type rows, octave_idx_type cols)
Definition: oct-sort.cc:1665
static bool descending_compare(typename ref_param< T >::type, typename ref_param< T >::type)
Definition: oct-sort.cc:1975
bool issorted(const T *data, octave_idx_type nel)
Definition: oct-sort.cc:1577
octave_idx_type lookup(const T *data, octave_idx_type nel, const T &value)
Definition: oct-sort.cc:1787
void sort(T *data, octave_idx_type nel)
Definition: oct-sort.cc:1520
void nth_element(T *data, octave_idx_type nel, octave_idx_type lo, octave_idx_type up=-1)
Definition: oct-sort.cc:1945
bool is_sorted_rows(const T *data, octave_idx_type rows, octave_idx_type cols)
Definition: oct-sort.cc:1742
void lookup_sorted(const T *data, octave_idx_type nel, const T *values, octave_idx_type nvalues, octave_idx_type *idx, bool rev=false)
Definition: oct-sort.cc:1898
void assign(const T *src, T *dest) const
Definition: Array.cc:548
octave::idx_vector * m_idx
Definition: Array.cc:618
~rec_index_helper(void)
Definition: Array.cc:542
const T * do_assign(const T *src, T *dest, int lev) const
Definition: Array.cc:579
rec_index_helper(const rec_index_helper &)=delete
rec_index_helper(const dim_vector &dv, const Array< octave::idx_vector > &ia)
Definition: Array.cc:507
octave_idx_type * m_cdim
Definition: Array.cc:617
void index(const T *src, T *dest) const
Definition: Array.cc:545
void do_fill(const T &val, T *dest, int lev) const
Definition: Array.cc:596
rec_index_helper & operator=(const rec_index_helper &)=delete
void fill(const T &val, T *dest) const
Definition: Array.cc:551
octave_idx_type * m_dim
Definition: Array.cc:616
bool is_cont_range(octave_idx_type &l, octave_idx_type &u) const
Definition: Array.cc:553
T * do_index(const T *src, T *dest, int lev) const
Definition: Array.cc:562
~rec_permute_helper(void)
Definition: Array.cc:337
static T * blk_trans(const T *src, T *dest, octave_idx_type nr, octave_idx_type nc)
Definition: Array.cc:345
void permute(const T *src, T *dest) const
Definition: Array.cc:340
T * do_permute(const T *src, T *dest, int lev) const
Definition: Array.cc:385
octave_idx_type * m_stride
Definition: Array.cc:425
rec_permute_helper(const dim_vector &dv, const Array< octave_idx_type > &perm)
Definition: Array.cc:293
rec_permute_helper(const rec_permute_helper &)=delete
octave_idx_type * m_dim
Definition: Array.cc:424
rec_permute_helper & operator=(const rec_permute_helper &)=delete
octave_idx_type * m_dext
Definition: Array.cc:691
rec_resize_helper & operator=(const rec_resize_helper &)=delete
rec_resize_helper(const rec_resize_helper &)=delete
void do_resize_fill(const T *src, T *dest, const T &rfv, int lev) const
Definition: Array.cc:668
octave_idx_type * m_sext
Definition: Array.cc:690
void resize_fill(const T *src, T *dest, const T &rfv) const
Definition: Array.cc:661
~rec_resize_helper(void)
Definition: Array.cc:658
rec_resize_helper(const dim_vector &ndv, const dim_vector &odv)
Definition: Array.cc:627
octave_idx_type * m_cext
Definition: Array.cc:689
if_then_else< is_class_type< T >::no, T, Tconst & >::result type
Definition: lo-traits.h:121
octave::idx_vector idx_vector
Definition: idx-vector.h:1037
F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE * d
F77_RET_T const F77_DBLE * x
Complex log2(const Complex &x)
Definition: lo-mappers.cc:139
static OCTAVE_NORETURN void err_index_out_of_range(void)
Definition: idx-vector.cc:52
void err_invalid_index(const std::string &idx, octave_idx_type nd, octave_idx_type dim, const std::string &)
void err_del_index_out_of_range(bool is1d, octave_idx_type idx, octave_idx_type ext)
void err_invalid_resize(void)
void err_nonconformant(const char *op, octave_idx_type op1_len, octave_idx_type op2_len)
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition: oct-locbuf.h:44
#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value)
Definition: oct-locbuf.h:50
sortmode
Definition: oct-sort.h:97
@ UNSORTED
Definition: oct-sort.h:97
@ ASCENDING
Definition: oct-sort.h:97
@ DESCENDING
Definition: oct-sort.h:97
T::size_type numel(const T &str)
Definition: oct-string.cc:71
const octave_base_value const Array< octave_idx_type > & ra_idx
static T abs(T x)
Definition: pr-output.cc:1678
F77_RET_T len
Definition: xerbla.cc:61