GNU Octave  8.1.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
Array-base.cc
Go to the documentation of this file.
1 ////////////////////////////////////////////////////////////////////////
2 //
3 // Copyright (C) 1993-2023 The Octave Project Developers
4 //
5 // See the file COPYRIGHT.md in the top-level directory of this
6 // distribution or <https://octave.org/copyright/>.
7 //
8 // This file is part of Octave.
9 //
10 // Octave is free software: you can redistribute it and/or modify it
11 // under the terms of the GNU General Public License as published by
12 // the Free Software Foundation, either version 3 of the License, or
13 // (at your option) any later version.
14 //
15 // Octave is distributed in the hope that it will be useful, but
16 // WITHOUT ANY WARRANTY; without even the implied warranty of
17 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 // GNU General Public License for more details.
19 //
20 // You should have received a copy of the GNU General Public License
21 // along with Octave; see the file COPYING. If not, see
22 // <https://www.gnu.org/licenses/>.
23 //
24 ////////////////////////////////////////////////////////////////////////
25 
26 // Do not include this file directly to instantiate the Array<T> template
27 // class in code outside liboctave. Include "Array-oct.cc" or "Array.cc"
28 // instead.
29 
30 // This file should not include config.h. It is only included in other
31 // C++ source files that should have included config.h before including
32 // this file.
33 
34 #include <ostream>
35 
36 #include "Array-util.h"
37 #include "Array.h"
38 #include "lo-mappers.h"
39 #include "oct-locbuf.h"
40 
41 // One dimensional array class. Handles the reference counting for
42 // all the derived classes.
43 
44 template <typename T, typename Alloc>
47 {
48  static ArrayRep nr;
49  return &nr;
50 }
51 
52 // This is similar to the template for containers but specialized for Array.
53 // Note we can't specialize a member without also specializing the class.
54 template <typename T, typename Alloc>
56  : m_dimensions (dv), m_rep (a.m_rep),
57  m_slice_data (a.m_slice_data), m_slice_len (a.m_slice_len)
58 {
59  bool invalid_size = false;
60 
61  octave_idx_type new_numel = 0;
62 
63  try
64  {
65  // Safe numel may throw a bad_alloc error because the new
66  // dimensions are too large to allocate. Trap that here so
67  // we can issue a more helpful diagnostic that is consistent
68  // with other size mismatch failures.
69 
70  new_numel = m_dimensions.safe_numel ();
71  }
72  catch (const std::bad_alloc&)
73  {
74  invalid_size = true;
75  }
76 
77  if (invalid_size || new_numel != a.numel ())
78  {
79  std::string dimensions_str = a.m_dimensions.str ();
80  std::string new_dims_str = m_dimensions.str ();
81 
82  (*current_liboctave_error_handler)
83  ("reshape: can't reshape %s array to %s array",
84  dimensions_str.c_str (), new_dims_str.c_str ());
85  }
86 
87  // This goes here because if an exception is thrown by the above,
88  // destructor will be never called.
89  m_rep->m_count++;
91 }
92 
93 template <typename T, typename Alloc>
94 void
95 Array<T, Alloc>::fill (const T& val)
96 {
97  if (m_rep->m_count > 1)
98  {
99  --m_rep->m_count;
100  m_rep = new ArrayRep (numel (), val);
101  m_slice_data = m_rep->m_data;
102  }
103  else
104  std::fill_n (m_slice_data, m_slice_len, val);
105 }
106 
107 template <typename T, typename Alloc>
108 void
110 {
111  if (--m_rep->m_count == 0)
112  delete m_rep;
113 
114  m_rep = nil_rep ();
115  m_rep->m_count++;
116  m_slice_data = m_rep->m_data;
117  m_slice_len = m_rep->m_len;
118 
119  m_dimensions = dim_vector ();
120 }
121 
122 template <typename T, typename Alloc>
123 void
125 {
126  if (--m_rep->m_count == 0)
127  delete m_rep;
128 
129  m_rep = new ArrayRep (dv.safe_numel ());
130  m_slice_data = m_rep->m_data;
131  m_slice_len = m_rep->m_len;
132 
133  m_dimensions = dv;
134  m_dimensions.chop_trailing_singletons ();
135 }
136 
137 template <typename T, typename Alloc>
140 {
141  Array<T, Alloc> retval = *this;
142 
143  if (ndims () > 2)
144  {
145  bool dims_changed = false;
146 
147  dim_vector new_dimensions = m_dimensions;
148 
149  int k = 0;
150 
151  for (int i = 0; i < ndims (); i++)
152  {
153  if (m_dimensions(i) == 1)
154  dims_changed = true;
155  else
156  new_dimensions(k++) = m_dimensions(i);
157  }
158 
159  if (dims_changed)
160  {
161  switch (k)
162  {
163  case 0:
164  new_dimensions = dim_vector (1, 1);
165  break;
166 
167  case 1:
168  {
169  octave_idx_type tmp = new_dimensions(0);
170 
171  new_dimensions.resize (2);
172 
173  new_dimensions(0) = tmp;
174  new_dimensions(1) = 1;
175  }
176  break;
177 
178  default:
179  new_dimensions.resize (k);
180  break;
181  }
182  }
183 
184  retval = Array<T, Alloc> (*this, new_dimensions);
185  }
186 
187  return retval;
188 }
189 
190 template <typename T, typename Alloc>
193 {
194  return ::compute_index (i, j, m_dimensions);
195 }
196 
197 template <typename T, typename Alloc>
200  octave_idx_type k) const
201 {
202  return ::compute_index (i, j, k, m_dimensions);
203 }
204 
205 template <typename T, typename Alloc>
208 {
209  return ::compute_index (ra_idx, m_dimensions);
210 }
211 
212 template <typename T, typename Alloc>
213 T&
215 {
216  // Do checks directly to avoid recomputing m_slice_len.
217  if (n < 0)
219  if (n >= m_slice_len)
220  octave::err_index_out_of_range (1, 1, n+1, m_slice_len, m_dimensions);
221 
222  return elem (n);
223 }
224 
225 template <typename T, typename Alloc>
226 T&
228 {
229  return elem (compute_index (i, j));
230 }
231 
232 template <typename T, typename Alloc>
233 T&
235 {
236  return elem (compute_index (i, j, k));
237 }
238 
239 template <typename T, typename Alloc>
240 T&
242 {
243  return elem (compute_index (ra_idx));
244 }
245 
246 template <typename T, typename Alloc>
247 typename Array<T, Alloc>::crefT
249 {
250  // Do checks directly to avoid recomputing m_slice_len.
251  if (n < 0)
253  if (n >= m_slice_len)
254  octave::err_index_out_of_range (1, 1, n+1, m_slice_len, m_dimensions);
255 
256  return elem (n);
257 }
258 
259 template <typename T, typename Alloc>
260 typename Array<T, Alloc>::crefT
262 {
263  return elem (compute_index (i, j));
264 }
265 
266 template <typename T, typename Alloc>
267 typename Array<T, Alloc>::crefT
269  octave_idx_type k) const
270 {
271  return elem (compute_index (i, j, k));
272 }
273 
274 template <typename T, typename Alloc>
275 typename Array<T, Alloc>::crefT
277 {
278  return elem (compute_index (ra_idx));
279 }
280 
281 template <typename T, typename Alloc>
284 {
285  octave_idx_type r = m_dimensions(0);
286 
287  return Array<T, Alloc> (*this, dim_vector (r, 1), k*r, k*r + r);
288 }
289 
290 template <typename T, typename Alloc>
293 {
294  octave_idx_type r = m_dimensions(0);
295  octave_idx_type c = m_dimensions(1);
296  octave_idx_type p = r*c;
297 
298  return Array<T, Alloc> (*this, dim_vector (r, c), k*p, k*p + p);
299 }
300 
301 template <typename T, typename Alloc>
304 {
305  if (up < lo)
306  up = lo;
307 
308  return Array<T, Alloc> (*this, dim_vector (up - lo, 1), lo, up);
309 }
310 
311 // Helper class for multi-d dimension permuting (generalized transpose).
313 {
314 public:
316 
317  : m_n (dv.ndims ()), m_top (0), m_dim (new octave_idx_type [2*m_n]),
319  {
320  assert (m_n == perm.numel ());
321 
322  // Get cumulative dimensions.
324  cdim[0] = 1;
325  for (int i = 1; i < m_n+1; i++) cdim[i] = cdim[i-1] * dv(i-1);
326 
327  // Setup the permuted strides.
328  for (int k = 0; k < m_n; k++)
329  {
330  int kk = perm(k);
331  m_dim[k] = dv(kk);
332  m_stride[k] = cdim[kk];
333  }
334 
335  // Reduce contiguous runs.
336  for (int k = 1; k < m_n; k++)
337  {
338  if (m_stride[k] == m_stride[m_top]*m_dim[m_top])
339  m_dim[m_top] *= m_dim[k];
340  else
341  {
342  m_top++;
343  m_dim[m_top] = m_dim[k];
344  m_stride[m_top] = m_stride[k];
345  }
346  }
347 
348  // Determine whether we can use block transposes.
349  m_use_blk = m_top >= 1 && m_stride[1] == 1 && m_stride[0] == m_dim[1];
350 
351  }
352 
353  // No copying!
354 
356 
358 
359  ~rec_permute_helper (void) { delete [] m_dim; }
360 
361  template <typename T>
362  void permute (const T *src, T *dest) const { do_permute (src, dest, m_top); }
363 
364  // Helper method for fast blocked transpose.
365  template <typename T>
366  static T *
367  blk_trans (const T *src, T *dest, octave_idx_type nr, octave_idx_type nc)
368  {
369  static const octave_idx_type m = 8;
370  OCTAVE_LOCAL_BUFFER (T, blk, m*m);
371  for (octave_idx_type kr = 0; kr < nr; kr += m)
372  for (octave_idx_type kc = 0; kc < nc; kc += m)
373  {
374  octave_idx_type lr = std::min (m, nr - kr);
375  octave_idx_type lc = std::min (m, nc - kc);
376  if (lr == m && lc == m)
377  {
378  const T *ss = src + kc * nr + kr;
379  for (octave_idx_type j = 0; j < m; j++)
380  for (octave_idx_type i = 0; i < m; i++)
381  blk[j*m+i] = ss[j*nr + i];
382  T *dd = dest + kr * nc + kc;
383  for (octave_idx_type j = 0; j < m; j++)
384  for (octave_idx_type i = 0; i < m; i++)
385  dd[j*nc+i] = blk[i*m+j];
386  }
387  else
388  {
389  const T *ss = src + kc * nr + kr;
390  for (octave_idx_type j = 0; j < lc; j++)
391  for (octave_idx_type i = 0; i < lr; i++)
392  blk[j*m+i] = ss[j*nr + i];
393  T *dd = dest + kr * nc + kc;
394  for (octave_idx_type j = 0; j < lr; j++)
395  for (octave_idx_type i = 0; i < lc; i++)
396  dd[j*nc+i] = blk[i*m+j];
397  }
398  }
399 
400  return dest + nr*nc;
401  }
402 
403 private:
404 
405  // Recursive N-D generalized transpose
406  template <typename T>
407  T * do_permute (const T *src, T *dest, int lev) const
408  {
409  if (lev == 0)
410  {
411  octave_idx_type step = m_stride[0];
413  if (step == 1)
414  {
415  std::copy_n (src, len, dest);
416  dest += len;
417  }
418  else
419  {
420  for (octave_idx_type i = 0, j = 0; i < len; i++, j += step)
421  dest[i] = src[j];
422 
423  dest += len;
424  }
425  }
426  else if (m_use_blk && lev == 1)
427  dest = blk_trans (src, dest, m_dim[1], m_dim[0]);
428  else
429  {
430  octave_idx_type step = m_stride[lev];
431  octave_idx_type len = m_dim[lev];
432  for (octave_idx_type i = 0, j = 0; i < len; i++, j+= step)
433  dest = do_permute (src + i * step, dest, lev-1);
434  }
435 
436  return dest;
437  }
438 
439  //--------
440 
441  // STRIDE occupies the last half of the space allocated for dim to
442  // avoid a double allocation.
443 
444  int m_n;
445  int m_top;
448  bool m_use_blk;
449 };
450 
451 template <typename T, typename Alloc>
453 Array<T, Alloc>::permute (const Array<octave_idx_type>& perm_vec_arg, bool inv) const
454 {
455  Array<T, Alloc> retval;
456 
457  Array<octave_idx_type> perm_vec = perm_vec_arg;
458 
459  dim_vector dv = dims ();
460 
461  int perm_vec_len = perm_vec_arg.numel ();
462 
463  if (perm_vec_len < dv.ndims ())
464  (*current_liboctave_error_handler)
465  ("%s: invalid permutation vector", inv ? "ipermute" : "permute");
466 
467  dim_vector dv_new = dim_vector::alloc (perm_vec_len);
468 
469  // Append singleton dimensions as needed.
470  dv.resize (perm_vec_len, 1);
471 
472  // Need this array to check for identical elements in permutation array.
473  OCTAVE_LOCAL_BUFFER_INIT (bool, checked, perm_vec_len, false);
474 
475  bool identity = true;
476 
477  // Find dimension vector of permuted array.
478  for (int i = 0; i < perm_vec_len; i++)
479  {
480  octave_idx_type perm_elt = perm_vec.elem (i);
481  if (perm_elt >= perm_vec_len || perm_elt < 0)
482  (*current_liboctave_error_handler)
483  ("%s: permutation vector contains an invalid element",
484  inv ? "ipermute" : "permute");
485 
486  if (checked[perm_elt])
487  (*current_liboctave_error_handler)
488  ("%s: permutation vector cannot contain identical elements",
489  inv ? "ipermute" : "permute");
490  else
491  {
492  checked[perm_elt] = true;
493  identity = identity && perm_elt == i;
494  }
495  }
496 
497  if (identity)
498  return *this;
499 
500  if (inv)
501  {
502  for (int i = 0; i < perm_vec_len; i++)
503  perm_vec(perm_vec_arg(i)) = i;
504  }
505 
506  for (int i = 0; i < perm_vec_len; i++)
507  dv_new(i) = dv(perm_vec(i));
508 
509  retval = Array<T, Alloc> (dv_new);
510 
511  if (numel () > 0)
512  {
513  rec_permute_helper rh (dv, perm_vec);
514  rh.permute (data (), retval.fortran_vec ());
515  }
516 
517  return retval;
518 }
519 
520 // Helper class for multi-d index reduction and recursive
521 // indexing/indexed assignment. Rationale: we could avoid recursion
522 // using a state machine instead. However, using recursion is much
523 // more amenable to possible parallelization in the future.
524 // Also, the recursion solution is cleaner and more understandable.
525 
527 {
528 public:
530  : m_n (ia.numel ()), m_top (0), m_dim (new octave_idx_type [2*m_n]),
531  m_cdim (m_dim + m_n), m_idx (new octave::idx_vector [m_n])
532  {
533  assert (m_n > 0 && (dv.ndims () == std::max (m_n, 2)));
534 
535  m_dim[0] = dv(0);
536  m_cdim[0] = 1;
537  m_idx[0] = ia(0);
538 
539  for (int i = 1; i < m_n; i++)
540  {
541  // Try reduction...
542  if (m_idx[m_top].maybe_reduce (m_dim[m_top], ia(i), dv(i)))
543  {
544  // Reduction successful, fold dimensions.
545  m_dim[m_top] *= dv(i);
546  }
547  else
548  {
549  // Unsuccessful, store index & cumulative m_dim.
550  m_top++;
551  m_idx[m_top] = ia(i);
552  m_dim[m_top] = dv(i);
553  m_cdim[m_top] = m_cdim[m_top-1] * m_dim[m_top-1];
554  }
555  }
556  }
557 
558  // No copying!
559 
561 
563 
564  ~rec_index_helper (void) { delete [] m_idx; delete [] m_dim; }
565 
566  template <typename T>
567  void index (const T *src, T *dest) const { do_index (src, dest, m_top); }
568 
569  template <typename T>
570  void assign (const T *src, T *dest) const { do_assign (src, dest, m_top); }
571 
572  template <typename T>
573  void fill (const T& val, T *dest) const { do_fill (val, dest, m_top); }
574 
576  {
577  return m_top == 0 && m_idx[0].is_cont_range (m_dim[0], l, u);
578  }
579 
580 private:
581 
582  // Recursive N-D indexing
583  template <typename T>
584  T * do_index (const T *src, T *dest, int lev) const
585  {
586  if (lev == 0)
587  dest += m_idx[0].index (src, m_dim[0], dest);
588  else
589  {
590  octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
591  octave_idx_type d = m_cdim[lev];
592  for (octave_idx_type i = 0; i < nn; i++)
593  dest = do_index (src + d*m_idx[lev].xelem (i), dest, lev-1);
594  }
595 
596  return dest;
597  }
598 
599  // Recursive N-D indexed assignment
600  template <typename T>
601  const T * do_assign (const T *src, T *dest, int lev) const
602  {
603  if (lev == 0)
604  src += m_idx[0].assign (src, m_dim[0], dest);
605  else
606  {
607  octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
608  octave_idx_type d = m_cdim[lev];
609  for (octave_idx_type i = 0; i < nn; i++)
610  src = do_assign (src, dest + d*m_idx[lev].xelem (i), lev-1);
611  }
612 
613  return src;
614  }
615 
616  // Recursive N-D indexed assignment
617  template <typename T>
618  void do_fill (const T& val, T *dest, int lev) const
619  {
620  if (lev == 0)
621  m_idx[0].fill (val, m_dim[0], dest);
622  else
623  {
624  octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
626  for (octave_idx_type i = 0; i < nn; i++)
627  do_fill (val, dest + d*m_idx[lev].xelem (i), lev-1);
628  }
629  }
630 
631  //--------
632 
633  // CDIM occupies the last half of the space allocated for dim to
634  // avoid a double allocation.
635 
636  int m_n;
637  int m_top;
641 };
642 
643 // Helper class for multi-d recursive resizing
644 // This handles resize () in an efficient manner, touching memory only
645 // once (apart from reinitialization)
647 {
648 public:
649  rec_resize_helper (const dim_vector& ndv, const dim_vector& odv)
650  : m_cext (nullptr), m_sext (nullptr), m_dext (nullptr), m_n (0)
651  {
652  int l = ndv.ndims ();
653  assert (odv.ndims () == l);
654  octave_idx_type ld = 1;
655  int i = 0;
656  for (; i < l-1 && ndv(i) == odv(i); i++) ld *= ndv(i);
657  m_n = l - i;
658  m_cext = new octave_idx_type [3*m_n];
659  // Trick to avoid three allocations
662 
663  octave_idx_type sld = ld;
664  octave_idx_type dld = ld;
665  for (int j = 0; j < m_n; j++)
666  {
667  m_cext[j] = std::min (ndv(i+j), odv(i+j));
668  m_sext[j] = sld *= odv(i+j);
669  m_dext[j] = dld *= ndv(i+j);
670  }
671  m_cext[0] *= ld;
672  }
673 
674  // No copying!
675 
677 
679 
680  ~rec_resize_helper (void) { delete [] m_cext; }
681 
682  template <typename T>
683  void resize_fill (const T *src, T *dest, const T& rfv) const
684  { do_resize_fill (src, dest, rfv, m_n-1); }
685 
686 private:
687 
688  // recursive resizing
689  template <typename T>
690  void do_resize_fill (const T *src, T *dest, const T& rfv, int lev) const
691  {
692  if (lev == 0)
693  {
694  std::copy_n (src, m_cext[0], dest);
695  std::fill_n (dest + m_cext[0], m_dext[0] - m_cext[0], rfv);
696  }
697  else
698  {
699  octave_idx_type sd, dd, k;
700  sd = m_sext[lev-1];
701  dd = m_dext[lev-1];
702  for (k = 0; k < m_cext[lev]; k++)
703  do_resize_fill (src + k * sd, dest + k * dd, rfv, lev - 1);
704 
705  std::fill_n (dest + k * dd, m_dext[lev] - k * dd, rfv);
706  }
707  }
708 
709  //--------
710 
714  int m_n;
715 };
716 
717 template <typename T, typename Alloc>
720 {
721  // Colon:
722  //
723  // object | index | result orientation
724  // ---------+----------+-------------------
725  // anything | colon | column vector
726  //
727  // Numeric array or logical mask:
728  //
729  // Note that logical mask indices are always transformed to vectors
730  // before we see them.
731  //
732  // object | index | result orientation
733  // ---------+----------+-------------------
734  // vector | vector | indexed object
735  // | other | same size as index
736  // ---------+----------+-------------------
737  // array | anything | same size as index
738 
739  octave_idx_type n = numel ();
740  Array<T, Alloc> retval;
741 
742  if (i.is_colon ())
743  {
744  // A(:) produces a shallow copy as a column vector.
745  retval = Array<T, Alloc> (*this, dim_vector (n, 1));
746  }
747  else
748  {
749  if (i.extent (n) != n)
750  octave::err_index_out_of_range (1, 1, i.extent (n), n, m_dimensions);
751 
752  dim_vector result_dims = i.orig_dimensions ();
753  octave_idx_type idx_len = i.length ();
754 
755  if (n != 1 && is_nd_vector () && idx_len != 1
756  && result_dims.is_nd_vector ())
757  {
758  // Indexed object and index are both vectors. Set result size
759  // and orientation as above.
760 
761  dim_vector dv = dims ();
762 
763  result_dims = dv.make_nd_vector (idx_len);
764  }
765 
766  octave_idx_type l, u;
767  if (idx_len != 0 && i.is_cont_range (n, l, u))
768  // If suitable, produce a shallow slice.
769  retval = Array<T, Alloc> (*this, result_dims, l, u);
770  else
771  {
772  // Don't use resize here to avoid useless initialization for POD
773  // types.
774  retval = Array<T, Alloc> (result_dims);
775 
776  if (idx_len != 0)
777  i.index (data (), n, retval.fortran_vec ());
778  }
779  }
780 
781  return retval;
782 }
783 
784 template <typename T, typename Alloc>
787 {
788  // Get dimensions, allowing Fortran indexing in the 2nd dim.
789  dim_vector dv = m_dimensions.redim (2);
790  octave_idx_type r = dv(0);
791  octave_idx_type c = dv(1);
792  Array<T, Alloc> retval;
793 
794  if (i.is_colon () && j.is_colon ())
795  {
796  // A(:,:) produces a shallow copy.
797  retval = Array<T, Alloc> (*this, dv);
798  }
799  else
800  {
801  if (i.extent (r) != r)
802  octave::err_index_out_of_range (2, 1, i.extent (r), r, m_dimensions); // throws
803  if (j.extent (c) != c)
804  octave::err_index_out_of_range (2, 2, j.extent (c), c, m_dimensions); // throws
805 
807  octave_idx_type il = i.length (r);
808  octave_idx_type jl = j.length (c);
809 
810  octave::idx_vector ii (i);
811 
812  if (ii.maybe_reduce (r, j, c))
813  {
814  octave_idx_type l, u;
815  if (ii.length () > 0 && ii.is_cont_range (n, l, u))
816  // If suitable, produce a shallow slice.
817  retval = Array<T, Alloc> (*this, dim_vector (il, jl), l, u);
818  else
819  {
820  // Don't use resize to avoid useless initialization for POD types.
821  retval = Array<T, Alloc> (dim_vector (il, jl));
822 
823  ii.index (data (), n, retval.fortran_vec ());
824  }
825  }
826  else
827  {
828  // Don't use resize to avoid useless initialization for POD types.
829  retval = Array<T, Alloc> (dim_vector (il, jl));
830 
831  const T *src = data ();
832  T *dest = retval.fortran_vec ();
833 
834  for (octave_idx_type k = 0; k < jl; k++)
835  dest += i.index (src + r * j.xelem (k), r, dest);
836  }
837  }
838 
839  return retval;
840 }
841 
842 template <typename T, typename Alloc>
845 {
846  int ial = ia.numel ();
848 
849  // FIXME: is this dispatching necessary?
850  if (ial == 1)
851  retval = index (ia(0));
852  else if (ial == 2)
853  retval = index (ia(0), ia(1));
854  else if (ial > 0)
855  {
856  // Get dimensions, allowing Fortran indexing in the last dim.
857  dim_vector dv = m_dimensions.redim (ial);
858 
859  // Check for out of bounds conditions.
860  bool all_colons = true;
861  for (int i = 0; i < ial; i++)
862  {
863  if (ia(i).extent (dv(i)) != dv(i))
864  octave::err_index_out_of_range (ial, i+1, ia(i).extent (dv(i)), dv(i),
865  m_dimensions); // throws
866 
867  all_colons = all_colons && ia(i).is_colon ();
868  }
869 
870  if (all_colons)
871  {
872  // A(:,:,...,:) produces a shallow copy.
874  retval = Array<T, Alloc> (*this, dv);
875  }
876  else
877  {
878  // Form result dimensions.
879  dim_vector rdv = dim_vector::alloc (ial);
880  for (int i = 0; i < ial; i++) rdv(i) = ia(i).length (dv(i));
882 
883  // Prepare for recursive indexing
884  rec_index_helper rh (dv, ia);
885 
886  octave_idx_type l, u;
887  if (rh.is_cont_range (l, u))
888  // If suitable, produce a shallow slice.
889  retval = Array<T, Alloc> (*this, rdv, l, u);
890  else
891  {
892  // Don't use resize to avoid useless initialization for POD types.
893  retval = Array<T, Alloc> (rdv);
894 
895  // Do it.
896  rh.index (data (), retval.fortran_vec ());
897  }
898  }
899  }
900 
901  return retval;
902 }
903 
904 // The default fill value. Override if you want a different one.
905 
906 template <typename T, typename Alloc>
907 T
909 {
910  static T zero = T ();
911  return zero;
912 }
913 
914 // Yes, we could do resize using index & assign. However, that would
915 // possibly involve a lot more memory traffic than we actually need.
916 
917 template <typename T, typename Alloc>
918 void
920 {
921  if (n < 0 || ndims () != 2)
923 
924  dim_vector dv;
925  // This is driven by Matlab's behavior of giving a *row* vector
926  // on some out-of-bounds assignments. Specifically, Matlab
927  // allows a(i) with out-of-bouds i when a is either of 0x0, 1x0,
928  // 1x1, 0xN, and gives a row vector in all cases (yes, even the
929  // last one, search me why). Giving a column vector would make
930  // much more sense (given the way trailing singleton dims are
931  // treated).
932  bool invalid = false;
933  if (rows () == 0 || rows () == 1)
934  dv = dim_vector (1, n);
935  else if (columns () == 1)
936  dv = dim_vector (n, 1);
937  else
938  invalid = true;
939 
940  if (invalid)
942 
943  octave_idx_type nx = numel ();
944  if (n == nx - 1 && n > 0)
945  {
946  // Stack "pop" operation.
947  if (m_rep->m_count == 1)
948  m_slice_data[m_slice_len-1] = T ();
949  m_slice_len--;
950  m_dimensions = dv;
951  }
952  else if (n == nx + 1 && nx > 0)
953  {
954  // Stack "push" operation.
955  if (m_rep->m_count == 1
956  && m_slice_data + m_slice_len < m_rep->m_data + m_rep->m_len)
957  {
958  m_slice_data[m_slice_len++] = rfv;
959  m_dimensions = dv;
960  }
961  else
962  {
963  static const octave_idx_type max_stack_chunk = 1024;
964  octave_idx_type nn = n + std::min (nx, max_stack_chunk);
965  Array<T, Alloc> tmp (Array<T, Alloc> (dim_vector (nn, 1)), dv, 0, n);
966  T *dest = tmp.fortran_vec ();
967 
968  std::copy_n (data (), nx, dest);
969  dest[nx] = rfv;
970 
971  *this = tmp;
972  }
973  }
974  else if (n != nx)
975  {
976  Array<T, Alloc> tmp = Array<T, Alloc> (dv);
977  T *dest = tmp.fortran_vec ();
978 
979  octave_idx_type n0 = std::min (n, nx);
980  octave_idx_type n1 = n - n0;
981  std::copy_n (data (), n0, dest);
982  std::fill_n (dest + n0, n1, rfv);
983 
984  *this = tmp;
985  }
986 }
987 
988 template <typename T, typename Alloc>
989 void
991 {
992  if (r < 0 || c < 0 || ndims () != 2)
994 
995  octave_idx_type rx = rows ();
996  octave_idx_type cx = columns ();
997  if (r != rx || c != cx)
998  {
1000  T *dest = tmp.fortran_vec ();
1001 
1002  octave_idx_type r0 = std::min (r, rx);
1003  octave_idx_type r1 = r - r0;
1004  octave_idx_type c0 = std::min (c, cx);
1005  octave_idx_type c1 = c - c0;
1006  const T *src = data ();
1007  if (r == rx)
1008  {
1009  std::copy_n (src, r * c0, dest);
1010  dest += r * c0;
1011  }
1012  else
1013  {
1014  for (octave_idx_type k = 0; k < c0; k++)
1015  {
1016  std::copy_n (src, r0, dest);
1017  src += rx;
1018  dest += r0;
1019  std::fill_n (dest, r1, rfv);
1020  dest += r1;
1021  }
1022  }
1023 
1024  std::fill_n (dest, r * c1, rfv);
1025 
1026  *this = tmp;
1027  }
1028 }
1029 
1030 template <typename T, typename Alloc>
1031 void
1032 Array<T, Alloc>::resize (const dim_vector& dv, const T& rfv)
1033 {
1034  int dvl = dv.ndims ();
1035  if (dvl == 2)
1036  resize2 (dv(0), dv(1), rfv);
1037  else if (m_dimensions != dv)
1038  {
1039  if (m_dimensions.ndims () > dvl || dv.any_neg ())
1041 
1042  Array<T, Alloc> tmp (dv);
1043  // Prepare for recursive resizing.
1044  rec_resize_helper rh (dv, m_dimensions.redim (dvl));
1045 
1046  // Do it.
1047  rh.resize_fill (data (), tmp.fortran_vec (), rfv);
1048  *this = tmp;
1049  }
1050 }
1051 
1052 template <typename T, typename Alloc>
1054 Array<T, Alloc>::index (const octave::idx_vector& i, bool resize_ok, const T& rfv) const
1055 {
1056  Array<T, Alloc> tmp = *this;
1057  if (resize_ok)
1058  {
1059  octave_idx_type n = numel ();
1060  octave_idx_type nx = i.extent (n);
1061  if (n != nx)
1062  {
1063  if (i.is_scalar ())
1064  return Array<T, Alloc> (dim_vector (1, 1), rfv);
1065  else
1066  tmp.resize1 (nx, rfv);
1067  }
1068 
1069  if (tmp.numel () != nx)
1070  return Array<T, Alloc> ();
1071  }
1072 
1073  return tmp.index (i);
1074 }
1075 
1076 template <typename T, typename Alloc>
1079  bool resize_ok, const T& rfv) const
1080 {
1081  Array<T, Alloc> tmp = *this;
1082  if (resize_ok)
1083  {
1084  dim_vector dv = m_dimensions.redim (2);
1085  octave_idx_type r = dv(0);
1086  octave_idx_type c = dv(1);
1087  octave_idx_type rx = i.extent (r);
1088  octave_idx_type cx = j.extent (c);
1089  if (r != rx || c != cx)
1090  {
1091  if (i.is_scalar () && j.is_scalar ())
1092  return Array<T, Alloc> (dim_vector (1, 1), rfv);
1093  else
1094  tmp.resize2 (rx, cx, rfv);
1095  }
1096 
1097  if (tmp.rows () != rx || tmp.columns () != cx)
1098  return Array<T, Alloc> ();
1099  }
1100 
1101  return tmp.index (i, j);
1102 }
1103 
1104 template <typename T, typename Alloc>
1107  bool resize_ok, const T& rfv) const
1108 {
1109  Array<T, Alloc> tmp = *this;
1110  if (resize_ok)
1111  {
1112  int ial = ia.numel ();
1113  dim_vector dv = m_dimensions.redim (ial);
1114  dim_vector dvx = dim_vector::alloc (ial);
1115  for (int i = 0; i < ial; i++)
1116  dvx(i) = ia(i).extent (dv(i));
1117  if (! (dvx == dv))
1118  {
1119  bool all_scalars = true;
1120  for (int i = 0; i < ial; i++)
1121  all_scalars = all_scalars && ia(i).is_scalar ();
1122  if (all_scalars)
1123  return Array<T, Alloc> (dim_vector (1, 1), rfv);
1124  else
1125  tmp.resize (dvx, rfv);
1126 
1127  if (tmp.m_dimensions != dvx)
1128  return Array<T, Alloc> ();
1129  }
1130  }
1131 
1132  return tmp.index (ia);
1133 }
1134 
1135 template <typename T, typename Alloc>
1136 void
1137 Array<T, Alloc>::assign (const octave::idx_vector& i, const Array<T, Alloc>& rhs, const T& rfv)
1138 {
1139  octave_idx_type n = numel ();
1140  octave_idx_type rhl = rhs.numel ();
1141 
1142  if (rhl != 1 && i.length (n) != rhl)
1143  octave::err_nonconformant ("=", dim_vector(i.length(n), 1), rhs.dims());
1144 
1145  octave_idx_type nx = i.extent (n);
1146  bool colon = i.is_colon_equiv (nx);
1147  // Try to resize first if necessary.
1148  if (nx != n)
1149  {
1150  // Optimize case A = []; A(1:n) = X with A empty.
1151  if (m_dimensions.zero_by_zero () && colon)
1152  {
1153  if (rhl == 1)
1154  *this = Array<T, Alloc> (dim_vector (1, nx), rhs(0));
1155  else
1156  *this = Array<T, Alloc> (rhs, dim_vector (1, nx));
1157  return;
1158  }
1159 
1160  resize1 (nx, rfv);
1161  n = numel ();
1162  }
1163 
1164  if (colon)
1165  {
1166  // A(:) = X makes a full fill or a shallow copy.
1167  if (rhl == 1)
1168  fill (rhs(0));
1169  else
1170  *this = rhs.reshape (m_dimensions);
1171  }
1172  else
1173  {
1174  if (rhl == 1)
1175  i.fill (rhs(0), n, fortran_vec ());
1176  else
1177  i.assign (rhs.data (), n, fortran_vec ());
1178  }
1179 }
1180 
1181 // Assignment to a 2-dimensional array
1182 template <typename T, typename Alloc>
1183 void
1185  const Array<T, Alloc>& rhs, const T& rfv)
1186 {
1187  bool initial_dims_all_zero = m_dimensions.all_zero ();
1188 
1189  // Get RHS extents, discarding singletons.
1190  dim_vector rhdv = rhs.dims ();
1191 
1192  // Get LHS extents, allowing Fortran indexing in the second dim.
1193  dim_vector dv = m_dimensions.redim (2);
1194 
1195  // Check for out-of-bounds and form resizing m_dimensions.
1196  dim_vector rdv;
1197 
1198  // In the special when all dimensions are zero, colons are allowed
1199  // to inquire the shape of RHS. The rules are more obscure, so we
1200  // solve that elsewhere.
1201  if (initial_dims_all_zero)
1202  rdv = zero_dims_inquire (i, j, rhdv);
1203  else
1204  {
1205  rdv(0) = i.extent (dv(0));
1206  rdv(1) = j.extent (dv(1));
1207  }
1208 
1209  bool isfill = rhs.numel () == 1;
1210  octave_idx_type il = i.length (rdv(0));
1211  octave_idx_type jl = j.length (rdv(1));
1212  rhdv.chop_all_singletons ();
1213  bool match = (isfill
1214  || (rhdv.ndims () == 2 && il == rhdv(0) && jl == rhdv(1)));
1215  match = match || (il == 1 && jl == rhdv(0) && rhdv(1) == 1);
1216 
1217  if (match)
1218  {
1219  bool all_colons = (i.is_colon_equiv (rdv(0))
1220  && j.is_colon_equiv (rdv(1)));
1221  // Resize if requested.
1222  if (rdv != dv)
1223  {
1224  // Optimize case A = []; A(1:m, 1:n) = X
1225  if (dv.zero_by_zero () && all_colons)
1226  {
1227  if (isfill)
1228  *this = Array<T, Alloc> (rdv, rhs(0));
1229  else
1230  *this = Array<T, Alloc> (rhs, rdv);
1231  return;
1232  }
1233 
1234  resize (rdv, rfv);
1235  dv = m_dimensions;
1236  }
1237 
1238  if (all_colons)
1239  {
1240  // A(:,:) = X makes a full fill or a shallow copy
1241  if (isfill)
1242  fill (rhs(0));
1243  else
1244  *this = rhs.reshape (m_dimensions);
1245  }
1246  else
1247  {
1248  // The actual work.
1249  octave_idx_type n = numel ();
1250  octave_idx_type r = dv(0);
1251  octave_idx_type c = dv(1);
1252  octave::idx_vector ii (i);
1253 
1254  const T *src = rhs.data ();
1255  T *dest = fortran_vec ();
1256 
1257  // Try reduction first.
1258  if (ii.maybe_reduce (r, j, c))
1259  {
1260  if (isfill)
1261  ii.fill (*src, n, dest);
1262  else
1263  ii.assign (src, n, dest);
1264  }
1265  else
1266  {
1267  if (isfill)
1268  {
1269  for (octave_idx_type k = 0; k < jl; k++)
1270  i.fill (*src, r, dest + r * j.xelem (k));
1271  }
1272  else
1273  {
1274  for (octave_idx_type k = 0; k < jl; k++)
1275  src += i.assign (src, r, dest + r * j.xelem (k));
1276  }
1277  }
1278  }
1279  }
1280  // any empty RHS can be assigned to an empty LHS
1281  else if ((il != 0 && jl != 0) || (rhdv(0) != 0 && rhdv(1) != 0))
1282  octave::err_nonconformant ("=", il, jl, rhs.dim1 (), rhs.dim2 ());
1283 }
1284 
1285 // Assignment to a multi-dimensional array
1286 template <typename T, typename Alloc>
1287 void
1289  const Array<T, Alloc>& rhs, const T& rfv)
1290 {
1291  int ial = ia.numel ();
1292 
1293  // FIXME: is this dispatching necessary / desirable?
1294  if (ial == 1)
1295  assign (ia(0), rhs, rfv);
1296  else if (ial == 2)
1297  assign (ia(0), ia(1), rhs, rfv);
1298  else if (ial > 0)
1299  {
1300  bool initial_dims_all_zero = m_dimensions.all_zero ();
1301 
1302  // Get RHS extents, discarding singletons.
1303  dim_vector rhdv = rhs.dims ();
1304 
1305  // Get LHS extents, allowing Fortran indexing in the second dim.
1306  dim_vector dv = m_dimensions.redim (ial);
1307 
1308  // Get the extents forced by indexing.
1309  dim_vector rdv;
1310 
1311  // In the special when all dimensions are zero, colons are
1312  // allowed to inquire the shape of RHS. The rules are more
1313  // obscure, so we solve that elsewhere.
1314  if (initial_dims_all_zero)
1315  rdv = zero_dims_inquire (ia, rhdv);
1316  else
1317  {
1318  rdv = dim_vector::alloc (ial);
1319  for (int i = 0; i < ial; i++)
1320  rdv(i) = ia(i).extent (dv(i));
1321  }
1322 
1323  // Check whether LHS and RHS match, up to singleton dims.
1324  bool match = true;
1325  bool all_colons = true;
1326  bool isfill = rhs.numel () == 1;
1327 
1328  rhdv.chop_all_singletons ();
1329  int j = 0;
1330  int rhdvl = rhdv.ndims ();
1331  for (int i = 0; i < ial; i++)
1332  {
1333  all_colons = all_colons && ia(i).is_colon_equiv (rdv(i));
1334  octave_idx_type l = ia(i).length (rdv(i));
1335  if (l == 1) continue;
1336  match = match && j < rhdvl && l == rhdv(j++);
1337  }
1338 
1339  match = match && (j == rhdvl || rhdv(j) == 1);
1340  match = match || isfill;
1341 
1342  if (match)
1343  {
1344  // Resize first if necessary.
1345  if (rdv != dv)
1346  {
1347  // Optimize case A = []; A(1:m, 1:n) = X
1348  if (dv.zero_by_zero () && all_colons)
1349  {
1350  rdv.chop_trailing_singletons ();
1351  if (isfill)
1352  *this = Array<T, Alloc> (rdv, rhs(0));
1353  else
1354  *this = Array<T, Alloc> (rhs, rdv);
1355  return;
1356  }
1357 
1358  resize (rdv, rfv);
1359  dv = rdv;
1360  }
1361 
1362  if (all_colons)
1363  {
1364  // A(:,:,...,:) = X makes a full fill or a shallow copy.
1365  if (isfill)
1366  fill (rhs(0));
1367  else
1368  *this = rhs.reshape (m_dimensions);
1369  }
1370  else
1371  {
1372  // Do the actual work.
1373 
1374  // Prepare for recursive indexing
1375  rec_index_helper rh (dv, ia);
1376 
1377  // Do it.
1378  if (isfill)
1379  rh.fill (rhs(0), fortran_vec ());
1380  else
1381  rh.assign (rhs.data (), fortran_vec ());
1382  }
1383  }
1384  else
1385  {
1386  // dimension mismatch, unless LHS and RHS both empty
1387  bool lhsempty, rhsempty;
1388  lhsempty = rhsempty = false;
1389  dim_vector lhs_dv = dim_vector::alloc (ial);
1390  for (int i = 0; i < ial; i++)
1391  {
1392  octave_idx_type l = ia(i).length (rdv(i));
1393  lhs_dv(i) = l;
1394  lhsempty = lhsempty || (l == 0);
1395  rhsempty = rhsempty || (rhdv(j++) == 0);
1396  }
1397  if (! lhsempty || ! rhsempty)
1398  {
1399  lhs_dv.chop_trailing_singletons ();
1400  octave::err_nonconformant ("=", lhs_dv, rhdv);
1401  }
1402  }
1403  }
1404 }
1405 
1406 /*
1407 %!shared a
1408 %! a = [1 2; 3 4];
1409 %!error <op1 is 1x2, op2 is 1x3> a(1,:) = [1 2 3]
1410 %!error <op1 is 2x1, op2 is 1x3> a(:,1) = [1 2 3]
1411 %!error <op1 is 2x2, op2 is 2x1> a(1:2,2:3) = [1;2]
1412 */
1413 
1414 template <typename T, typename Alloc>
1415 void
1417 {
1418  octave_idx_type n = numel ();
1419  if (i.is_colon ())
1420  {
1421  *this = Array<T, Alloc> ();
1422  }
1423  else if (i.length (n) != 0)
1424  {
1425  if (i.extent (n) != n)
1426  octave::err_del_index_out_of_range (true, i.extent (n), n);
1427 
1428  octave_idx_type l, u;
1429  bool col_vec = ndims () == 2 && columns () == 1 && rows () != 1;
1430  if (i.is_scalar () && i(0) == n-1 && m_dimensions.isvector ())
1431  {
1432  // Stack "pop" operation.
1433  resize1 (n-1);
1434  }
1435  else if (i.is_cont_range (n, l, u))
1436  {
1437  // Special case deleting a contiguous range.
1438  octave_idx_type m = n + l - u;
1439  Array<T, Alloc> tmp (dim_vector (col_vec ? m : 1, ! col_vec ? m : 1));
1440  const T *src = data ();
1441  T *dest = tmp.fortran_vec ();
1442  std::copy_n (src, l, dest);
1443  std::copy (src + u, src + n, dest + l);
1444  *this = tmp;
1445  }
1446  else
1447  {
1448  // Use index.
1449  *this = index (i.complement (n));
1450  }
1451  }
1452 }
1453 
1454 template <typename T, typename Alloc>
1455 void
1457 {
1458  if (dim < 0 || dim >= ndims ())
1459  (*current_liboctave_error_handler) ("invalid dimension in delete_elements");
1460 
1461  octave_idx_type n = m_dimensions(dim);
1462  if (i.is_colon ())
1463  {
1464  *this = Array<T, Alloc> ();
1465  }
1466  else if (i.length (n) != 0)
1467  {
1468  if (i.extent (n) != n)
1469  octave::err_del_index_out_of_range (false, i.extent (n), n);
1470 
1471  octave_idx_type l, u;
1472 
1473  if (i.is_cont_range (n, l, u))
1474  {
1475  // Special case deleting a contiguous range.
1476  octave_idx_type nd = n + l - u;
1477  octave_idx_type dl = 1;
1478  octave_idx_type du = 1;
1479  dim_vector rdv = m_dimensions;
1480  rdv(dim) = nd;
1481  for (int k = 0; k < dim; k++) dl *= m_dimensions(k);
1482  for (int k = dim + 1; k < ndims (); k++) du *= m_dimensions(k);
1483 
1484  // Special case deleting a contiguous range.
1485  Array<T, Alloc> tmp = Array<T, Alloc> (rdv);
1486  const T *src = data ();
1487  T *dest = tmp.fortran_vec ();
1488  l *= dl; u *= dl; n *= dl;
1489  for (octave_idx_type k = 0; k < du; k++)
1490  {
1491  std::copy_n (src, l, dest);
1492  dest += l;
1493  std::copy (src + u, src + n, dest);
1494  dest += n - u;
1495  src += n;
1496  }
1497 
1498  *this = tmp;
1499  }
1500  else
1501  {
1502  // Use index.
1503  Array<octave::idx_vector> ia (dim_vector (ndims (), 1), octave::idx_vector::colon);
1504  ia (dim) = i.complement (n);
1505  *this = index (ia);
1506  }
1507  }
1508 }
1509 
1510 template <typename T, typename Alloc>
1511 void
1513 {
1514  int ial = ia.numel ();
1515 
1516  if (ial == 1)
1517  delete_elements (ia(0));
1518  else
1519  {
1520  int k, dim = -1;
1521  for (k = 0; k < ial; k++)
1522  {
1523  if (! ia(k).is_colon ())
1524  {
1525  if (dim < 0)
1526  dim = k;
1527  else
1528  break;
1529  }
1530  }
1531  if (dim < 0)
1532  {
1533  dim_vector dv = m_dimensions;
1534  dv(0) = 0;
1535  *this = Array<T, Alloc> (dv);
1536  }
1537  else if (k == ial)
1538  {
1539  delete_elements (dim, ia(dim));
1540  }
1541  else
1542  {
1543  // Allow the null assignment to succeed if it won't change
1544  // anything because the indices reference an empty slice,
1545  // provided that there is at most one non-colon (or
1546  // equivalent) index. So, we still have the requirement of
1547  // deleting a slice, but it is OK if the slice is empty.
1548 
1549  // For compatibility with Matlab, stop checking once we see
1550  // more than one non-colon index or an empty index. Matlab
1551  // considers "[]" to be an empty index but not "false". We
1552  // accept both.
1553 
1554  bool empty_assignment = false;
1555 
1556  int num_non_colon_indices = 0;
1557 
1558  int nd = ndims ();
1559 
1560  for (int i = 0; i < ial; i++)
1561  {
1562  octave_idx_type dim_len = (i >= nd ? 1 : m_dimensions(i));
1563 
1564  if (ia(i).length (dim_len) == 0)
1565  {
1566  empty_assignment = true;
1567  break;
1568  }
1569 
1570  if (! ia(i).is_colon_equiv (dim_len))
1571  {
1572  num_non_colon_indices++;
1573 
1574  if (num_non_colon_indices == 2)
1575  break;
1576  }
1577  }
1578 
1579  if (! empty_assignment)
1580  (*current_liboctave_error_handler)
1581  ("a null assignment can only have one non-colon index");
1582  }
1583  }
1584 
1585 }
1586 
1587 template <typename T, typename Alloc>
1590 {
1591  octave::idx_vector i (r, r + a.rows ());
1592  octave::idx_vector j (c, c + a.columns ());
1593  if (ndims () == 2 && a.ndims () == 2)
1594  assign (i, j, a);
1595  else
1596  {
1597  Array<octave::idx_vector> idx (dim_vector (a.ndims (), 1));
1598  idx(0) = i;
1599  idx(1) = j;
1600  for (int k = 2; k < a.ndims (); k++)
1601  idx(k) = octave::idx_vector (0, a.m_dimensions(k));
1602  assign (idx, a);
1603  }
1604 
1605  return *this;
1606 }
1607 
1608 template <typename T, typename Alloc>
1611 {
1614  const dim_vector dva = a.dims ().redim (n);
1615  for (octave_idx_type k = 0; k < n; k++)
1616  idx(k) = octave::idx_vector (ra_idx(k), ra_idx(k) + dva(k));
1617 
1618  assign (idx, a);
1619 
1620  return *this;
1621 }
1622 
1623 template <typename T, typename Alloc>
1626 {
1627  assert (ndims () == 2);
1628 
1629  octave_idx_type nr = dim1 ();
1630  octave_idx_type nc = dim2 ();
1631 
1632  if (nr >= 8 && nc >= 8)
1633  {
1634  Array<T, Alloc> result (dim_vector (nc, nr));
1635 
1636  // Reuse the implementation used for permuting.
1637 
1638  rec_permute_helper::blk_trans (data (), result.fortran_vec (), nr, nc);
1639 
1640  return result;
1641  }
1642  else if (nr > 1 && nc > 1)
1643  {
1644  Array<T, Alloc> result (dim_vector (nc, nr));
1645 
1646  for (octave_idx_type j = 0; j < nc; j++)
1647  for (octave_idx_type i = 0; i < nr; i++)
1648  result.xelem (j, i) = xelem (i, j);
1649 
1650  return result;
1651  }
1652  else
1653  {
1654  // Fast transpose for vectors and empty matrices.
1655  return Array<T, Alloc> (*this, dim_vector (nc, nr));
1656  }
1657 }
1658 
// Identity element transform; used as the default when hermitian()
// is called with a null function pointer (no conjugation needed).
template <typename T>
static T
no_op_fcn (const T& x)
{
  return x;
}
1665 
1666 template <typename T, typename Alloc>
1668 Array<T, Alloc>::hermitian (T (*fcn) (const T&)) const
1669 {
1670  assert (ndims () == 2);
1671 
1672  if (! fcn)
1673  fcn = no_op_fcn<T>;
1674 
1675  octave_idx_type nr = dim1 ();
1676  octave_idx_type nc = dim2 ();
1677 
1678  if (nr >= 8 && nc >= 8)
1679  {
1680  Array<T, Alloc> result (dim_vector (nc, nr));
1681 
1682  // Blocked transpose to attempt to avoid cache misses.
1683 
1684  T buf[64];
1685 
1686  octave_idx_type jj;
1687  for (jj = 0; jj < (nc - 8 + 1); jj += 8)
1688  {
1689  octave_idx_type ii;
1690  for (ii = 0; ii < (nr - 8 + 1); ii += 8)
1691  {
1692  // Copy to buffer
1693  for (octave_idx_type j = jj, k = 0, idxj = jj * nr;
1694  j < jj + 8; j++, idxj += nr)
1695  for (octave_idx_type i = ii; i < ii + 8; i++)
1696  buf[k++] = xelem (i + idxj);
1697 
1698  // Copy from buffer
1699  for (octave_idx_type i = ii, idxi = ii * nc; i < ii + 8;
1700  i++, idxi += nc)
1701  for (octave_idx_type j = jj, k = i - ii; j < jj + 8;
1702  j++, k+=8)
1703  result.xelem (j + idxi) = fcn (buf[k]);
1704  }
1705 
1706  if (ii < nr)
1707  for (octave_idx_type j = jj; j < jj + 8; j++)
1708  for (octave_idx_type i = ii; i < nr; i++)
1709  result.xelem (j, i) = fcn (xelem (i, j));
1710  }
1711 
1712  for (octave_idx_type j = jj; j < nc; j++)
1713  for (octave_idx_type i = 0; i < nr; i++)
1714  result.xelem (j, i) = fcn (xelem (i, j));
1715 
1716  return result;
1717  }
1718  else
1719  {
1720  Array<T, Alloc> result (dim_vector (nc, nr));
1721 
1722  for (octave_idx_type j = 0; j < nc; j++)
1723  for (octave_idx_type i = 0; i < nr; i++)
1724  result.xelem (j, i) = fcn (xelem (i, j));
1725 
1726  return result;
1727  }
1728 }
1729 
1730 /*
1731 
1732 ## Transpose tests for matrices of the tile size and plus or minus a row
1733 ## and with four tiles.
1734 
1735 %!shared m7, mt7, m8, mt8, m9, mt9
1736 %! m7 = reshape (1 : 7*8, 8, 7);
1737 %! mt7 = [1:8; 9:16; 17:24; 25:32; 33:40; 41:48; 49:56];
1738 %! m8 = reshape (1 : 8*8, 8, 8);
1739 %! mt8 = [mt7; 57:64];
1740 %! m9 = reshape (1 : 9*8, 8, 9);
1741 %! mt9 = [mt8; 65:72];
1742 
1743 %!assert (m7', mt7)
1744 %!assert ((1i*m7).', 1i * mt7)
1745 %!assert ((1i*m7)', conj (1i * mt7))
1746 %!assert (m8', mt8)
1747 %!assert ((1i*m8).', 1i * mt8)
1748 %!assert ((1i*m8)', conj (1i * mt8))
1749 %!assert (m9', mt9)
1750 %!assert ((1i*m9).', 1i * mt9)
1751 %!assert ((1i*m9)', conj (1i * mt9))
1752 %!assert ([m7, m8; m7, m8]', [mt7, mt7; mt8, mt8])
1753 %!assert ((1i*[m7, m8; m7, m8]).', 1i * [mt7, mt7; mt8, mt8])
1754 %!assert ((1i*[m7, m8; m7, m8])', conj (1i * [mt7, mt7; mt8, mt8]))
1755 %!assert ([m8, m8; m8, m8]', [mt8, mt8; mt8, mt8])
1756 %!assert ((1i*[m8, m8; m8, m8]).', 1i * [mt8, mt8; mt8, mt8])
1757 %!assert ((1i*[m8, m8; m8, m8])', conj (1i * [mt8, mt8; mt8, mt8]))
1758 %!assert ([m9, m8; m9, m8]', [mt9, mt9; mt8, mt8])
1759 %!assert ((1i*[m9, m8; m9, m8]).', 1i * [mt9, mt9; mt8, mt8])
1760 %!assert ((1i*[m9, m8; m9, m8])', conj (1i * [mt9, mt9; mt8, mt8]))
1761 
1762 */
1763 
1764 template <typename T, typename Alloc>
1765 T *
1767 {
1768  make_unique ();
1769 
1770  return m_slice_data;
1771 }
1772 
1773 // Non-real types don't have NaNs.
1774 template <typename T>
1775 inline bool
1777 {
1778  return false;
1779 }
1780 
1781 template <typename T, typename Alloc>
1783 Array<T, Alloc>::sort (int dim, sortmode mode) const
1784 {
1785  if (dim < 0)
1786  (*current_liboctave_error_handler) ("sort: invalid dimension");
1787 
1788  Array<T, Alloc> m (dims ());
1789 
1790  dim_vector dv = m.dims ();
1791 
1792  if (m.numel () < 1)
1793  return m;
1794 
1795  if (dim >= dv.ndims ())
1796  dv.resize (dim+1, 1);
1797 
1798  octave_idx_type ns = dv(dim);
1799  octave_idx_type iter = dv.numel () / ns;
1800  octave_idx_type stride = 1;
1801 
1802  for (int i = 0; i < dim; i++)
1803  stride *= dv(i);
1804 
1805  T *v = m.fortran_vec ();
1806  const T *ov = data ();
1807 
1808  octave_sort<T> lsort;
1809 
1810  if (mode != UNSORTED)
1811  lsort.set_compare (mode);
1812  else
1813  return m;
1814 
1815  if (stride == 1)
1816  {
1817  // Special case along first dimension avoids gather/scatter AND directly
1818  // sorts into destination buffer for an 11% performance boost.
1819  for (octave_idx_type j = 0; j < iter; j++)
1820  {
1821  // Copy and partition out NaNs.
1822  // No need to special case integer types <T> from floating point
1823  // types <T> to avoid sort_isnan() test as it makes no discernible
1824  // performance impact.
1825  octave_idx_type kl = 0;
1826  octave_idx_type ku = ns;
1827  for (octave_idx_type i = 0; i < ns; i++)
1828  {
1829  T tmp = ov[i];
1830  if (sort_isnan<T> (tmp))
1831  v[--ku] = tmp;
1832  else
1833  v[kl++] = tmp;
1834  }
1835 
1836  // sort.
1837  lsort.sort (v, kl);
1838 
1839  if (ku < ns)
1840  {
1841  // NaNs are in reverse order
1842  std::reverse (v + ku, v + ns);
1843  if (mode == DESCENDING)
1844  std::rotate (v, v + ku, v + ns);
1845  }
1846 
1847  v += ns;
1848  ov += ns;
1849  }
1850  }
1851  else
1852  {
1853  OCTAVE_LOCAL_BUFFER (T, buf, ns);
1854 
1855  for (octave_idx_type j = 0; j < iter; j++)
1856  {
1857  octave_idx_type offset = j;
1858  octave_idx_type n_strides = j / stride;
1859  offset += n_strides * stride * (ns - 1);
1860 
1861  // gather and partition out NaNs.
1862  octave_idx_type kl = 0;
1863  octave_idx_type ku = ns;
1864  for (octave_idx_type i = 0; i < ns; i++)
1865  {
1866  T tmp = ov[i*stride + offset];
1867  if (sort_isnan<T> (tmp))
1868  buf[--ku] = tmp;
1869  else
1870  buf[kl++] = tmp;
1871  }
1872 
1873  // sort.
1874  lsort.sort (buf, kl);
1875 
1876  if (ku < ns)
1877  {
1878  // NaNs are in reverse order
1879  std::reverse (buf + ku, buf + ns);
1880  if (mode == DESCENDING)
1881  std::rotate (buf, buf + ku, buf + ns);
1882  }
1883 
1884  // scatter.
1885  for (octave_idx_type i = 0; i < ns; i++)
1886  v[i*stride + offset] = buf[i];
1887  }
1888  }
1889 
1890  return m;
1891 }
1892 
1893 template <typename T, typename Alloc>
1896  sortmode mode) const
1897 {
1898  if (dim < 0 || dim >= ndims ())
1899  (*current_liboctave_error_handler) ("sort: invalid dimension");
1900 
1901  Array<T, Alloc> m (dims ());
1902 
1903  dim_vector dv = m.dims ();
1904 
1905  if (m.numel () < 1)
1906  {
1907  sidx = Array<octave_idx_type> (dv);
1908  return m;
1909  }
1910 
1911  octave_idx_type ns = dv(dim);
1912  octave_idx_type iter = dv.numel () / ns;
1913  octave_idx_type stride = 1;
1914 
1915  for (int i = 0; i < dim; i++)
1916  stride *= dv(i);
1917 
1918  T *v = m.fortran_vec ();
1919  const T *ov = data ();
1920 
1921  octave_sort<T> lsort;
1922 
1923  sidx = Array<octave_idx_type> (dv);
1924  octave_idx_type *vi = sidx.fortran_vec ();
1925 
1926  if (mode != UNSORTED)
1927  lsort.set_compare (mode);
1928  else
1929  return m;
1930 
1931  if (stride == 1)
1932  {
1933  // Special case for dim 1 avoids gather/scatter for performance boost.
1934  // See comments in Array::sort (dim, mode).
1935  for (octave_idx_type j = 0; j < iter; j++)
1936  {
1937  // copy and partition out NaNs.
1938  octave_idx_type kl = 0;
1939  octave_idx_type ku = ns;
1940  for (octave_idx_type i = 0; i < ns; i++)
1941  {
1942  T tmp = ov[i];
1943  if (sort_isnan<T> (tmp))
1944  {
1945  --ku;
1946  v[ku] = tmp;
1947  vi[ku] = i;
1948  }
1949  else
1950  {
1951  v[kl] = tmp;
1952  vi[kl] = i;
1953  kl++;
1954  }
1955  }
1956 
1957  // sort.
1958  lsort.sort (v, vi, kl);
1959 
1960  if (ku < ns)
1961  {
1962  // NaNs are in reverse order
1963  std::reverse (v + ku, v + ns);
1964  std::reverse (vi + ku, vi + ns);
1965  if (mode == DESCENDING)
1966  {
1967  std::rotate (v, v + ku, v + ns);
1968  std::rotate (vi, vi + ku, vi + ns);
1969  }
1970  }
1971 
1972  v += ns;
1973  vi += ns;
1974  ov += ns;
1975  }
1976  }
1977  else
1978  {
1979  OCTAVE_LOCAL_BUFFER (T, buf, ns);
1981 
1982  for (octave_idx_type j = 0; j < iter; j++)
1983  {
1984  octave_idx_type offset = j;
1985  octave_idx_type n_strides = j / stride;
1986  offset += n_strides * stride * (ns - 1);
1987 
1988  // gather and partition out NaNs.
1989  octave_idx_type kl = 0;
1990  octave_idx_type ku = ns;
1991  for (octave_idx_type i = 0; i < ns; i++)
1992  {
1993  T tmp = ov[i*stride + offset];
1994  if (sort_isnan<T> (tmp))
1995  {
1996  --ku;
1997  buf[ku] = tmp;
1998  bufi[ku] = i;
1999  }
2000  else
2001  {
2002  buf[kl] = tmp;
2003  bufi[kl] = i;
2004  kl++;
2005  }
2006  }
2007 
2008  // sort.
2009  lsort.sort (buf, bufi, kl);
2010 
2011  if (ku < ns)
2012  {
2013  // NaNs are in reverse order
2014  std::reverse (buf + ku, buf + ns);
2015  std::reverse (bufi + ku, bufi + ns);
2016  if (mode == DESCENDING)
2017  {
2018  std::rotate (buf, buf + ku, buf + ns);
2019  std::rotate (bufi, bufi + ku, bufi + ns);
2020  }
2021  }
2022 
2023  // scatter.
2024  for (octave_idx_type i = 0; i < ns; i++)
2025  v[i*stride + offset] = buf[i];
2026  for (octave_idx_type i = 0; i < ns; i++)
2027  vi[i*stride + offset] = bufi[i];
2028  }
2029  }
2030 
2031  return m;
2032 }
2033 
2034 template <typename T, typename Alloc>
2037  bool /* allow_chk */)
2038 {
2039  if (mode == ASCENDING)
2041  else if (mode == DESCENDING)
2043  else
2044  return nullptr;
2045 }
2046 
2047 template <typename T, typename Alloc>
2048 sortmode
2050 {
2051  octave_sort<T> lsort;
2052 
2053  octave_idx_type n = numel ();
2054 
2055  if (n <= 1)
2056  return (mode == UNSORTED) ? ASCENDING : mode;
2057 
2058  if (mode == UNSORTED)
2059  {
2060  // Auto-detect mode.
2061  compare_fcn_type compare
2062  = safe_comparator (ASCENDING, *this, false);
2063 
2064  if (compare (elem (n-1), elem (0)))
2065  mode = DESCENDING;
2066  else
2067  mode = ASCENDING;
2068  }
2069 
2070  lsort.set_compare (safe_comparator (mode, *this, false));
2071 
2072  if (! lsort.issorted (data (), n))
2073  mode = UNSORTED;
2074 
2075  return mode;
2076 
2077 }
2078 
2079 template <typename T, typename Alloc>
2082 {
2084 
2085  octave_sort<T> lsort (safe_comparator (mode, *this, true));
2086 
2087  octave_idx_type r = rows ();
2088  octave_idx_type c = cols ();
2089 
2090  idx = Array<octave_idx_type> (dim_vector (r, 1));
2091 
2092  lsort.sort_rows (data (), idx.fortran_vec (), r, c);
2093 
2094  return idx;
2095 }
2096 
2097 template <typename T, typename Alloc>
2098 sortmode
2100 {
2101  octave_sort<T> lsort;
2102 
2103  octave_idx_type r = rows ();
2104  octave_idx_type c = cols ();
2105 
2106  if (r <= 1 || c == 0)
2107  return (mode == UNSORTED) ? ASCENDING : mode;
2108 
2109  if (mode == UNSORTED)
2110  {
2111  // Auto-detect mode.
2112  compare_fcn_type compare
2113  = safe_comparator (ASCENDING, *this, false);
2114 
2115  octave_idx_type i;
2116  for (i = 0; i < cols (); i++)
2117  {
2118  T l = elem (0, i);
2119  T u = elem (rows () - 1, i);
2120  if (compare (l, u))
2121  {
2122  if (mode == DESCENDING)
2123  {
2124  mode = UNSORTED;
2125  break;
2126  }
2127  else
2128  mode = ASCENDING;
2129  }
2130  else if (compare (u, l))
2131  {
2132  if (mode == ASCENDING)
2133  {
2134  mode = UNSORTED;
2135  break;
2136  }
2137  else
2138  mode = DESCENDING;
2139  }
2140  }
2141  if (mode == UNSORTED && i == cols ())
2142  mode = ASCENDING;
2143  }
2144 
2145  if (mode != UNSORTED)
2146  {
2147  lsort.set_compare (safe_comparator (mode, *this, false));
2148 
2149  if (! lsort.is_sorted_rows (data (), r, c))
2150  mode = UNSORTED;
2151  }
2152 
2153  return mode;
2154 
2155 }
2156 
2157 // Do a binary lookup in a sorted array.
2158 template <typename T, typename Alloc>
2160 Array<T, Alloc>::lookup (const T& value, sortmode mode) const
2161 {
2162  octave_idx_type n = numel ();
2163  octave_sort<T> lsort;
2164 
2165  if (mode == UNSORTED)
2166  {
2167  // auto-detect mode
2168  if (n > 1 && lsort.descending_compare (elem (0), elem (n-1)))
2169  mode = DESCENDING;
2170  else
2171  mode = ASCENDING;
2172  }
2173 
2174  lsort.set_compare (mode);
2175 
2176  return lsort.lookup (data (), n, value);
2177 }
2178 
2179 template <typename T, typename Alloc>
2182 {
2183  octave_idx_type n = numel ();
2184  octave_idx_type nval = values.numel ();
2185  octave_sort<T> lsort;
2186  Array<octave_idx_type> idx (values.dims ());
2187 
2188  if (mode == UNSORTED)
2189  {
2190  // auto-detect mode
2191  if (n > 1 && lsort.descending_compare (elem (0), elem (n-1)))
2192  mode = DESCENDING;
2193  else
2194  mode = ASCENDING;
2195  }
2196 
2197  lsort.set_compare (mode);
2198 
2199  // This determines the split ratio between the O(M*log2(N)) and O(M+N)
2200  // algorithms.
2201  static const double ratio = 1.0;
2202  sortmode vmode = UNSORTED;
2203 
2204  // Attempt the O(M+N) algorithm if M is large enough.
2205  if (nval > ratio * n / octave::math::log2 (n + 1.0))
2206  {
2207  vmode = values.issorted ();
2208  // The table must not contain a NaN.
2209  if ((vmode == ASCENDING && sort_isnan<T> (values(nval-1)))
2210  || (vmode == DESCENDING && sort_isnan<T> (values(0))))
2211  vmode = UNSORTED;
2212  }
2213 
2214  if (vmode != UNSORTED)
2215  lsort.lookup_sorted (data (), n, values.data (), nval,
2216  idx.fortran_vec (), vmode != mode);
2217  else
2218  lsort.lookup (data (), n, values.data (), nval, idx.fortran_vec ());
2219 
2220  return idx;
2221 }
2222 
2223 template <typename T, typename Alloc>
2226 {
2227  const T *src = data ();
2228  octave_idx_type nel = numel ();
2229  octave_idx_type retval = 0;
2230  const T zero = T ();
2231  for (octave_idx_type i = 0; i < nel; i++)
2232  if (src[i] != zero)
2233  retval++;
2234 
2235  return retval;
2236 }
2237 
2238 template <typename T, typename Alloc>
2241 {
2242  Array<octave_idx_type> retval;
2243  const T *src = data ();
2244  octave_idx_type nel = numel ();
2245  const T zero = T ();
2246  if (n < 0 || n >= nel)
2247  {
2248  // We want all elements, which means we'll almost surely need
2249  // to resize. So count first, then allocate array of exact size.
2250  octave_idx_type cnt = 0;
2251  for (octave_idx_type i = 0; i < nel; i++)
2252  cnt += src[i] != zero;
2253 
2254  retval.clear (cnt, 1);
2255  octave_idx_type *dest = retval.fortran_vec ();
2256  for (octave_idx_type i = 0; i < nel; i++)
2257  if (src[i] != zero) *dest++ = i;
2258  }
2259  else
2260  {
2261  // We want a fixed max number of elements, usually small. So be
2262  // optimistic, alloc the array in advance, and then resize if
2263  // needed.
2264  retval.clear (n, 1);
2265  if (backward)
2266  {
2267  // Do the search as a series of successive single-element searches.
2268  octave_idx_type k = 0;
2269  octave_idx_type l = nel - 1;
2270  for (; k < n; k++)
2271  {
2272  for (; l >= 0 && src[l] == zero; l--) ;
2273  if (l >= 0)
2274  retval(k) = l--;
2275  else
2276  break;
2277  }
2278  if (k < n)
2279  retval.resize2 (k, 1);
2280  octave_idx_type *rdata = retval.fortran_vec ();
2281  std::reverse (rdata, rdata + k);
2282  }
2283  else
2284  {
2285  // Do the search as a series of successive single-element searches.
2286  octave_idx_type k = 0;
2287  octave_idx_type l = 0;
2288  for (; k < n; k++)
2289  {
2290  for (; l != nel && src[l] == zero; l++) ;
2291  if (l != nel)
2292  retval(k) = l++;
2293  else
2294  break;
2295  }
2296  if (k < n)
2297  retval.resize2 (k, 1);
2298  }
2299  }
2300 
2301  // Fixup return dimensions, for Matlab compatibility.
2302  // find (zeros (0,0)) -> zeros (0,0)
2303  // find (zeros (1,0)) -> zeros (1,0)
2304  // find (zeros (0,1)) -> zeros (0,1)
2305  // find (zeros (0,X)) -> zeros (0,1)
2306  // find (zeros (1,1)) -> zeros (0,0) !!!! WHY?
2307  // find (zeros (0,1,0)) -> zeros (0,0)
2308  // find (zeros (0,1,0,1)) -> zeros (0,0) etc
2309 
2310  if ((numel () == 1 && retval.isempty ())
2311  || (rows () == 0 && dims ().numel (1) == 0))
2312  retval.m_dimensions = dim_vector ();
2313  else if (rows () == 1 && ndims () == 2)
2314  retval.m_dimensions = dim_vector (1, retval.numel ());
2315 
2316  return retval;
2317 }
2318 
2319 template <typename T, typename Alloc>
2322 {
2323  if (dim < 0)
2324  (*current_liboctave_error_handler) ("nth_element: invalid dimension");
2325 
2326  dim_vector dv = dims ();
2327  if (dim >= dv.ndims ())
2328  dv.resize (dim+1, 1);
2329 
2330  octave_idx_type ns = dv(dim);
2331 
2332  octave_idx_type nn = n.length (ns);
2333 
2334  dv(dim) = std::min (nn, ns);
2335  dv.chop_trailing_singletons ();
2336  dim = std::min (dv.ndims (), static_cast<octave_idx_type> (dim));
2337 
2338  Array<T, Alloc> m (dv);
2339 
2340  if (m.isempty ())
2341  return m;
2342 
2343  sortmode mode = UNSORTED;
2344  octave_idx_type lo = 0;
2345 
2346  switch (n.idx_class ())
2347  {
2348  case octave::idx_vector::class_scalar:
2349  mode = ASCENDING;
2350  lo = n(0);
2351  break;
2352  case octave::idx_vector::class_range:
2353  {
2354  octave_idx_type inc = n.increment ();
2355  if (inc == 1)
2356  {
2357  mode = ASCENDING;
2358  lo = n(0);
2359  }
2360  else if (inc == -1)
2361  {
2362  mode = DESCENDING;
2363  lo = ns - 1 - n(0);
2364  }
2365  }
2366  break;
2367  case octave::idx_vector::class_vector:
2368  // This case resolves bug #51329, a fallback to allow the given index
2369  // to be a sequential vector instead of the typical scalar or range
2370  if (n(1) - n(0) == 1)
2371  {
2372  mode = ASCENDING;
2373  lo = n(0);
2374  }
2375  else if (n(1) - n(0) == -1)
2376  {
2377  mode = DESCENDING;
2378  lo = ns - 1 - n(0);
2379  }
2380  // Ensure that the vector is actually an arithmetic contiguous sequence
2381  for (octave_idx_type i = 2; i < n.length () && mode != UNSORTED; i++)
2382  if ((mode == ASCENDING && n(i) - n(i-1) != 1)
2383  || (mode == DESCENDING && n(i) - n(i-1) != -1))
2384  mode = UNSORTED;
2385  break;
2386  default:
2387  break;
2388  }
2389 
2390  if (mode == UNSORTED)
2391  (*current_liboctave_error_handler)
2392  ("nth_element: n must be a scalar or a contiguous range");
2393 
2394  octave_idx_type up = lo + nn;
2395 
2396  if (lo < 0 || up > ns)
2397  (*current_liboctave_error_handler) ("nth_element: invalid element index");
2398 
2399  octave_idx_type iter = numel () / ns;
2400  octave_idx_type stride = 1;
2401 
2402  for (int i = 0; i < dim; i++)
2403  stride *= dv(i);
2404 
2405  T *v = m.fortran_vec ();
2406  const T *ov = data ();
2407 
2408  OCTAVE_LOCAL_BUFFER (T, buf, ns);
2409 
2410  octave_sort<T> lsort;
2411  lsort.set_compare (mode);
2412 
2413  for (octave_idx_type j = 0; j < iter; j++)
2414  {
2415  octave_idx_type kl = 0;
2416  octave_idx_type ku = ns;
2417 
2418  if (stride == 1)
2419  {
2420  // copy without NaNs.
2421  for (octave_idx_type i = 0; i < ns; i++)
2422  {
2423  T tmp = ov[i];
2424  if (sort_isnan<T> (tmp))
2425  buf[--ku] = tmp;
2426  else
2427  buf[kl++] = tmp;
2428  }
2429 
2430  ov += ns;
2431  }
2432  else
2433  {
2434  octave_idx_type offset = j % stride;
2435  // copy without NaNs.
2436  for (octave_idx_type i = 0; i < ns; i++)
2437  {
2438  T tmp = ov[offset + i*stride];
2439  if (sort_isnan<T> (tmp))
2440  buf[--ku] = tmp;
2441  else
2442  buf[kl++] = tmp;
2443  }
2444 
2445  if (offset == stride-1)
2446  ov += ns*stride;
2447  }
2448 
2449  if (ku == ns)
2450  lsort.nth_element (buf, ns, lo, up);
2451  else if (mode == ASCENDING)
2452  lsort.nth_element (buf, ku, lo, std::min (ku, up));
2453  else
2454  {
2455  octave_idx_type nnan = ns - ku;
2456  octave_idx_type zero = 0;
2457  lsort.nth_element (buf, ku, std::max (lo - nnan, zero),
2458  std::max (up - nnan, zero));
2459  std::rotate (buf, buf + ku, buf + ns);
2460  }
2461 
2462  if (stride == 1)
2463  {
2464  for (octave_idx_type i = 0; i < nn; i++)
2465  v[i] = buf[lo + i];
2466 
2467  v += nn;
2468  }
2469  else
2470  {
2471  octave_idx_type offset = j % stride;
2472  for (octave_idx_type i = 0; i < nn; i++)
2473  v[offset + stride * i] = buf[lo + i];
2474  if (offset == stride-1)
2475  v += nn*stride;
2476  }
2477  }
2478 
2479  return m;
2480 }
2481 
// Generate stub specializations of the sort/search API for an element
// type T that has no meaningful ordering.  API is the export/visibility
// decoration (may be empty).
#define NO_INSTANTIATE_ARRAY_SORT_API(T, API)                           \
  template <> API Array<T>                                              \
  Array<T>::sort (int, sortmode) const                                  \
  {                                                                     \
    return *this;                                                       \
  }                                                                     \
  template <> API Array<T>                                              \
  Array<T>::sort (Array<octave_idx_type> &sidx, int, sortmode) const    \
  {                                                                     \
    sidx = Array<octave_idx_type> ();                                   \
    return *this;                                                       \
  }                                                                     \
  template <> API sortmode                                              \
  Array<T>::issorted (sortmode) const                                   \
  {                                                                     \
    return UNSORTED;                                                    \
  }                                                                     \
  API Array<T>::compare_fcn_type                                        \
  safe_comparator (sortmode, const Array<T>&, bool)                     \
  {                                                                     \
    return nullptr;                                                     \
  }                                                                     \
  template <> API Array<octave_idx_type>                                \
  Array<T>::sort_rows_idx (sortmode) const                              \
  {                                                                     \
    return Array<octave_idx_type> ();                                   \
  }                                                                     \
  template <> API sortmode                                              \
  Array<T>::is_sorted_rows (sortmode) const                             \
  {                                                                     \
    return UNSORTED;                                                    \
  }                                                                     \
  template <> API octave_idx_type                                       \
  Array<T>::lookup (T const &, sortmode) const                          \
  {                                                                     \
    return 0;                                                           \
  }                                                                     \
  template <> API Array<octave_idx_type>                                \
  Array<T>::lookup (const Array<T>&, sortmode) const                    \
  {                                                                     \
    return Array<octave_idx_type> ();                                   \
  }                                                                     \
  template <> API octave_idx_type                                       \
  Array<T>::nnz (void) const                                            \
  {                                                                     \
    return 0;                                                           \
  }                                                                     \
  template <> API Array<octave_idx_type>                                \
  Array<T>::find (octave_idx_type, bool) const                          \
  {                                                                     \
    return Array<octave_idx_type> ();                                   \
  }                                                                     \
  template <> API Array<T>                                              \
  Array<T>::nth_element (const octave::idx_vector&, int) const {        \
    return Array<T> ();                                                 \
  }

// Convenience form with an empty export decoration.
#define NO_INSTANTIATE_ARRAY_SORT(T) NO_INSTANTIATE_ARRAY_SORT_API (T,)
2540 
2541 template <typename T, typename Alloc>
2544 {
2545  dim_vector dv = dims ();
2546  octave_idx_type nd = dv.ndims ();
2548 
2549  if (nd > 2)
2550  (*current_liboctave_error_handler) ("Matrix must be 2-dimensional");
2551 
2552  octave_idx_type nnr = dv(0);
2553  octave_idx_type nnc = dv(1);
2554 
2555  if (nnr == 0 && nnc == 0)
2556  ; // do nothing for empty matrix
2557  else if (nnr != 1 && nnc != 1)
2558  {
2559  // Extract diag from matrix
2560  if (k > 0)
2561  nnc -= k;
2562  else if (k < 0)
2563  nnr += k;
2564 
2565  if (nnr > 0 && nnc > 0)
2566  {
2567  octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;
2568 
2569  d.resize (dim_vector (ndiag, 1));
2570 
2571  if (k > 0)
2572  {
2573  for (octave_idx_type i = 0; i < ndiag; i++)
2574  d.xelem (i) = elem (i, i+k);
2575  }
2576  else if (k < 0)
2577  {
2578  for (octave_idx_type i = 0; i < ndiag; i++)
2579  d.xelem (i) = elem (i-k, i);
2580  }
2581  else
2582  {
2583  for (octave_idx_type i = 0; i < ndiag; i++)
2584  d.xelem (i) = elem (i, i);
2585  }
2586  }
2587  else // Matlab returns [] 0x1 for out-of-range diagonal
2588  d.resize (dim_vector (0, 1));
2589  }
2590  else
2591  {
2592  // Create diag matrix from vector
2593  octave_idx_type roff = 0;
2594  octave_idx_type coff = 0;
2595  if (k > 0)
2596  {
2597  roff = 0;
2598  coff = k;
2599  }
2600  else if (k < 0)
2601  {
2602  roff = -k;
2603  coff = 0;
2604  }
2605 
2606  if (nnr == 1)
2607  {
2608  octave_idx_type n = nnc + std::abs (k);
2609  d = Array<T, Alloc> (dim_vector (n, n), resize_fill_value ());
2610 
2611  for (octave_idx_type i = 0; i < nnc; i++)
2612  d.xelem (i+roff, i+coff) = elem (0, i);
2613  }
2614  else
2615  {
2616  octave_idx_type n = nnr + std::abs (k);
2617  d = Array<T, Alloc> (dim_vector (n, n), resize_fill_value ());
2618 
2619  for (octave_idx_type i = 0; i < nnr; i++)
2620  d.xelem (i+roff, i+coff) = elem (i, 0);
2621  }
2622  }
2623 
2624  return d;
2625 }
2626 
2627 template <typename T, typename Alloc>
2630 {
2631  if (ndims () != 2 || (rows () != 1 && cols () != 1))
2632  (*current_liboctave_error_handler) ("cat: invalid dimension");
2633 
2634  Array<T, Alloc> retval (dim_vector (m, n), resize_fill_value ());
2635 
2636  octave_idx_type nel = std::min (numel (), std::min (m, n));
2637  for (octave_idx_type i = 0; i < nel; i++)
2638  retval.xelem (i, i) = xelem (i);
2639 
2640  return retval;
2641 }
2642 
2643 template <typename T, typename Alloc>
2646 {
2647  // Default concatenation.
2648  bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
2649 
2650  if (dim == -1 || dim == -2)
2651  {
2652  concat_rule = &dim_vector::hvcat;
2653  dim = -dim - 1;
2654  }
2655  else if (dim < 0)
2656  (*current_liboctave_error_handler) ("cat: invalid dimension");
2657 
2658  if (n == 1)
2659  return array_list[0];
2660  else if (n == 0)
2661  return Array<T, Alloc> ();
2662 
2663  // Special case:
2664  //
2665  // cat (dim, [], ..., [], A, ...)
2666  //
2667  // with dim > 2, A not 0x0, and at least three arguments to
2668  // concatenate is equivalent to
2669  //
2670  // cat (dim, A, ...)
2671  //
2672  // Note that this check must be performed here because for full-on
2673  // braindead Matlab compatibility, we need to have things like
2674  //
2675  // cat (3, [], [], A)
2676  //
2677  // succeed, but to have things like
2678  //
2679  // cat (3, cat (3, [], []), A)
2680  // cat (3, zeros (0, 0, 2), A)
2681  //
2682  // fail. See also bug report #31615.
2683 
2684  octave_idx_type istart = 0;
2685 
2686  if (n > 2 && dim > 1)
2687  {
2688  for (octave_idx_type i = 0; i < n; i++)
2689  {
2690  dim_vector dv = array_list[i].dims ();
2691 
2692  if (dv.zero_by_zero ())
2693  istart++;
2694  else
2695  break;
2696  }
2697 
2698  // Don't skip any initial arguments if they are all empty.
2699  if (istart >= n)
2700  istart = 0;
2701  }
2702 
2703  dim_vector dv = array_list[istart++].dims ();
2704 
2705  for (octave_idx_type i = istart; i < n; i++)
2706  if (! (dv.*concat_rule) (array_list[i].dims (), dim))
2707  (*current_liboctave_error_handler) ("cat: dimension mismatch");
2708 
2709  Array<T, Alloc> retval (dv);
2710 
2711  if (retval.isempty ())
2712  return retval;
2713 
2714  int nidx = std::max (dv.ndims (), static_cast<octave_idx_type> (dim + 1));
2715  Array<octave::idx_vector> idxa (dim_vector (nidx, 1), octave::idx_vector::colon);
2716  octave_idx_type l = 0;
2717 
2718  for (octave_idx_type i = 0; i < n; i++)
2719  {
2720  // NOTE: This takes some thinking, but no matter what the above rules
2721  // are, an empty array can always be skipped at this point, because
2722  // the result dimensions are already determined, and there is no way
2723  // an empty array may contribute a nonzero piece along the dimension
2724  // at this point, unless an empty array can be promoted to a non-empty
2725  // one (which makes no sense). I repeat, *no way*, think about it.
2726  if (array_list[i].isempty ())
2727  continue;
2728 
2729  octave_quit ();
2730 
2731  octave_idx_type u;
2732  if (dim < array_list[i].ndims ())
2733  u = l + array_list[i].dims ()(dim);
2734  else
2735  u = l + 1;
2736 
2737  idxa(dim) = octave::idx_vector (l, u);
2738 
2739  retval.assign (idxa, array_list[i]);
2740 
2741  l = u;
2742  }
2743 
2744  return retval;
2745 }
2746 
2747 template <typename T, typename Alloc>
2748 void
2749 Array<T, Alloc>::print_info (std::ostream& os, const std::string& prefix) const
2750 {
2751  os << prefix << "m_rep address: " << m_rep << '\n'
2752  << prefix << "m_rep->m_len: " << m_rep->m_len << '\n'
2753  << prefix << "m_rep->m_data: " << static_cast<void *> (m_rep->m_data) << '\n'
2754  << prefix << "m_rep->m_count: " << m_rep->m_count << '\n'
2755  << prefix << "m_slice_data: " << static_cast<void *> (m_slice_data) << '\n'
2756  << prefix << "m_slice_len: " << m_slice_len << '\n';
2757 
2758  // 2D info:
2759  //
2760  // << pefix << "rows: " << rows () << "\n"
2761  // << prefix << "cols: " << cols () << "\n";
2762 }
2763 
2764 template <typename T, typename Alloc>
2766 {
2767  bool retval = m_dimensions == dv;
2768  if (retval)
2769  m_dimensions = dv;
2770 
2771  return retval;
2772 }
2773 
2774 template <typename T, typename Alloc>
2776 {
2777  // This guards against accidental implicit instantiations.
2778  // Array<T, Alloc> instances should always be explicit and use INSTANTIATE_ARRAY.
2779  T::__xXxXx__ ();
2780 }
2781 
// Explicitly instantiate Array<T>: specialize instantiation_guard to a
// no-op (defeating the compile-time guard above) and emit the explicit
// template instantiation with the given API export decoration.
#define INSTANTIATE_ARRAY(T, API) \
  template <> API void \
  Array<T>::instantiation_guard () { } \
  \
  template class API Array<T>
2787 
2788 // FIXME: is this used?
2789 
// Debug-style stream output for an Array: prints the dimensionality and
// size header, then the data.  A <= 2-D array is printed as one 2-D
// slab; an N-D array is printed as a sequence of 2-D pages labeled
// "(:,:,k,...) =" with 1-based page indices.
template <typename T, typename Alloc>
std::ostream&
operator << (std::ostream& os, const Array<T, Alloc>& a)
{
  dim_vector a_dims = a.dims ();

  int n_dims = a_dims.ndims ();

  os << n_dims << "-dimensional array";

  if (n_dims)
    os << " (" << a_dims.str () << ')';

  os <<"\n\n";

  if (n_dims)
    {
      os << "data:";

      // Running multi-dimensional index into a, initially all zeros.
      Array<octave_idx_type> ra_idx (dim_vector (n_dims, 1), 0);

      // Number of times the first 2d-array is to be displayed.

      // m = product of all dimensions beyond the first two, i.e. the
      // number of 2-D pages.
      octave_idx_type m = 1;
      for (int i = 2; i < n_dims; i++)
        m *= a_dims(i);

      if (m == 1)
        {
          // Single page: either a plain 2-D matrix or (default case)
          // a vector printed on one line.
          octave_idx_type rows = 0;
          octave_idx_type cols = 0;

          switch (n_dims)
            {
            case 2:
              rows = a_dims(0);
              cols = a_dims(1);

              // Row-major traversal for display: one output line per row.
              for (octave_idx_type j = 0; j < rows; j++)
                {
                  ra_idx(0) = j;
                  for (octave_idx_type k = 0; k < cols; k++)
                    {
                      ra_idx(1) = k;
                      os << ' ' << a.elem (ra_idx);
                    }
                  os << "\n";
                }
              break;

            default:
              rows = a_dims(0);

              for (octave_idx_type k = 0; k < rows; k++)
                {
                  ra_idx(0) = k;
                  os << ' ' << a.elem (ra_idx);
                }
              break;
            }

          os << "\n";
        }
      else
        {
          // Multiple pages: print each with a "(:,:,i,j,...) =" header
          // built from the trailing (1-based) indices of ra_idx.
          octave_idx_type rows = a_dims(0);
          octave_idx_type cols = a_dims(1);

          for (int i = 0; i < m; i++)
            {
              os << "\n(:,:,";

              for (int j = 2; j < n_dims - 1; j++)
                os << ra_idx(j) + 1 << ',';

              os << ra_idx(n_dims - 1) + 1 << ") = \n";

              for (octave_idx_type j = 0; j < rows; j++)
                {
                  ra_idx(0) = j;

                  for (octave_idx_type k = 0; k < cols; k++)
                    {
                      ra_idx(1) = k;
                      os << ' ' << a.elem (ra_idx);
                    }

                  os << "\n";
                }

              os << "\n";

              // Advance the trailing page indices (dimension 2 onward),
              // except after the last page.
              if (i != m - 1)
                increment_index (ra_idx, a_dims, 2);
            }
        }
    }

  return os;
}
bool sort_isnan(typename ref_param< T >::type)
Definition: Array-base.cc:1776
Array< T, Alloc >::compare_fcn_type safe_comparator(sortmode mode, const Array< T, Alloc > &, bool)
Definition: Array-base.cc:2036
std::ostream & operator<<(std::ostream &os, const Array< T, Alloc > &a)
Definition: Array-base.cc:2792
static T no_op_fcn(const T &x)
Definition: Array-base.cc:1661
octave_idx_type compute_index(octave_idx_type n, const dim_vector &dims)
Definition: Array-util.cc:177
dim_vector zero_dims_inquire(const Array< octave::idx_vector > &ia, const dim_vector &rhdv)
Definition: Array-util.cc:429
void increment_index(Array< octave_idx_type > &ra_idx, const dim_vector &dimensions, int start_dimension)
Definition: Array-util.cc:60
static F77_INT nn
Definition: DASPK.cc:66
static int elem
Definition: __contourc__.cc:54
charNDArray max(char d, const charNDArray &m)
Definition: chNDArray.cc:230
charNDArray min(char d, const charNDArray &m)
Definition: chNDArray.cc:207
The real representation of all arrays.
Definition: Array.h:135
octave::refcount< octave_idx_type > m_count
Definition: Array.h:145
N Dimensional Array with copy-on-write semantics.
Definition: Array.h:130
OCTARRAY_OVERRIDABLE_FUNC_API octave_idx_type columns(void) const
Definition: Array.h:471
OCTARRAY_API Array< T, Alloc > transpose(void) const
Size of the specified dimension.
Definition: Array-base.cc:1625
OCTARRAY_API void clear(void)
Definition: Array-base.cc:109
OCTARRAY_API Array< T, Alloc > squeeze(void) const
Chop off leading singleton dimensions.
Definition: Array-base.cc:139
OCTARRAY_API Array< T, Alloc > index(const octave::idx_vector &i) const
Indexing without resizing.
Definition: Array-base.cc:719
OCTARRAY_OVERRIDABLE_FUNC_API const T * data(void) const
Size of the specified dimension.
Definition: Array.h:663
friend class Array
Size of the specified dimension.
Definition: Array.h:947
OCTARRAY_API void assign(const octave::idx_vector &i, const Array< T, Alloc > &rhs, const T &rfv)
Indexed assignment (always with resize & fill).
Definition: Array-base.cc:1137
OCTARRAY_OVERRIDABLE_FUNC_API bool isempty(void) const
Size of the specified dimension.
Definition: Array.h:651
OCTARRAY_OVERRIDABLE_FUNC_API octave_idx_type dim2(void) const
Definition: Array.h:467
OCTARRAY_OVERRIDABLE_FUNC_API octave_idx_type dim1(void) const
Definition: Array.h:456
OCTARRAY_API Array< T, Alloc > nth_element(const octave::idx_vector &n, int dim=0) const
Returns the n-th element in increasing order, using the same ordering as used for sort.
Definition: Array-base.cc:2321
OCTARRAY_API octave_idx_type nnz(void) const
Count nonzero elements.
Definition: Array-base.cc:2225
OCTARRAY_API Array< T, Alloc > hermitian(T(*fcn)(const T &)=nullptr) const
Size of the specified dimension.
Definition: Array-base.cc:1668
OCTARRAY_API void resize1(octave_idx_type n, const T &rfv)
Size of the specified dimension.
Definition: Array-base.cc:919
ref_param< T >::type crefT
Definition: Array.h:238
OCTARRAY_OVERRIDABLE_FUNC_API octave_idx_type numel(void) const
Number of elements in the array.
Definition: Array.h:414
static OCTARRAY_API void instantiation_guard()
Size of the specified dimension.
Definition: Array-base.cc:2775
OCTARRAY_API Array< T, Alloc > column(octave_idx_type k) const
Extract column: A(:,k+1).
Definition: Array-base.cc:283
OCTARRAY_OVERRIDABLE_FUNC_API const dim_vector & dims(void) const
Return a const-reference so that dims ()(i) works efficiently.
Definition: Array.h:503
OCTARRAY_API Array< T, Alloc > linear_slice(octave_idx_type lo, octave_idx_type up) const
Extract a slice from this array as a column vector: A(:)(lo+1:up).
Definition: Array-base.cc:303
OCTARRAY_API bool optimize_dimensions(const dim_vector &dv)
Returns true if this->dims () == dv, and if so, replaces this->m_dimensions by a shallow copy of dv.
Definition: Array-base.cc:2765
OCTARRAY_API Array< T, Alloc > page(octave_idx_type k) const
Extract page: A(:,:,k+1).
Definition: Array-base.cc:292
OCTARRAY_API void resize(const dim_vector &dv, const T &rfv)
Size of the specified dimension.
Definition: Array-base.cc:1032
OCTARRAY_API Array< T, Alloc > & insert(const Array< T, Alloc > &a, const Array< octave_idx_type > &idx)
Insert an array into another at a specified position.
Definition: Array-base.cc:1610
static OCTARRAY_API Array< T, Alloc >::ArrayRep * nil_rep(void)
Definition: Array-base.cc:46
OCTARRAY_OVERRIDABLE_FUNC_API octave_idx_type rows(void) const
Definition: Array.h:459
OCTARRAY_OVERRIDABLE_FUNC_API Array< T, Alloc > reshape(octave_idx_type nr, octave_idx_type nc) const
Size of the specified dimension.
Definition: Array.h:635
OCTARRAY_API T * fortran_vec(void)
Size of the specified dimension.
Definition: Array-base.cc:1766
OCTARRAY_API void delete_elements(const octave::idx_vector &i)
Deleting elements.
Definition: Array-base.cc:1416
OCTARRAY_API Array< octave_idx_type > sort_rows_idx(sortmode mode=ASCENDING) const
Sort by rows returns only indices.
Definition: Array-base.cc:2081
OCTARRAY_API Array< octave_idx_type > find(octave_idx_type n=-1, bool backward=false) const
Find indices of (at most n) nonzero elements.
Definition: Array-base.cc:2240
dim_vector m_dimensions
Definition: Array.h:245
OCTARRAY_API Array< T, Alloc > permute(const Array< octave_idx_type > &vec, bool inv=false) const
Size of the specified dimension.
Definition: Array-base.cc:453
OCTARRAY_API Array< T, Alloc > sort(int dim=0, sortmode mode=ASCENDING) const
Size of the specified dimension.
Definition: Array-base.cc:1783
OCTARRAY_API void print_info(std::ostream &os, const std::string &prefix) const
Size of the specified dimension.
Definition: Array-base.cc:2749
OCTARRAY_API sortmode issorted(sortmode mode=UNSORTED) const
Ordering is auto-detected or can be specified.
Definition: Array-base.cc:2049
virtual OCTARRAY_API T resize_fill_value(void) const
Size of the specified dimension.
Definition: Array-base.cc:908
OCTARRAY_API T & checkelem(octave_idx_type n)
Size of the specified dimension.
Definition: Array-base.cc:214
OCTARRAY_OVERRIDABLE_FUNC_API int ndims(void) const
Size of the specified dimension.
Definition: Array.h:677
OCTARRAY_API sortmode is_sorted_rows(sortmode mode=UNSORTED) const
Ordering is auto-detected or can be specified.
Definition: Array-base.cc:2099
OCTARRAY_API void fill(const T &val)
Definition: Array-base.cc:95
OCTARRAY_OVERRIDABLE_FUNC_API T & elem(octave_idx_type n)
Size of the specified dimension.
Definition: Array.h:562
OCTARRAY_API Array< T, Alloc > diag(octave_idx_type k=0) const
Get the kth super or subdiagonal.
Definition: Array-base.cc:2543
OCTARRAY_API octave_idx_type compute_index(octave_idx_type i, octave_idx_type j) const
Size of the specified dimension.
Definition: Array-base.cc:192
OCTARRAY_API octave_idx_type lookup(const T &value, sortmode mode=UNSORTED) const
Do a binary lookup in a sorted array.
Definition: Array-base.cc:2160
Array< T, Alloc >::ArrayRep * m_rep
Definition: Array.h:247
OCTARRAY_API void resize2(octave_idx_type nr, octave_idx_type nc, const T &rfv)
Resizing (with fill).
Definition: Array-base.cc:990
static OCTARRAY_API Array< T, Alloc > cat(int dim, octave_idx_type n, const Array< T, Alloc > *array_list)
Concatenation along a specified (0-based) dimension, equivalent to cat().
Definition: Array-base.cc:2645
OCTARRAY_OVERRIDABLE_FUNC_API T & xelem(octave_idx_type n)
Size of the specified dimension.
Definition: Array.h:524
Vector representing the dimensions (size) of an Array.
Definition: dim-vector.h:94
OCTAVE_API bool concat(const dim_vector &dvb, int dim)
This corresponds to cat().
Definition: dim-vector.cc:140
OCTAVE_API void chop_all_singletons(void)
Definition: dim-vector.cc:50
OCTAVE_API std::string str(char sep='x') const
Definition: dim-vector.cc:68
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition: dim-vector.h:335
bool zero_by_zero(void) const
Definition: dim-vector.h:311
void resize(int n, int fill_value=0)
Definition: dim-vector.h:272
static dim_vector alloc(int n)
Definition: dim-vector.h:202
OCTAVE_API bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
Definition: dim-vector.cc:201
void chop_trailing_singletons(void)
Definition: dim-vector.h:164
bool any_neg(void) const
Definition: dim-vector.h:357
octave_idx_type ndims(void) const
Number of dimensions.
Definition: dim-vector.h:257
bool is_nd_vector(void) const
Definition: dim-vector.h:400
OCTAVE_API dim_vector redim(int n) const
Force certain dimensionality, preserving numel ().
Definition: dim-vector.cc:226
OCTAVE_API octave_idx_type safe_numel(void) const
The following function will throw a std::bad_alloc () exception if the requested size is larger than ...
Definition: dim-vector.cc:98
dim_vector make_nd_vector(octave_idx_type n) const
Definition: dim-vector.h:421
virtual octave_idx_type numel(void) const
Definition: ov-base.h:373
void set_compare(const compare_fcn_type &comp)
Definition: oct-sort.h:121
void sort_rows(const T *data, octave_idx_type *idx, octave_idx_type rows, octave_idx_type cols)
Definition: oct-sort.cc:1665
static bool descending_compare(typename ref_param< T >::type, typename ref_param< T >::type)
Definition: oct-sort.cc:1975
bool issorted(const T *data, octave_idx_type nel)
Definition: oct-sort.cc:1577
octave_idx_type lookup(const T *data, octave_idx_type nel, const T &value)
Definition: oct-sort.cc:1787
void sort(T *data, octave_idx_type nel)
Definition: oct-sort.cc:1520
void nth_element(T *data, octave_idx_type nel, octave_idx_type lo, octave_idx_type up=-1)
Definition: oct-sort.cc:1945
bool is_sorted_rows(const T *data, octave_idx_type rows, octave_idx_type cols)
Definition: oct-sort.cc:1742
void lookup_sorted(const T *data, octave_idx_type nel, const T *values, octave_idx_type nvalues, octave_idx_type *idx, bool rev=false)
Definition: oct-sort.cc:1898
void assign(const T *src, T *dest) const
Definition: Array-base.cc:570
octave::idx_vector * m_idx
Definition: Array-base.cc:640
~rec_index_helper(void)
Definition: Array-base.cc:564
rec_index_helper & operator=(const rec_index_helper &)=delete
const T * do_assign(const T *src, T *dest, int lev) const
Definition: Array-base.cc:601
rec_index_helper(const dim_vector &dv, const Array< octave::idx_vector > &ia)
Definition: Array-base.cc:529
octave_idx_type * m_cdim
Definition: Array-base.cc:639
void index(const T *src, T *dest) const
Definition: Array-base.cc:567
void do_fill(const T &val, T *dest, int lev) const
Definition: Array-base.cc:618
T * do_index(const T *src, T *dest, int lev) const
Definition: Array-base.cc:584
void fill(const T &val, T *dest) const
Definition: Array-base.cc:573
octave_idx_type * m_dim
Definition: Array-base.cc:638
bool is_cont_range(octave_idx_type &l, octave_idx_type &u) const
Definition: Array-base.cc:575
void permute(const T *src, T *dest) const
Definition: Array-base.cc:362
T * do_permute(const T *src, T *dest, int lev) const
Definition: Array-base.cc:407
static T * blk_trans(const T *src, T *dest, octave_idx_type nr, octave_idx_type nc)
Definition: Array-base.cc:367
octave_idx_type * m_stride
Definition: Array-base.cc:447
rec_permute_helper(const dim_vector &dv, const Array< octave_idx_type > &perm)
Definition: Array-base.cc:315
rec_permute_helper & operator=(const rec_permute_helper &)=delete
rec_permute_helper(const rec_permute_helper &)=delete
octave_idx_type * m_dim
Definition: Array-base.cc:446
octave_idx_type * m_dext
Definition: Array-base.cc:713
rec_resize_helper(const rec_resize_helper &)=delete
void do_resize_fill(const T *src, T *dest, const T &rfv, int lev) const
Definition: Array-base.cc:690
octave_idx_type * m_sext
Definition: Array-base.cc:712
void resize_fill(const T *src, T *dest, const T &rfv) const
Definition: Array-base.cc:683
~rec_resize_helper(void)
Definition: Array-base.cc:680
rec_resize_helper(const dim_vector &ndv, const dim_vector &odv)
Definition: Array-base.cc:649
rec_resize_helper & operator=(const rec_resize_helper &)=delete
octave_idx_type * m_cext
Definition: Array-base.cc:711
if_then_else< is_class_type< T >::no, T, T const & >::result type
Definition: lo-traits.h:121
static OCTAVE_NORETURN void err_index_out_of_range(void)
Definition: idx-vector.cc:52
octave::idx_vector idx_vector
Definition: idx-vector.h:1039
void err_invalid_resize(void)
void err_nonconformant(const char *op, octave_idx_type op1_len, octave_idx_type op2_len)
void err_invalid_index(const std::string &idx, octave_idx_type nd, octave_idx_type dim, const std::string &)
void err_del_index_out_of_range(bool is1d, octave_idx_type idx, octave_idx_type ext)
Complex log2(const Complex &x)
Definition: lo-mappers.cc:139
F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE * d
F77_RET_T const F77_DBLE * x
T octave_idx_type m
Definition: mx-inlines.cc:773
octave_idx_type n
Definition: mx-inlines.cc:753
T * r
Definition: mx-inlines.cc:773
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition: oct-locbuf.h:44
#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value)
Definition: oct-locbuf.h:50
sortmode
Definition: oct-sort.h:97
@ UNSORTED
Definition: oct-sort.h:97
@ ASCENDING
Definition: oct-sort.h:97
@ DESCENDING
Definition: oct-sort.h:97
T::size_type numel(const T &str)
Definition: oct-string.cc:71
const octave_base_value const Array< octave_idx_type > & ra_idx
static T abs(T x)
Definition: pr-output.cc:1678
F77_RET_T len
Definition: xerbla.cc:61