GNU Octave  9.1.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
Array-base.cc
Go to the documentation of this file.
1 ////////////////////////////////////////////////////////////////////////
2 //
3 // Copyright (C) 1993-2024 The Octave Project Developers
4 //
5 // See the file COPYRIGHT.md in the top-level directory of this
6 // distribution or <https://octave.org/copyright/>.
7 //
8 // This file is part of Octave.
9 //
10 // Octave is free software: you can redistribute it and/or modify it
11 // under the terms of the GNU General Public License as published by
12 // the Free Software Foundation, either version 3 of the License, or
13 // (at your option) any later version.
14 //
15 // Octave is distributed in the hope that it will be useful, but
16 // WITHOUT ANY WARRANTY; without even the implied warranty of
17 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 // GNU General Public License for more details.
19 //
20 // You should have received a copy of the GNU General Public License
21 // along with Octave; see the file COPYING. If not, see
22 // <https://www.gnu.org/licenses/>.
23 //
24 ////////////////////////////////////////////////////////////////////////
25 
26 // Do not include this file directly to instantiate the Array<T> template
27 // class in code outside liboctave. Include "Array-oct.cc" or "Array.cc"
28 // instead.
29 
30 // This file should not include config.h. It is only included in other
31 // C++ source files that should have included config.h before including
32 // this file.
33 
34 #include <ostream>
35 
36 #include "Array-util.h"
37 #include "Array.h"
38 #include "lo-mappers.h"
39 #include "oct-locbuf.h"
40 
41 // One dimensional array class. Handles the reference counting for
42 // all the derived classes.
43 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type/signature lines here
// (upstream: typename Array<T, Alloc>::ArrayRep * Array<T, Alloc>::nil_rep ()).
{
  // Shared representation used by all empty arrays: a function-local static,
  // constructed on first use, one instance per instantiated Array<T>.
  static ArrayRep nr;
  return &nr;
}
51 
// This is similar to the template for containers but specialized for Array.
// Note we can't specialize a member without also specializing the class.
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the constructor signature line here
// (upstream: Array<T, Alloc>::Array (const Array<T, Alloc>& a, const dim_vector& dv)).
// "Reshape" constructor: share A's representation but view it under the new
// dimensions DV.  DV must describe exactly a.numel () elements.
 : m_dimensions (dv), m_rep (a.m_rep),
   m_slice_data (a.m_slice_data), m_slice_len (a.m_slice_len)
{
  bool invalid_size = false;

  octave_idx_type new_numel = 0;

  try
    {
      // Safe numel may throw a bad_alloc error because the new
      // dimensions are too large to allocate.  Trap that here so
      // we can issue a more helpful diagnostic that is consistent
      // with other size mismatch failures.

      new_numel = m_dimensions.safe_numel ();
    }
  catch (const std::bad_alloc&)
    {
      invalid_size = true;
    }

  // A reshape must preserve the element count exactly.
  if (invalid_size || new_numel != a.numel ())
    {
      std::string dimensions_str = a.m_dimensions.str ();
      std::string new_dims_str = m_dimensions.str ();

      (*current_liboctave_error_handler)
        ("reshape: can't reshape %s array to %s array",
         dimensions_str.c_str (), new_dims_str.c_str ());
    }

  // This goes here because if an exception is thrown by the above,
  // destructor will be never called.
  m_rep->m_count++;
}
92 
93 template <typename T, typename Alloc>
94 void
95 Array<T, Alloc>::fill (const T& val)
96 {
97  if (m_rep->m_count > 1)
98  {
99  --m_rep->m_count;
100  m_rep = new ArrayRep (numel (), val);
101  m_slice_data = m_rep->m_data;
102  }
103  else
104  std::fill_n (m_slice_data, m_slice_len, val);
105 }
106 
template <typename T, typename Alloc>
void
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::clear ()).
// Reset this array to the shared empty (nil) representation.
{
  // Release our reference to the current representation.
  if (--m_rep->m_count == 0)
    delete m_rep;

  // Point at the shared nil rep and take a reference on it.
  m_rep = nil_rep ();
  m_rep->m_count++;
  m_slice_data = m_rep->m_data;
  m_slice_len = m_rep->m_len;

  m_dimensions = dim_vector ();
}
121 
template <typename T, typename Alloc>
void
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::clear (const dim_vector& dv)).
// Discard the current contents and allocate a fresh, uninitialized
// representation of shape DV.
{
  // Release our reference to the current representation.
  if (--m_rep->m_count == 0)
    delete m_rep;

  // safe_numel throws on overflowing / negative sizes.
  m_rep = new ArrayRep (dv.safe_numel ());
  m_slice_data = m_rep->m_data;
  m_slice_len = m_rep->m_len;

  m_dimensions = dv;
  m_dimensions.chop_trailing_singletons ();
}
136 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type/signature lines here
// (upstream: Array<T, Alloc> Array<T, Alloc>::squeeze () const).
// Remove singleton dimensions from an N-D array; arrays that are already
// 2-D are returned unchanged (shallow copy).
{
  Array<T, Alloc> retval = *this;

  if (ndims () > 2)
    {
      bool dims_changed = false;

      dim_vector new_dimensions = m_dimensions;

      // K counts the non-singleton dimensions kept.
      int k = 0;

      for (int i = 0; i < ndims (); i++)
        {
          if (m_dimensions(i) == 1)
            dims_changed = true;
          else
            new_dimensions(k++) = m_dimensions(i);
        }

      if (dims_changed)
        {
          switch (k)
            {
            case 0:
              // Every dimension was a singleton: result is 1x1.
              new_dimensions = dim_vector (1, 1);
              break;

            case 1:
              {
                // One non-singleton dimension left: make an N x 1
                // column vector.
                octave_idx_type tmp = new_dimensions(0);

                new_dimensions.resize (2);

                new_dimensions(0) = tmp;
                new_dimensions(1) = 1;
              }
              break;

            default:
              new_dimensions.resize (k);
              break;
            }
        }

      // Same data, new shape (shallow reshape copy).
      retval = Array<T, Alloc> (*this, new_dimensions);
    }

  return retval;
}
189 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type/signature lines here
// (upstream: octave_idx_type Array<T, Alloc>::compute_index (octave_idx_type i, octave_idx_type j) const).
// Map a (row, column) pair to a linear (column-major) index, with bounds
// checking done by the global helper.
{
  return ::compute_index (i, j, m_dimensions);
}
196 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type and the first
// signature line here (upstream: octave_idx_type
// Array<T, Alloc>::compute_index (octave_idx_type i, octave_idx_type j,
  octave_idx_type k) const
// Map an (i, j, k) triple to a linear (column-major) index.
{
  return ::compute_index (i, j, k, m_dimensions);
}
204 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type/signature lines here
// (upstream: octave_idx_type Array<T, Alloc>::compute_index (const Array<octave_idx_type>& ra_idx) const).
// Map an N-D subscript vector to a linear (column-major) index.
{
  return ::compute_index (ra_idx, m_dimensions);
}
211 
template <typename T, typename Alloc>
T&
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::checkelem (octave_idx_type n)).
// Bounds-checked element access by linear index.
{
  // Do checks directly to avoid recomputing m_slice_len.
  if (n < 0)
  // NOTE(review): the scraped copy dropped the statement that was the body
  // of the conditional above (upstream raises an invalid-index error).
  if (n >= m_slice_len)
    octave::err_index_out_of_range (1, 1, n+1, m_slice_len, m_dimensions);

  return elem (n);
}
224 
template <typename T, typename Alloc>
T&
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::checkelem (octave_idx_type i, octave_idx_type j)).
// Bounds-checked element access by (row, column); compute_index checks.
{
  return elem (compute_index (i, j));
}
231 
template <typename T, typename Alloc>
T&
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::checkelem (octave_idx_type i, octave_idx_type j, octave_idx_type k)).
// Bounds-checked element access by (i, j, k); compute_index checks.
{
  return elem (compute_index (i, j, k));
}
238 
template <typename T, typename Alloc>
T&
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::checkelem (const Array<octave_idx_type>& ra_idx)).
// Bounds-checked element access by N-D subscript; compute_index checks.
{
  return elem (compute_index (ra_idx));
}
245 
template <typename T, typename Alloc>
typename Array<T, Alloc>::crefT
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::checkelem (octave_idx_type n) const).
// Const bounds-checked element access by linear index.
{
  // Do checks directly to avoid recomputing m_slice_len.
  if (n < 0)
  // NOTE(review): the scraped copy dropped the statement that was the body
  // of the conditional above (upstream raises an invalid-index error).
  if (n >= m_slice_len)
    octave::err_index_out_of_range (1, 1, n+1, m_slice_len, m_dimensions);

  return elem (n);
}
258 
template <typename T, typename Alloc>
typename Array<T, Alloc>::crefT
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::checkelem (octave_idx_type i, octave_idx_type j) const).
// Const bounds-checked element access by (row, column).
{
  return elem (compute_index (i, j));
}
265 
template <typename T, typename Alloc>
typename Array<T, Alloc>::crefT
// NOTE(review): the scraped copy dropped the first signature line here
// (upstream: Array<T, Alloc>::checkelem (octave_idx_type i, octave_idx_type j,
  octave_idx_type k) const
// Const bounds-checked element access by (i, j, k).
{
  return elem (compute_index (i, j, k));
}
273 
template <typename T, typename Alloc>
typename Array<T, Alloc>::crefT
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::checkelem (const Array<octave_idx_type>& ra_idx) const).
// Const bounds-checked element access by N-D subscript.
{
  return elem (compute_index (ra_idx));
}
280 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type/signature lines here
// (upstream: Array<T, Alloc> Array<T, Alloc>::column (octave_idx_type k) const).
// Return column K as a shallow (reference-counted) R x 1 slice.
{
  octave_idx_type r = m_dimensions(0);

  // Elements [k*r, k*r + r) of the flat data are exactly column K.
  return Array<T, Alloc> (*this, dim_vector (r, 1), k*r, k*r + r);
}
289 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type/signature lines here
// (upstream: Array<T, Alloc> Array<T, Alloc>::page (octave_idx_type k) const).
// Return page (2-D slab) K as a shallow R x C slice.
{
  octave_idx_type r = m_dimensions(0);
  octave_idx_type c = m_dimensions(1);
  octave_idx_type p = r*c;

  // Elements [k*p, k*p + p) of the flat data are exactly page K.
  return Array<T, Alloc> (*this, dim_vector (r, c), k*p, k*p + p);
}
300 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type/signature lines here
// (upstream: Array<T, Alloc> Array<T, Alloc>::linear_slice (octave_idx_type lo, octave_idx_type up) const).
// Return elements [lo, up) as a shallow column-vector slice; an empty
// result is produced when up < lo.
{
  if (up < lo)
    up = lo;

  return Array<T, Alloc> (*this, dim_vector (up - lo, 1), lo, up);
}
310 
311 // Helper class for multi-d dimension permuting (generalized transpose).
// Helper class for multi-d dimension permuting (generalized transpose).
class rec_permute_helper
{
public:

  // Precompute the permuted dimensions and strides needed to permute an
  // array of shape DV by the permutation PERM (PERM must have dv.ndims ()
  // elements).
  rec_permute_helper (const dim_vector& dv, const Array<octave_idx_type>& perm)

    : m_n (dv.ndims ()), m_top (0), m_dim (new octave_idx_type [2*m_n]),
      m_stride (m_dim + m_n), m_use_blk (false)
  {
    assert (m_n == perm.numel ());

    // Get cumulative dimensions.
    OCTAVE_LOCAL_BUFFER (octave_idx_type, cdim, m_n+1);
    cdim[0] = 1;
    for (int i = 1; i < m_n+1; i++) cdim[i] = cdim[i-1] * dv(i-1);

    // Setup the permuted strides.
    for (int k = 0; k < m_n; k++)
      {
        int kk = perm(k);
        m_dim[k] = dv(kk);
        m_stride[k] = cdim[kk];
      }

    // Reduce contiguous runs: adjacent permuted dimensions whose strides
    // chain contiguously are folded into a single dimension, shrinking
    // the recursion depth.  M_TOP ends as the index of the last kept dim.
    for (int k = 1; k < m_n; k++)
      {
        if (m_stride[k] == m_stride[m_top]*m_dim[m_top])
          m_dim[m_top] *= m_dim[k];
        else
          {
            m_top++;
            m_dim[m_top] = m_dim[k];
            m_stride[m_top] = m_stride[k];
          }
      }

    // Determine whether we can use block transposes.
    m_use_blk = m_top >= 1 && m_stride[1] == 1 && m_stride[0] == m_dim[1];

  }

  OCTAVE_DISABLE_CONSTRUCT_COPY_MOVE (rec_permute_helper)

  // M_STRIDE points into the same allocation, so only M_DIM is freed.
  ~rec_permute_helper () { delete [] m_dim; }

  template <typename T>
  void permute (const T *src, T *dest) const { do_permute (src, dest, m_top); }

  // Helper method for fast blocked transpose.  Transposes an NR x NC
  // column-major matrix from SRC into DEST using m x m cache blocks;
  // returns the advanced destination pointer.
  template <typename T>
  static T *
  blk_trans (const T *src, T *dest, octave_idx_type nr, octave_idx_type nc)
  {
    static const octave_idx_type m = 8;
    OCTAVE_LOCAL_BUFFER (T, blk, m*m);
    for (octave_idx_type kr = 0; kr < nr; kr += m)
      for (octave_idx_type kc = 0; kc < nc; kc += m)
        {
          octave_idx_type lr = std::min (m, nr - kr);
          octave_idx_type lc = std::min (m, nc - kc);
          if (lr == m && lc == m)
            {
              // Full m x m block: copy in, write transposed.
              const T *ss = src + kc * nr + kr;
              for (octave_idx_type j = 0; j < m; j++)
                for (octave_idx_type i = 0; i < m; i++)
                  blk[j*m+i] = ss[j*nr + i];
              T *dd = dest + kr * nc + kc;
              for (octave_idx_type j = 0; j < m; j++)
                for (octave_idx_type i = 0; i < m; i++)
                  dd[j*nc+i] = blk[i*m+j];
            }
          else
            {
              // Partial edge block (lr x lc).
              const T *ss = src + kc * nr + kr;
              for (octave_idx_type j = 0; j < lc; j++)
                for (octave_idx_type i = 0; i < lr; i++)
                  blk[j*m+i] = ss[j*nr + i];
              T *dd = dest + kr * nc + kc;
              for (octave_idx_type j = 0; j < lr; j++)
                for (octave_idx_type i = 0; i < lc; i++)
                  dd[j*nc+i] = blk[i*m+j];
            }
        }

    return dest + nr*nc;
  }

private:

  // Recursive N-D generalized transpose.  LEV indexes the (reduced)
  // permuted dimension being traversed; returns the advanced DEST.
  template <typename T>
  T * do_permute (const T *src, T *dest, int lev) const
  {
    if (lev == 0)
      {
        octave_idx_type step = m_stride[0];
        octave_idx_type len = m_dim[0];
        if (step == 1)
          {
            // Innermost run is contiguous: bulk copy.
            std::copy_n (src, len, dest);
            dest += len;
          }
        else
          {
            // Strided gather into a contiguous destination.
            for (octave_idx_type i = 0, j = 0; i < len; i++, j += step)
              dest[i] = src[j];

            dest += len;
          }
      }
    else if (m_use_blk && lev == 1)
      dest = blk_trans (src, dest, m_dim[1], m_dim[0]);
    else
      {
        octave_idx_type step = m_stride[lev];
        octave_idx_type len = m_dim[lev];
        for (octave_idx_type i = 0, j = 0; i < len; i++, j+= step)
          dest = do_permute (src + i * step, dest, lev-1);
      }

    return dest;
  }

  //--------

  // STRIDE occupies the last half of the space allocated for dim to
  // avoid a double allocation.

  int m_n;
  int m_top;
  octave_idx_type *m_dim;
  octave_idx_type *m_stride;
  bool m_use_blk;
};
447 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type line here
// (upstream: Array<T, Alloc>).
Array<T, Alloc>::permute (const Array<octave_idx_type>& perm_vec_arg, bool inv) const
{
  // Generalized transpose: rearrange dimensions by PERM_VEC_ARG (inverse
  // permutation when INV is true).  Errors if the vector is too short,
  // out of range, or contains duplicates.
  Array<T, Alloc> retval;

  Array<octave_idx_type> perm_vec = perm_vec_arg;

  dim_vector dv = dims ();

  int perm_vec_len = perm_vec_arg.numel ();

  if (perm_vec_len < dv.ndims ())
    (*current_liboctave_error_handler)
      ("%s: invalid permutation vector", inv ? "ipermute" : "permute");

  dim_vector dv_new = dim_vector::alloc (perm_vec_len);

  // Append singleton dimensions as needed.
  dv.resize (perm_vec_len, 1);

  // Need this array to check for identical elements in permutation array.
  OCTAVE_LOCAL_BUFFER_INIT (bool, checked, perm_vec_len, false);

  bool identity = true;

  // Find dimension vector of permuted array.
  for (int i = 0; i < perm_vec_len; i++)
    {
      octave_idx_type perm_elt = perm_vec.elem (i);
      if (perm_elt >= perm_vec_len || perm_elt < 0)
        (*current_liboctave_error_handler)
          ("%s: permutation vector contains an invalid element",
           inv ? "ipermute" : "permute");

      if (checked[perm_elt])
        (*current_liboctave_error_handler)
          ("%s: permutation vector cannot contain identical elements",
           inv ? "ipermute" : "permute");
      else
        {
          checked[perm_elt] = true;
          identity = identity && perm_elt == i;
        }
    }

  // The identity permutation is a no-op: return a shallow copy.
  if (identity)
    return *this;

  if (inv)
    {
      // Invert the permutation in place.
      for (int i = 0; i < perm_vec_len; i++)
        perm_vec(perm_vec_arg(i)) = i;
    }

  for (int i = 0; i < perm_vec_len; i++)
    dv_new(i) = dv(perm_vec(i));

  retval = Array<T, Alloc> (dv_new);

  if (numel () > 0)
    {
      rec_permute_helper rh (dv, perm_vec);
      rh.permute (data (), retval.fortran_vec ());
    }

  return retval;
}
516 
517 // Helper class for multi-d index reduction and recursive
518 // indexing/indexed assignment. Rationale: we could avoid recursion
519 // using a state machine instead. However, using recursion is much
520 // more amenable to possible parallelization in the future.
521 // Also, the recursion solution is cleaner and more understandable.
522 
class rec_index_helper
{
public:

  // Fold the index list IA against the dimensions DV, reducing adjacent
  // index/dimension pairs where possible so the recursion is as shallow
  // as it can be.
  rec_index_helper (const dim_vector& dv, const Array<octave::idx_vector>& ia)
    : m_n (ia.numel ()), m_top (0), m_dim (new octave_idx_type [2*m_n]),
      m_cdim (m_dim + m_n), m_idx (new octave::idx_vector [m_n])
  {
    assert (m_n > 0 && dv.ndims () == std::max (m_n, static_cast<octave_idx_type> (2)));

    m_dim[0] = dv(0);
    m_cdim[0] = 1;
    m_idx[0] = ia(0);

    for (octave_idx_type i = 1; i < m_n; i++)
      {
        // Try reduction...
        if (m_idx[m_top].maybe_reduce (m_dim[m_top], ia(i), dv(i)))
          {
            // Reduction successful, fold dimensions.
            m_dim[m_top] *= dv(i);
          }
        else
          {
            // Unsuccessful, store index & cumulative m_dim.
            m_top++;
            m_idx[m_top] = ia(i);
            m_dim[m_top] = dv(i);
            m_cdim[m_top] = m_cdim[m_top-1] * m_dim[m_top-1];
          }
      }
  }

  OCTAVE_DISABLE_CONSTRUCT_COPY_MOVE (rec_index_helper)

  // M_CDIM shares M_DIM's allocation; only M_DIM and M_IDX are freed.
  ~rec_index_helper () { delete [] m_idx; delete [] m_dim; }

  // Extract the indexed elements of SRC into DEST.
  template <typename T>
  void index (const T *src, T *dest) const { do_index (src, dest, m_top); }

  // Assign SRC into the indexed positions of DEST.
  template <typename T>
  void assign (const T *src, T *dest) const { do_assign (src, dest, m_top); }

  // Fill the indexed positions of DEST with VAL.
  template <typename T>
  void fill (const T& val, T *dest) const { do_fill (val, dest, m_top); }

  // True when the whole (reduced) index denotes one contiguous range
  // [l, u); used by callers to produce shallow slices.
  bool is_cont_range (octave_idx_type& l, octave_idx_type& u) const
  {
    return m_top == 0 && m_idx[0].is_cont_range (m_dim[0], l, u);
  }

private:

  // Recursive N-D indexing
  template <typename T>
  T * do_index (const T *src, T *dest, octave_idx_type lev) const
  {
    if (lev == 0)
      dest += m_idx[0].index (src, m_dim[0], dest);
    else
      {
        octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
        octave_idx_type d = m_cdim[lev];
        for (octave_idx_type i = 0; i < nn; i++)
          dest = do_index (src + d*m_idx[lev].xelem (i), dest, lev-1);
      }

    return dest;
  }

  // Recursive N-D indexed assignment
  template <typename T>
  const T * do_assign (const T *src, T *dest, octave_idx_type lev) const
  {
    if (lev == 0)
      src += m_idx[0].assign (src, m_dim[0], dest);
    else
      {
        octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
        octave_idx_type d = m_cdim[lev];
        for (octave_idx_type i = 0; i < nn; i++)
          src = do_assign (src, dest + d*m_idx[lev].xelem (i), lev-1);
      }

    return src;
  }

  // Recursive N-D indexed fill.
  template <typename T>
  void do_fill (const T& val, T *dest, octave_idx_type lev) const
  {
    if (lev == 0)
      m_idx[0].fill (val, m_dim[0], dest);
    else
      {
        octave_idx_type nn = m_idx[lev].length (m_dim[lev]);
        octave_idx_type d = m_cdim[lev];
        for (octave_idx_type i = 0; i < nn; i++)
          do_fill (val, dest + d*m_idx[lev].xelem (i), lev-1);
      }
  }

  //--------

  // CDIM occupies the last half of the space allocated for dim to
  // avoid a double allocation.

  octave_idx_type m_n;
  octave_idx_type m_top;
  // NOTE(review): the scraped copy dropped the declaration of the
  // m_dim member here (upstream: octave_idx_type *m_dim;).
  octave_idx_type *m_cdim;
  octave::idx_vector *m_idx;
};
636 
637 // Helper class for multi-d recursive resizing
638 // This handles resize () in an efficient manner, touching memory only
639 // once (apart from reinitialization)
class rec_resize_helper
{
public:

  // Precompute copy/source/destination extents for resizing from shape
  // ODV to shape NDV (both must have the same number of dimensions).
  rec_resize_helper (const dim_vector& ndv, const dim_vector& odv)
    : m_cext (nullptr), m_sext (nullptr), m_dext (nullptr), m_n (0)
  {
    int l = ndv.ndims ();
    assert (odv.ndims () == l);
    octave_idx_type ld = 1;
    int i = 0;
    // Leading dimensions that are unchanged can be copied as one run.
    for (; i < l-1 && ndv(i) == odv(i); i++) ld *= ndv(i);
    m_n = l - i;
    m_cext = new octave_idx_type [3*m_n];
    // Trick to avoid three allocations
    m_sext = m_cext + m_n;
    m_dext = m_sext + m_n;

    // Cumulative extents: CEXT = elements to copy per level, SEXT/DEXT =
    // source/destination strides per level.
    octave_idx_type sld = ld;
    octave_idx_type dld = ld;
    for (int j = 0; j < m_n; j++)
      {
        m_cext[j] = std::min (ndv(i+j), odv(i+j));
        m_sext[j] = sld *= odv(i+j);
        m_dext[j] = dld *= ndv(i+j);
      }
    // Fold the collapsed leading run into the innermost copy length.
    m_cext[0] *= ld;
  }

  OCTAVE_DISABLE_CONSTRUCT_COPY_MOVE (rec_resize_helper)

  // M_SEXT/M_DEXT share M_CEXT's allocation; only M_CEXT is freed.
  ~rec_resize_helper () { delete [] m_cext; }

  // Copy SRC into DEST (shaped per the constructor's NDV), padding any
  // newly created space with RFV.
  template <typename T>
  void resize_fill (const T *src, T *dest, const T& rfv) const
  { do_resize_fill (src, dest, rfv, m_n-1); }

private:

  // recursive resizing
  template <typename T>
  void do_resize_fill (const T *src, T *dest, const T& rfv, int lev) const
  {
    if (lev == 0)
      {
        // Innermost level: copy the overlap, pad the remainder.
        std::copy_n (src, m_cext[0], dest);
        std::fill_n (dest + m_cext[0], m_dext[0] - m_cext[0], rfv);
      }
    else
      {
        octave_idx_type sd, dd, k;
        sd = m_sext[lev-1];
        dd = m_dext[lev-1];
        for (k = 0; k < m_cext[lev]; k++)
          do_resize_fill (src + k * sd, dest + k * dd, rfv, lev - 1);

        // Pad the slabs beyond the old extent in one go.
        std::fill_n (dest + k * dd, m_dext[lev] - k * dd, rfv);
      }
  }

  //--------

  octave_idx_type *m_cext;
  octave_idx_type *m_sext;
  octave_idx_type *m_dext;
  int m_n;
};
707 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type/signature lines here
// (upstream: Array<T, Alloc> Array<T, Alloc>::index (const octave::idx_vector& i) const).
{
  // Colon:
  //
  // object | index | result orientation
  // ---------+----------+-------------------
  // anything | colon | column vector
  //
  // Numeric array or logical mask:
  //
  // Note that logical mask indices are always transformed to vectors
  // before we see them.
  //
  // object | index | result orientation
  // ---------+----------+-------------------
  // vector | vector | indexed object
  // | other | same size as index
  // ---------+----------+-------------------
  // array | anything | same size as index

  octave_idx_type n = numel ();
  Array<T, Alloc> retval;

  if (i.is_colon ())
    {
      // A(:) produces a shallow copy as a column vector.
      retval = Array<T, Alloc> (*this, dim_vector (n, 1));
    }
  else
    {
      if (i.extent (n) != n)
        octave::err_index_out_of_range (1, 1, i.extent (n), n, m_dimensions);

      dim_vector result_dims = i.orig_dimensions ();
      octave_idx_type idx_len = i.length ();

      if (n != 1 && is_nd_vector () && idx_len != 1
          && result_dims.is_nd_vector ())
        {
          // Indexed object and index are both vectors.  Set result size
          // and orientation as above.

          dim_vector dv = dims ();

          result_dims = dv.make_nd_vector (idx_len);
        }

      // NOTE(review): the scraped copy dropped the declaration line here
      // (upstream: octave_idx_type l, u;).
      if (idx_len != 0 && i.is_cont_range (n, l, u))
        // If suitable, produce a shallow slice.
        retval = Array<T, Alloc> (*this, result_dims, l, u);
      else
        {
          // Don't use resize here to avoid useless initialization for POD
          // types.
          retval = Array<T, Alloc> (result_dims);

          if (idx_len != 0)
            i.index (data (), n, retval.fortran_vec ());
        }
    }

  return retval;
}
774 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type/signature lines here
// (upstream: Array<T, Alloc> Array<T, Alloc>::index (const octave::idx_vector& i,
// const octave::idx_vector& j) const).
{
  // Get dimensions, allowing Fortran indexing in the 2nd dim.
  dim_vector dv = m_dimensions.redim (2);
  octave_idx_type r = dv(0);
  octave_idx_type c = dv(1);
  // NOTE(review): the scraped copy dropped the declaration line here
  // (upstream: Array<T, Alloc> retval;).

  if (i.is_colon () && j.is_colon ())
    {
      // A(:,:) produces a shallow copy.
      retval = Array<T, Alloc> (*this, dv);
    }
  else
    {
      if (i.extent (r) != r)
        octave::err_index_out_of_range (2, 1, i.extent (r), r, m_dimensions); // throws
      if (j.extent (c) != c)
        octave::err_index_out_of_range (2, 2, j.extent (c), c, m_dimensions); // throws

      octave_idx_type n = numel ();
      octave_idx_type il = i.length (r);
      octave_idx_type jl = j.length (c);

      octave::idx_vector ii (i);

      // Try to fold (i, j) into a single linear index.
      if (ii.maybe_reduce (r, j, c))
        {
          octave_idx_type l, u;
          if (ii.length () > 0 && ii.is_cont_range (n, l, u))
            // If suitable, produce a shallow slice.
            retval = Array<T, Alloc> (*this, dim_vector (il, jl), l, u);
          else
            {
              // Don't use resize to avoid useless initialization for POD types.
              retval = Array<T, Alloc> (dim_vector (il, jl));

              ii.index (data (), n, retval.fortran_vec ());
            }
        }
      else
        {
          // Don't use resize to avoid useless initialization for POD types.
          retval = Array<T, Alloc> (dim_vector (il, jl));

          const T *src = data ();
          T *dest = retval.fortran_vec ();

          // Gather column-by-column: row index I applied within each
          // selected column J(k).
          for (octave_idx_type k = 0; k < jl; k++)
            dest += i.index (src + r * j.xelem (k), r, dest);
        }
    }

  return retval;
}
832 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type/signature lines here
// (upstream: Array<T, Alloc> Array<T, Alloc>::index (const Array<octave::idx_vector>& ia) const).
{
  int ial = ia.numel ();
  Array<T, Alloc> retval;

  // FIXME: is this dispatching necessary?
  if (ial == 1)
    retval = index (ia(0));
  else if (ial == 2)
    retval = index (ia(0), ia(1));
  else if (ial > 0)
    {
      // Get dimensions, allowing Fortran indexing in the last dim.
      dim_vector dv = m_dimensions.redim (ial);

      // Check for out of bounds conditions.
      bool all_colons = true;
      for (int i = 0; i < ial; i++)
        {
          if (ia(i).extent (dv(i)) != dv(i))
            octave::err_index_out_of_range (ial, i+1, ia(i).extent (dv(i)), dv(i),
                                            m_dimensions); // throws

          all_colons = all_colons && ia(i).is_colon ();
        }

      if (all_colons)
        {
          // A(:,:,...,:) produces a shallow copy.
          retval = Array<T, Alloc> (*this, dv);
        }
      else
        {
          // Form result dimensions.
          dim_vector rdv = dim_vector::alloc (ial);
          for (int i = 0; i < ial; i++) rdv(i) = ia(i).length (dv(i));

          // Prepare for recursive indexing
          rec_index_helper rh (dv, ia);

          octave_idx_type l, u;
          if (rh.is_cont_range (l, u))
            // If suitable, produce a shallow slice.
            retval = Array<T, Alloc> (*this, rdv, l, u);
          else
            {
              // Don't use resize to avoid useless initialization for POD types.
              retval = Array<T, Alloc> (rdv);

              // Do it.
              rh.index (data (), retval.fortran_vec ());
            }
        }
    }

  return retval;
}
894 
895 // The default fill value. Override if you want a different one.
896 
template <typename T, typename Alloc>
T
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::resize_fill_value () const).
// Default padding value used when resizing: a value-initialized T.
{
  static T zero = T ();
  return zero;
}
904 
905 // Yes, we could do resize using index & assign. However, that would
906 // possibly involve a lot more memory traffic than we actually need.
907 
template <typename T, typename Alloc>
void
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::resize1 (octave_idx_type n, const T& rfv)).
// Resize a vector-shaped 2-D array to N elements, padding with RFV.
{
  if (n < 0 || ndims () != 2)
  // NOTE(review): the scraped copy dropped the statement that was the body
  // of the conditional above (upstream raises an invalid-resize error).

  dim_vector dv;
  // This is driven by Matlab's behavior of giving a *row* vector
  // on some out-of-bounds assignments. Specifically, Matlab
  // allows a(i) with out-of-bouds i when a is either of 0x0, 1x0,
  // 1x1, 0xN, and gives a row vector in all cases (yes, even the
  // last one, search me why). Giving a column vector would make
  // much more sense (given the way trailing singleton dims are
  // treated).
  bool invalid = false;
  if (rows () == 0 || rows () == 1)
    dv = dim_vector (1, n);
  else if (columns () == 1)
    dv = dim_vector (n, 1);
  else
    invalid = true;

  if (invalid)
  // NOTE(review): the scraped copy dropped the statement that was the body
  // of the conditional above (upstream raises an invalid-resize error).

  octave_idx_type nx = numel ();
  if (n == nx - 1 && n > 0)
    {
      // Stack "pop" operation.
      // Only clear the popped slot when we own the rep exclusively.
      if (m_rep->m_count == 1)
        m_slice_data[m_slice_len-1] = T ();
      m_slice_len--;
      m_dimensions = dv;
    }
  else if (n == nx + 1 && nx > 0)
    {
      // Stack "push" operation.
      // Cheap path: exclusive rep with spare capacity after the slice.
      if (m_rep->m_count == 1
          && m_slice_data + m_slice_len < m_rep->m_data + m_rep->m_len)
        {
          m_slice_data[m_slice_len++] = rfv;
          m_dimensions = dv;
        }
      else
        {
          // Reallocate with extra headroom so repeated pushes amortize.
          static const octave_idx_type max_stack_chunk = 1024;
          octave_idx_type nn = n + std::min (nx, max_stack_chunk);
          Array<T, Alloc> tmp (Array<T, Alloc> (dim_vector (nn, 1)), dv, 0, n);
          T *dest = tmp.fortran_vec ();

          std::copy_n (data (), nx, dest);
          dest[nx] = rfv;

          *this = tmp;
        }
    }
  else if (n != nx)
    {
      // General resize: copy the overlap, pad the rest with RFV.
      Array<T, Alloc> tmp = Array<T, Alloc> (dv);
      T *dest = tmp.fortran_vec ();

      octave_idx_type n0 = std::min (n, nx);
      octave_idx_type n1 = n - n0;
      std::copy_n (data (), n0, dest);
      std::fill_n (dest + n0, n1, rfv);

      *this = tmp;
    }
}
978 
template <typename T, typename Alloc>
void
// NOTE(review): the scraped copy dropped the signature line here
// (upstream: Array<T, Alloc>::resize2 (octave_idx_type r, octave_idx_type c, const T& rfv)).
// Resize a 2-D array to R x C, padding new cells with RFV.
{
  if (r < 0 || c < 0 || ndims () != 2)
  // NOTE(review): the scraped copy dropped the statement that was the body
  // of the conditional above (upstream raises an invalid-resize error).

  octave_idx_type rx = rows ();
  octave_idx_type cx = columns ();
  if (r != rx || c != cx)
    {
      // NOTE(review): the scraped copy dropped the declaration line here
      // (upstream: Array<T, Alloc> tmp = Array<T, Alloc> (dim_vector (r, c));).
      T *dest = tmp.fortran_vec ();

      octave_idx_type r0 = std::min (r, rx);
      octave_idx_type r1 = r - r0;
      octave_idx_type c0 = std::min (c, cx);
      octave_idx_type c1 = c - c0;
      const T *src = data ();
      if (r == rx)
        {
          // Row count unchanged: the overlapping columns are one
          // contiguous run.
          std::copy_n (src, r * c0, dest);
          dest += r * c0;
        }
      else
        {
          // Copy the overlap column by column, padding each new tail.
          for (octave_idx_type k = 0; k < c0; k++)
            {
              std::copy_n (src, r0, dest);
              src += rx;
              dest += r0;
              std::fill_n (dest, r1, rfv);
              dest += r1;
            }
        }

      // Pad the extra trailing columns in one run.
      std::fill_n (dest, r * c1, rfv);

      *this = tmp;
    }
}
1020 
template <typename T, typename Alloc>
void
Array<T, Alloc>::resize (const dim_vector& dv, const T& rfv)
{
  // Resize to shape DV, padding new space with RFV.  2-D targets are
  // dispatched to resize2; N-D targets use the recursive helper.
  int dvl = dv.ndims ();
  if (dvl == 2)
    resize2 (dv(0), dv(1), rfv);
  else if (m_dimensions != dv)
    {
      if (m_dimensions.ndims () > dvl || dv.any_neg ())
      // NOTE(review): the scraped copy dropped the statement that was the
      // body of the conditional above (upstream raises an invalid-resize
      // error).

      Array<T, Alloc> tmp (dv);
      // Prepare for recursive resizing.
      rec_resize_helper rh (dv, m_dimensions.redim (dvl));

      // Do it.
      rh.resize_fill (data (), tmp.fortran_vec (), rfv);
      *this = tmp;
    }
}
1042 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type line here
// (upstream: Array<T, Alloc>).
Array<T, Alloc>::index (const octave::idx_vector& i, bool resize_ok, const T& rfv) const
{
  // Indexing with optional auto-resize: when RESIZE_OK and I reaches past
  // the end, a padded copy (filled with RFV) is indexed instead.
  Array<T, Alloc> tmp = *this;
  if (resize_ok)
    {
      octave_idx_type n = numel ();
      octave_idx_type nx = i.extent (n);
      if (n != nx)
        {
          // A scalar out-of-range index yields just the fill value.
          if (i.is_scalar ())
            return Array<T, Alloc> (dim_vector (1, 1), rfv);
          else
            tmp.resize1 (nx, rfv);
        }

      // If the resize failed (e.g. invalid shape), give up with empty.
      if (tmp.numel () != nx)
        return Array<T, Alloc> ();
    }

  return tmp.index (i);
}
1066 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type and the first
// signature line here (upstream: Array<T, Alloc>
// Array<T, Alloc>::index (const octave::idx_vector& i, const octave::idx_vector& j,
  bool resize_ok, const T& rfv) const
{
  // 2-D indexing with optional auto-resize (see the 1-D overload).
  Array<T, Alloc> tmp = *this;
  if (resize_ok)
    {
      dim_vector dv = m_dimensions.redim (2);
      octave_idx_type r = dv(0);
      octave_idx_type c = dv(1);
      octave_idx_type rx = i.extent (r);
      octave_idx_type cx = j.extent (c);
      if (r != rx || c != cx)
        {
          // Both indices scalar and out of range: just the fill value.
          if (i.is_scalar () && j.is_scalar ())
            return Array<T, Alloc> (dim_vector (1, 1), rfv);
          else
            tmp.resize2 (rx, cx, rfv);
        }

      // If the resize failed, give up with empty.
      if (tmp.rows () != rx || tmp.columns () != cx)
        return Array<T, Alloc> ();
    }

  return tmp.index (i, j);
}
1094 
template <typename T, typename Alloc>
// NOTE(review): the scraped copy dropped the return-type and the first
// signature line here (upstream: Array<T, Alloc>
// Array<T, Alloc>::index (const Array<octave::idx_vector>& ia,
  bool resize_ok, const T& rfv) const
{
  // N-D indexing with optional auto-resize (see the 1-D overload).
  Array<T, Alloc> tmp = *this;
  if (resize_ok)
    {
      int ial = ia.numel ();
      dim_vector dv = m_dimensions.redim (ial);
      dim_vector dvx = dim_vector::alloc (ial);
      for (int i = 0; i < ial; i++)
        dvx(i) = ia(i).extent (dv(i));
      if (! (dvx == dv))
        {
          // All indices scalar and out of range: just the fill value.
          bool all_scalars = true;
          for (int i = 0; i < ial; i++)
            all_scalars = all_scalars && ia(i).is_scalar ();
          if (all_scalars)
            return Array<T, Alloc> (dim_vector (1, 1), rfv);
          else
            tmp.resize (dvx, rfv);

          // If the resize failed, give up with empty.
          if (tmp.m_dimensions != dvx)
            return Array<T, Alloc> ();
        }
    }

  return tmp.index (ia);
}
1125 
1126 template <typename T, typename Alloc>
1127 void
1128 Array<T, Alloc>::assign (const octave::idx_vector& i, const Array<T, Alloc>& rhs, const T& rfv)
1129 {
1130  octave_idx_type n = numel ();
1131  octave_idx_type rhl = rhs.numel ();
1132 
1133  if (rhl != 1 && i.length (n) != rhl)
1134  octave::err_nonconformant ("=", dim_vector(i.length(n), 1), rhs.dims());
1135 
1136  octave_idx_type nx = i.extent (n);
1137  bool colon = i.is_colon_equiv (nx);
1138  // Try to resize first if necessary.
1139  if (nx != n)
1140  {
1141  // Optimize case A = []; A(1:n) = X with A empty.
1142  if (m_dimensions.zero_by_zero () && colon)
1143  {
1144  if (rhl == 1)
1145  *this = Array<T, Alloc> (dim_vector (1, nx), rhs(0));
1146  else
1147  *this = Array<T, Alloc> (rhs, dim_vector (1, nx));
1148  return;
1149  }
1150 
1151  resize1 (nx, rfv);
1152  n = numel ();
1153  }
1154 
1155  if (colon)
1156  {
1157  // A(:) = X makes a full fill or a shallow copy.
1158  if (rhl == 1)
1159  fill (rhs(0));
1160  else
1161  *this = rhs.reshape (m_dimensions);
1162  }
1163  else
1164  {
1165  if (rhl == 1)
1166  i.fill (rhs(0), n, fortran_vec ());
1167  else
1168  i.assign (rhs.data (), n, fortran_vec ());
1169  }
1170 }
1171 
// Assignment to a 2-dimensional array
template <typename T, typename Alloc>
void
// NOTE(review): the scraped copy dropped the first signature line here
// (upstream: Array<T, Alloc>::assign (const octave::idx_vector& i, const octave::idx_vector& j,
  const Array<T, Alloc>& rhs, const T& rfv)
{
  bool initial_dims_all_zero = m_dimensions.all_zero ();

  // Get RHS extents, discarding singletons.
  dim_vector rhdv = rhs.dims ();

  // Get LHS extents, allowing Fortran indexing in the second dim.
  dim_vector dv = m_dimensions.redim (2);

  // Check for out-of-bounds and form resizing m_dimensions.
  dim_vector rdv;

  // In the special when all dimensions are zero, colons are allowed
  // to inquire the shape of RHS. The rules are more obscure, so we
  // solve that elsewhere.
  if (initial_dims_all_zero)
    rdv = zero_dims_inquire (i, j, rhdv);
  else
    {
      rdv(0) = i.extent (dv(0));
      rdv(1) = j.extent (dv(1));
    }

  bool isfill = rhs.numel () == 1;
  octave_idx_type il = i.length (rdv(0));
  octave_idx_type jl = j.length (rdv(1));
  rhdv.chop_all_singletons ();
  // RHS conforms if it is a scalar, matches il x jl, or is a column
  // vector assigned through a single row (il == 1).
  bool match = (isfill
                || (rhdv.ndims () == 2 && il == rhdv(0) && jl == rhdv(1)));
  match = match || (il == 1 && jl == rhdv(0) && rhdv(1) == 1);

  if (match)
    {
      bool all_colons = (i.is_colon_equiv (rdv(0))
                         && j.is_colon_equiv (rdv(1)));
      // Resize if requested.
      if (rdv != dv)
        {
          // Optimize case A = []; A(1:m, 1:n) = X
          if (dv.zero_by_zero () && all_colons)
            {
              if (isfill)
                *this = Array<T, Alloc> (rdv, rhs(0));
              else
                *this = Array<T, Alloc> (rhs, rdv);
              return;
            }

          resize (rdv, rfv);
          dv = m_dimensions;
        }

      if (all_colons)
        {
          // A(:,:) = X makes a full fill or a shallow copy
          if (isfill)
            fill (rhs(0));
          else
            *this = rhs.reshape (m_dimensions);
        }
      else
        {
          // The actual work.
          octave_idx_type n = numel ();
          octave_idx_type r = dv(0);
          octave_idx_type c = dv(1);
          octave::idx_vector ii (i);

          const T *src = rhs.data ();
          T *dest = fortran_vec ();

          // Try reduction first.
          if (ii.maybe_reduce (r, j, c))
            {
              if (isfill)
                ii.fill (*src, n, dest);
              else
                ii.assign (src, n, dest);
            }
          else
            {
              // Column-by-column: apply row index I inside each
              // selected column J(k).
              if (isfill)
                {
                  for (octave_idx_type k = 0; k < jl; k++)
                    i.fill (*src, r, dest + r * j.xelem (k));
                }
              else
                {
                  for (octave_idx_type k = 0; k < jl; k++)
                    src += i.assign (src, r, dest + r * j.xelem (k));
                }
            }
        }
    }
  // any empty RHS can be assigned to an empty LHS
  else if ((il != 0 && jl != 0) || (rhdv(0) != 0 && rhdv(1) != 0))
    octave::err_nonconformant ("=", il, jl, rhs.dim1 (), rhs.dim2 ());
}
1275 
1276 // Assignment to a multi-dimensional array
1277 template <typename T, typename Alloc>
1278 void
1280  const Array<T, Alloc>& rhs, const T& rfv)
1281 {
1282  int ial = ia.numel ();
1283 
1284  // FIXME: is this dispatching necessary / desirable?
1285  if (ial == 1)
1286  assign (ia(0), rhs, rfv);
1287  else if (ial == 2)
1288  assign (ia(0), ia(1), rhs, rfv);
1289  else if (ial > 0)
1290  {
1291  bool initial_dims_all_zero = m_dimensions.all_zero ();
1292 
1293  // Get RHS extents, discarding singletons.
1294  dim_vector rhdv = rhs.dims ();
1295 
1296  // Get LHS extents, allowing Fortran indexing in the second dim.
1297  dim_vector dv = m_dimensions.redim (ial);
1298 
1299  // Get the extents forced by indexing.
1300  dim_vector rdv;
1301 
1302  // In the special when all dimensions are zero, colons are
1303  // allowed to inquire the shape of RHS. The rules are more
1304  // obscure, so we solve that elsewhere.
1305  if (initial_dims_all_zero)
1306  rdv = zero_dims_inquire (ia, rhdv);
1307  else
1308  {
1309  rdv = dim_vector::alloc (ial);
1310  for (int i = 0; i < ial; i++)
1311  rdv(i) = ia(i).extent (dv(i));
1312  }
1313 
1314  // Check whether LHS and RHS match, up to singleton dims.
1315  bool match = true;
1316  bool all_colons = true;
1317  bool isfill = rhs.numel () == 1;
1318 
1319  rhdv.chop_all_singletons ();
1320  int j = 0;
1321  int rhdvl = rhdv.ndims ();
1322  for (int i = 0; i < ial; i++)
1323  {
1324  all_colons = all_colons && ia(i).is_colon_equiv (rdv(i));
1325  octave_idx_type l = ia(i).length (rdv(i));
1326  if (l == 1) continue;
1327  match = match && j < rhdvl && l == rhdv(j++);
1328  }
1329 
1330  match = match && (j == rhdvl || rhdv(j) == 1);
1331  match = match || isfill;
1332 
1333  if (match)
1334  {
1335  // Resize first if necessary.
1336  if (rdv != dv)
1337  {
1338  // Optimize case A = []; A(1:m, 1:n) = X
1339  if (dv.zero_by_zero () && all_colons)
1340  {
1341  rdv.chop_trailing_singletons ();
1342  if (isfill)
1343  *this = Array<T, Alloc> (rdv, rhs(0));
1344  else
1345  *this = Array<T, Alloc> (rhs, rdv);
1346  return;
1347  }
1348 
1349  resize (rdv, rfv);
1350  dv = rdv;
1351  }
1352 
1353  if (all_colons)
1354  {
1355  // A(:,:,...,:) = X makes a full fill or a shallow copy.
1356  if (isfill)
1357  fill (rhs(0));
1358  else
1359  *this = rhs.reshape (m_dimensions);
1360  }
1361  else
1362  {
1363  // Do the actual work.
1364 
1365  // Prepare for recursive indexing
1366  rec_index_helper rh (dv, ia);
1367 
1368  // Do it.
1369  if (isfill)
1370  rh.fill (rhs(0), fortran_vec ());
1371  else
1372  rh.assign (rhs.data (), fortran_vec ());
1373  }
1374  }
1375  else
1376  {
1377  // dimension mismatch, unless LHS and RHS both empty
1378  bool lhsempty, rhsempty;
1379  lhsempty = rhsempty = false;
1380  dim_vector lhs_dv = dim_vector::alloc (ial);
1381  for (int i = 0; i < ial; i++)
1382  {
1383  octave_idx_type l = ia(i).length (rdv(i));
1384  lhs_dv(i) = l;
1385  lhsempty = lhsempty || (l == 0);
1386  rhsempty = rhsempty || (rhdv(j++) == 0);
1387  }
1388  if (! lhsempty || ! rhsempty)
1389  {
1390  lhs_dv.chop_trailing_singletons ();
1391  octave::err_nonconformant ("=", lhs_dv, rhdv);
1392  }
1393  }
1394  }
1395 }
1396 
1397 /*
1398 %!shared a
1399 %! a = [1 2; 3 4];
1400 %!error <op1 is 1x2, op2 is 1x3> a(1,:) = [1 2 3]
1401 %!error <op1 is 2x1, op2 is 1x3> a(:,1) = [1 2 3]
1402 %!error <op1 is 2x2, op2 is 2x1> a(1:2,2:3) = [1;2]
1403 */
1404 
// Delete the elements selected by index I from *this, treated as a
// linear (1-D) vector.  A(I) = [] semantics.
// NOTE(review): the signature line ("Array<T, Alloc>::delete_elements
// (const octave::idx_vector& i)", source line 1407) was dropped by the
// doxygen extraction; the body below is intact.
1405 template <typename T, typename Alloc>
1406 void
1408 {
1409  octave_idx_type n = numel ();
 // A(:) = [] always yields an empty array.
1410  if (i.is_colon ())
1411  {
1412  *this = Array<T, Alloc> ();
1413  }
1414  else if (i.length (n) != 0)
1415  {
 // Deleting requires every index to be within the current extent.
1416  if (i.extent (n) != n)
1417  octave::err_del_index_out_of_range (true, i.extent (n), n);
1418 
1419  octave_idx_type l, u;
 // Column vectors keep their orientation in the result; anything
 // else collapses to a row.
1420  bool col_vec = ndims () == 2 && columns () == 1 && rows () != 1;
1421  if (i.is_scalar () && i(0) == n-1 && m_dimensions.isvector ())
1422  {
1423  // Stack "pop" operation.
1424  resize1 (n-1);
1425  }
1426  else if (i.is_cont_range (n, l, u))
1427  {
1428  // Special case deleting a contiguous range.
1429  octave_idx_type m = n + l - u;
 // Keep column orientation when col_vec, otherwise build a row.
1430  Array<T, Alloc> tmp (dim_vector (col_vec ? m : 1, ! col_vec ? m : 1));
1431  const T *src = data ();
1432  T *dest = tmp.fortran_vec ();
 // Copy the prefix [0, l) and the suffix [u, n), skipping [l, u).
1433  std::copy_n (src, l, dest);
1434  std::copy (src + u, src + n, dest + l);
1435  *this = tmp;
1436  }
1437  else
1438  {
1439  // Use index.  General case: keep the complement of I.
1440  *this = index (i.complement (n));
1441  }
1442  }
1443 }
1444 
// Delete the slices selected by index I along dimension DIM.
// A(:,...,I,...,:) = [] semantics.
// NOTE(review): the signature line ("Array<T, Alloc>::delete_elements
// (int dim, const octave::idx_vector& i)", source line 1447) was
// dropped by the doxygen extraction; the body below is intact.
1445 template <typename T, typename Alloc>
1446 void
1448 {
1449  if (dim < 0)
1450  (*current_liboctave_error_handler) ("invalid dimension in delete_elements");
1451 
1452  dim_vector dimensions = m_dimensions;
1453 
 // Deleting along a trailing (implicit singleton) dimension.
1454  if (dim >= ndims ())
1455  dimensions.resize (dim + 1, 1);
1456 
1457  octave_idx_type ndim = dimensions.ndims ();
1458  octave_idx_type n = dimensions(dim);
1459 
 // A(:, ..., :) = [] with a colon along DIM empties the array.
1460  if (i.is_colon ())
1461  {
1462  *this = Array<T, Alloc> ();
1463  }
1464  else if (i.length (n) != 0)
1465  {
1466  if (i.extent (n) != n)
1467  octave::err_del_index_out_of_range (false, i.extent (n), n);
1468 
1469  octave_idx_type l, u;
1470 
1471  if (i.is_cont_range (n, l, u))
1472  {
1473  // Special case deleting a contiguous range.
1474  octave_idx_type nd = n + l - u;
 // dl: product of dims before DIM (elements per slice row);
 // du: product of dims after DIM (number of outer pages).
1475  octave_idx_type dl = 1;
1476  octave_idx_type du = 1;
1477  dim_vector rdv = dimensions;
1478  rdv(dim) = nd;
1479  for (int k = 0; k < dim; k++) dl *= dimensions(k);
1480  for (int k = dim + 1; k < ndim; k++) du *= dimensions(k);
1481 
1482  // Copy each outer page in two chunks, skipping the deleted slab.
1483  Array<T, Alloc> tmp = Array<T, Alloc> (rdv);
1484  const T *src = data ();
1485  T *dest = tmp.fortran_vec ();
 // Scale range bounds from slice counts to element counts.
1486  l *= dl; u *= dl; n *= dl;
1487  for (octave_idx_type k = 0; k < du; k++)
1488  {
1489  std::copy_n (src, l, dest);
1490  dest += l;
1491  std::copy (src + u, src + n, dest);
1492  dest += n - u;
1493  src += n;
1494  }
1495 
1496  *this = tmp;
1497  }
1498  else
1499  {
1500  // Use index.  Colons everywhere except DIM, complement along DIM.
1501  Array<octave::idx_vector> ia (dim_vector (ndim, 1), octave::idx_vector::colon);
1502  ia (dim) = i.complement (n);
1503  *this = index (ia);
1504  }
1505  }
1506 }
1507 
1508 template <typename T, typename Alloc>
1509 void
1511 {
1512  int ial = ia.numel ();
1513 
1514  if (ial == 1)
1515  delete_elements (ia(0));
1516  else
1517  {
1518  int k, dim = -1;
1519  for (k = 0; k < ial; k++)
1520  {
1521  if (! ia(k).is_colon ())
1522  {
1523  if (dim < 0)
1524  dim = k;
1525  else
1526  break;
1527  }
1528  }
1529  if (dim < 0)
1530  {
1531  dim_vector dv = m_dimensions;
1532  dv(0) = 0;
1533  *this = Array<T, Alloc> (dv);
1534  }
1535  else if (k == ial)
1536  {
1537  delete_elements (dim, ia(dim));
1538  }
1539  else
1540  {
1541  // Allow the null assignment to succeed if it won't change
1542  // anything because the indices reference an empty slice,
1543  // provided that there is at most one non-colon (or
1544  // equivalent) index. So, we still have the requirement of
1545  // deleting a slice, but it is OK if the slice is empty.
1546 
1547  // For compatibility with Matlab, stop checking once we see
1548  // more than one non-colon index or an empty index. Matlab
1549  // considers "[]" to be an empty index but not "false". We
1550  // accept both.
1551 
1552  bool empty_assignment = false;
1553 
1554  int num_non_colon_indices = 0;
1555 
1556  int nd = ndims ();
1557 
1558  for (int i = 0; i < ial; i++)
1559  {
1560  octave_idx_type dim_len = (i >= nd ? 1 : m_dimensions(i));
1561 
1562  if (ia(i).length (dim_len) == 0)
1563  {
1564  empty_assignment = true;
1565  break;
1566  }
1567 
1568  if (! ia(i).is_colon_equiv (dim_len))
1569  {
1570  num_non_colon_indices++;
1571 
1572  if (num_non_colon_indices == 2)
1573  break;
1574  }
1575  }
1576 
1577  if (! empty_assignment)
1578  (*current_liboctave_error_handler)
1579  ("a null assignment can only have one non-colon index");
1580  }
1581  }
1582 
1583 }
1584 
// Insert array A into *this with its upper-left corner at (R, C),
// growing *this as needed; returns *this.
// NOTE(review): the return-type/signature lines (source lines
// 1586-1587, "Array<T, Alloc>& Array<T, Alloc>::insert (const
// Array<T, Alloc>& a, octave_idx_type r, octave_idx_type c)") were
// dropped by the doxygen extraction; the body below is intact.
1585 template <typename T, typename Alloc>
1588 {
 // Contiguous destination ranges covering A's rows and columns.
1589  octave::idx_vector i (r, r + a.rows ());
1590  octave::idx_vector j (c, c + a.columns ());
1591  if (ndims () == 2 && a.ndims () == 2)
1592  assign (i, j, a);
1593  else
1594  {
 // N-D source: ranges in the first two dims, 0..extent in the rest.
1595  Array<octave::idx_vector> idx (dim_vector (a.ndims (), 1));
1596  idx(0) = i;
1597  idx(1) = j;
1598  for (int k = 2; k < a.ndims (); k++)
1599  idx(k) = octave::idx_vector (0, a.m_dimensions(k));
1600  assign (idx, a);
1601  }
1602 
1603  return *this;
1604 }
1605 
// Insert array A into *this with its corner at the N-D position given
// by RA_IDX; returns *this.
// NOTE(review): the doxygen extraction dropped source lines 1607-1611
// here (the signature and, presumably, the declarations of `n` and
// `idx` used below -- TODO confirm against the original Array-base.cc).
1606 template <typename T, typename Alloc>
1609 {
 // One contiguous destination range per dimension of A.
1612  const dim_vector dva = a.dims ().redim (n);
1613  for (octave_idx_type k = 0; k < n; k++)
1614  idx(k) = octave::idx_vector (ra_idx(k), ra_idx(k) + dva(k));
1615 
1616  assign (idx, a);
1617 
1618  return *this;
1619 }
1620 
// Return the transpose of this 2-D array (no element transformation;
// for conjugate transpose see hermitian below).
// NOTE(review): the return-type/signature lines (source lines
// 1622-1623, "Array<T, Alloc> Array<T, Alloc>::transpose () const")
// were dropped by the doxygen extraction; the body below is intact.
1621 template <typename T, typename Alloc>
1624 {
1625  assert (ndims () == 2);
1626 
1627  octave_idx_type nr = dim1 ();
1628  octave_idx_type nc = dim2 ();
1629 
 // Large matrices: blocked transpose for better cache behavior.
1630  if (nr >= 8 && nc >= 8)
1631  {
1632  Array<T, Alloc> result (dim_vector (nc, nr));
1633 
1634  // Reuse the implementation used for permuting.
1635 
1636  rec_permute_helper::blk_trans (data (), result.fortran_vec (), nr, nc);
1637 
1638  return result;
1639  }
 // Small genuine matrices: plain element-by-element swap.
1640  else if (nr > 1 && nc > 1)
1641  {
1642  Array<T, Alloc> result (dim_vector (nc, nr));
1643 
1644  for (octave_idx_type j = 0; j < nc; j++)
1645  for (octave_idx_type i = 0; i < nr; i++)
1646  result.xelem (j, i) = xelem (i, j);
1647 
1648  return result;
1649  }
1650  else
1651  {
1652  // Fast transpose for vectors and empty matrices.
 // (Only the dims change; the shared data is reused as-is.)
1653  return Array<T, Alloc> (*this, dim_vector (nc, nr));
1654  }
1655 }
1656 
// Identity transform: returns its argument unchanged.  Installed as
// the element mapping when Array<T>::hermitian is called without a
// conjugation functor.
template <typename T>
static T
no_op_fcn (const T& val)
{
  return val;
}
1663 
1664 template <typename T, typename Alloc>
1666 Array<T, Alloc>::hermitian (T (*fcn) (const T&)) const
1667 {
1668  assert (ndims () == 2);
1669 
1670  if (! fcn)
1671  fcn = no_op_fcn<T>;
1672 
1673  octave_idx_type nr = dim1 ();
1674  octave_idx_type nc = dim2 ();
1675 
1676  if (nr >= 8 && nc >= 8)
1677  {
1678  Array<T, Alloc> result (dim_vector (nc, nr));
1679 
1680  // Blocked transpose to attempt to avoid cache misses.
1681 
1682  T buf[64];
1683 
1684  octave_idx_type jj;
1685  for (jj = 0; jj < (nc - 8 + 1); jj += 8)
1686  {
1687  octave_idx_type ii;
1688  for (ii = 0; ii < (nr - 8 + 1); ii += 8)
1689  {
1690  // Copy to buffer
1691  for (octave_idx_type j = jj, k = 0, idxj = jj * nr;
1692  j < jj + 8; j++, idxj += nr)
1693  for (octave_idx_type i = ii; i < ii + 8; i++)
1694  buf[k++] = xelem (i + idxj);
1695 
1696  // Copy from buffer
1697  for (octave_idx_type i = ii, idxi = ii * nc; i < ii + 8;
1698  i++, idxi += nc)
1699  for (octave_idx_type j = jj, k = i - ii; j < jj + 8;
1700  j++, k+=8)
1701  result.xelem (j + idxi) = fcn (buf[k]);
1702  }
1703 
1704  if (ii < nr)
1705  for (octave_idx_type j = jj; j < jj + 8; j++)
1706  for (octave_idx_type i = ii; i < nr; i++)
1707  result.xelem (j, i) = fcn (xelem (i, j));
1708  }
1709 
1710  for (octave_idx_type j = jj; j < nc; j++)
1711  for (octave_idx_type i = 0; i < nr; i++)
1712  result.xelem (j, i) = fcn (xelem (i, j));
1713 
1714  return result;
1715  }
1716  else
1717  {
1718  Array<T, Alloc> result (dim_vector (nc, nr));
1719 
1720  for (octave_idx_type j = 0; j < nc; j++)
1721  for (octave_idx_type i = 0; i < nr; i++)
1722  result.xelem (j, i) = fcn (xelem (i, j));
1723 
1724  return result;
1725  }
1726 }
1727 
1728 /*
1729 
1730 ## Transpose tests for matrices of the tile size and plus or minus a row
1731 ## and with four tiles.
1732 
1733 %!shared m7, mt7, m8, mt8, m9, mt9
1734 %! m7 = reshape (1 : 7*8, 8, 7);
1735 %! mt7 = [1:8; 9:16; 17:24; 25:32; 33:40; 41:48; 49:56];
1736 %! m8 = reshape (1 : 8*8, 8, 8);
1737 %! mt8 = [mt7; 57:64];
1738 %! m9 = reshape (1 : 9*8, 8, 9);
1739 %! mt9 = [mt8; 65:72];
1740 
1741 %!assert (m7', mt7)
1742 %!assert ((1i*m7).', 1i * mt7)
1743 %!assert ((1i*m7)', conj (1i * mt7))
1744 %!assert (m8', mt8)
1745 %!assert ((1i*m8).', 1i * mt8)
1746 %!assert ((1i*m8)', conj (1i * mt8))
1747 %!assert (m9', mt9)
1748 %!assert ((1i*m9).', 1i * mt9)
1749 %!assert ((1i*m9)', conj (1i * mt9))
1750 %!assert ([m7, m8; m7, m8]', [mt7, mt7; mt8, mt8])
1751 %!assert ((1i*[m7, m8; m7, m8]).', 1i * [mt7, mt7; mt8, mt8])
1752 %!assert ((1i*[m7, m8; m7, m8])', conj (1i * [mt7, mt7; mt8, mt8]))
1753 %!assert ([m8, m8; m8, m8]', [mt8, mt8; mt8, mt8])
1754 %!assert ((1i*[m8, m8; m8, m8]).', 1i * [mt8, mt8; mt8, mt8])
1755 %!assert ((1i*[m8, m8; m8, m8])', conj (1i * [mt8, mt8; mt8, mt8]))
1756 %!assert ([m9, m8; m9, m8]', [mt9, mt9; mt8, mt8])
1757 %!assert ((1i*[m9, m8; m9, m8]).', 1i * [mt9, mt9; mt8, mt8])
1758 %!assert ((1i*[m9, m8; m9, m8])', conj (1i * [mt9, mt9; mt8, mt8]))
1759 
1760 */
1761 
// Return a writable pointer to the underlying column-major storage.
// NOTE(review): the signature line ("Array<T, Alloc>::fortran_vec ()",
// source line 1764) was dropped by the doxygen extraction.
1762 template <typename T, typename Alloc>
1763 T *
1765 {
 // Detach from any shared copies first so the caller may mutate
 // freely without affecting other arrays referencing the same rep.
1766  make_unique ();
1767 
1768  return m_slice_data;
1769 }
1770 
1771 // Non-real types don't have NaNs.
// Generic fallback used by the sort routines to partition NaNs to the
// end; always false here, specialized elsewhere for floating types.
// NOTE(review): the parameter line of the signature (source line 1774)
// was dropped by the doxygen extraction.
1772 template <typename T>
1773 inline bool
1775 {
1776  return false;
1777 }
1778 
1779 template <typename T, typename Alloc>
1781 Array<T, Alloc>::sort (int dim, sortmode mode) const
1782 {
1783  if (dim < 0)
1784  (*current_liboctave_error_handler) ("sort: invalid dimension");
1785 
1786  Array<T, Alloc> m (dims ());
1787 
1788  dim_vector dv = m.dims ();
1789 
1790  if (m.numel () < 1)
1791  return m;
1792 
1793  if (dim >= dv.ndims ())
1794  dv.resize (dim+1, 1);
1795 
1796  octave_idx_type ns = dv(dim);
1797  octave_idx_type iter = dv.numel () / ns;
1798  octave_idx_type stride = 1;
1799 
1800  for (int i = 0; i < dim; i++)
1801  stride *= dv(i);
1802 
1803  T *v = m.fortran_vec ();
1804  const T *ov = data ();
1805 
1806  octave_sort<T> lsort;
1807 
1808  if (mode != UNSORTED)
1809  lsort.set_compare (mode);
1810  else
1811  return m;
1812 
1813  if (stride == 1)
1814  {
1815  // Special case along first dimension avoids gather/scatter AND directly
1816  // sorts into destination buffer for an 11% performance boost.
1817  for (octave_idx_type j = 0; j < iter; j++)
1818  {
1819  // Copy and partition out NaNs.
1820  // No need to special case integer types <T> from floating point
1821  // types <T> to avoid sort_isnan() test as it makes no discernible
1822  // performance impact.
1823  octave_idx_type kl = 0;
1824  octave_idx_type ku = ns;
1825  for (octave_idx_type i = 0; i < ns; i++)
1826  {
1827  T tmp = ov[i];
1828  if (sort_isnan<T> (tmp))
1829  v[--ku] = tmp;
1830  else
1831  v[kl++] = tmp;
1832  }
1833 
1834  // sort.
1835  lsort.sort (v, kl);
1836 
1837  if (ku < ns)
1838  {
1839  // NaNs are in reverse order
1840  std::reverse (v + ku, v + ns);
1841  if (mode == DESCENDING)
1842  std::rotate (v, v + ku, v + ns);
1843  }
1844 
1845  v += ns;
1846  ov += ns;
1847  }
1848  }
1849  else
1850  {
1851  OCTAVE_LOCAL_BUFFER (T, buf, ns);
1852 
1853  for (octave_idx_type j = 0; j < iter; j++)
1854  {
1855  octave_idx_type offset = j;
1856  octave_idx_type n_strides = j / stride;
1857  offset += n_strides * stride * (ns - 1);
1858 
1859  // gather and partition out NaNs.
1860  octave_idx_type kl = 0;
1861  octave_idx_type ku = ns;
1862  for (octave_idx_type i = 0; i < ns; i++)
1863  {
1864  T tmp = ov[i*stride + offset];
1865  if (sort_isnan<T> (tmp))
1866  buf[--ku] = tmp;
1867  else
1868  buf[kl++] = tmp;
1869  }
1870 
1871  // sort.
1872  lsort.sort (buf, kl);
1873 
1874  if (ku < ns)
1875  {
1876  // NaNs are in reverse order
1877  std::reverse (buf + ku, buf + ns);
1878  if (mode == DESCENDING)
1879  std::rotate (buf, buf + ku, buf + ns);
1880  }
1881 
1882  // scatter.
1883  for (octave_idx_type i = 0; i < ns; i++)
1884  v[i*stride + offset] = buf[i];
1885  }
1886  }
1887 
1888  return m;
1889 }
1890 
1891 template <typename T, typename Alloc>
1894  sortmode mode) const
1895 {
1896  if (dim < 0 || dim >= ndims ())
1897  (*current_liboctave_error_handler) ("sort: invalid dimension");
1898 
1899  Array<T, Alloc> m (dims ());
1900 
1901  dim_vector dv = m.dims ();
1902 
1903  if (m.numel () < 1)
1904  {
1905  sidx = Array<octave_idx_type> (dv);
1906  return m;
1907  }
1908 
1909  octave_idx_type ns = dv(dim);
1910  octave_idx_type iter = dv.numel () / ns;
1911  octave_idx_type stride = 1;
1912 
1913  for (int i = 0; i < dim; i++)
1914  stride *= dv(i);
1915 
1916  T *v = m.fortran_vec ();
1917  const T *ov = data ();
1918 
1919  octave_sort<T> lsort;
1920 
1921  sidx = Array<octave_idx_type> (dv);
1922  octave_idx_type *vi = sidx.fortran_vec ();
1923 
1924  if (mode != UNSORTED)
1925  lsort.set_compare (mode);
1926  else
1927  return m;
1928 
1929  if (stride == 1)
1930  {
1931  // Special case for dim 1 avoids gather/scatter for performance boost.
1932  // See comments in Array::sort (dim, mode).
1933  for (octave_idx_type j = 0; j < iter; j++)
1934  {
1935  // copy and partition out NaNs.
1936  octave_idx_type kl = 0;
1937  octave_idx_type ku = ns;
1938  for (octave_idx_type i = 0; i < ns; i++)
1939  {
1940  T tmp = ov[i];
1941  if (sort_isnan<T> (tmp))
1942  {
1943  --ku;
1944  v[ku] = tmp;
1945  vi[ku] = i;
1946  }
1947  else
1948  {
1949  v[kl] = tmp;
1950  vi[kl] = i;
1951  kl++;
1952  }
1953  }
1954 
1955  // sort.
1956  lsort.sort (v, vi, kl);
1957 
1958  if (ku < ns)
1959  {
1960  // NaNs are in reverse order
1961  std::reverse (v + ku, v + ns);
1962  std::reverse (vi + ku, vi + ns);
1963  if (mode == DESCENDING)
1964  {
1965  std::rotate (v, v + ku, v + ns);
1966  std::rotate (vi, vi + ku, vi + ns);
1967  }
1968  }
1969 
1970  v += ns;
1971  vi += ns;
1972  ov += ns;
1973  }
1974  }
1975  else
1976  {
1977  OCTAVE_LOCAL_BUFFER (T, buf, ns);
1979 
1980  for (octave_idx_type j = 0; j < iter; j++)
1981  {
1982  octave_idx_type offset = j;
1983  octave_idx_type n_strides = j / stride;
1984  offset += n_strides * stride * (ns - 1);
1985 
1986  // gather and partition out NaNs.
1987  octave_idx_type kl = 0;
1988  octave_idx_type ku = ns;
1989  for (octave_idx_type i = 0; i < ns; i++)
1990  {
1991  T tmp = ov[i*stride + offset];
1992  if (sort_isnan<T> (tmp))
1993  {
1994  --ku;
1995  buf[ku] = tmp;
1996  bufi[ku] = i;
1997  }
1998  else
1999  {
2000  buf[kl] = tmp;
2001  bufi[kl] = i;
2002  kl++;
2003  }
2004  }
2005 
2006  // sort.
2007  lsort.sort (buf, bufi, kl);
2008 
2009  if (ku < ns)
2010  {
2011  // NaNs are in reverse order
2012  std::reverse (buf + ku, buf + ns);
2013  std::reverse (bufi + ku, bufi + ns);
2014  if (mode == DESCENDING)
2015  {
2016  std::rotate (buf, buf + ku, buf + ns);
2017  std::rotate (bufi, bufi + ku, bufi + ns);
2018  }
2019  }
2020 
2021  // scatter.
2022  for (octave_idx_type i = 0; i < ns; i++)
2023  v[i*stride + offset] = buf[i];
2024  for (octave_idx_type i = 0; i < ns; i++)
2025  vi[i*stride + offset] = bufi[i];
2026  }
2027  }
2028 
2029  return m;
2030 }
2031 
// Map a sortmode to a raw comparison function for T, or nullptr for
// UNSORTED.
// NOTE(review): the doxygen extraction dropped several lines here: the
// return-type/signature lines (2033-2034) and the two return
// statements for the ASCENDING/DESCENDING branches (2038 and 2040) --
// presumably returning the corresponding octave_sort<T> comparators;
// TODO confirm against the original Array-base.cc.
2032 template <typename T, typename Alloc>
2035  bool /* allow_chk */)
2036 {
2037  if (mode == ASCENDING)
2039  else if (mode == DESCENDING)
2041  else
2042  return nullptr;
2043 }
2044 
// Report whether the array (viewed linearly) is sorted.  With
// mode == UNSORTED the direction is auto-detected from the endpoints;
// returns the detected/confirmed mode, or UNSORTED if not sorted.
// NOTE(review): the signature line ("Array<T, Alloc>::issorted
// (sortmode mode) const", source line 2047) was dropped by the
// doxygen extraction; the body below is intact.
2045 template <typename T, typename Alloc>
2046 sortmode
2048 {
2049  octave_sort<T> lsort;
2050 
2051  octave_idx_type n = numel ();
2052 
 // Zero or one element is trivially sorted (ascending by convention).
2053  if (n <= 1)
2054  return (mode == UNSORTED) ? ASCENDING : mode;
2055 
2056  if (mode == UNSORTED)
2057  {
2058  // Auto-detect mode.
2059  compare_fcn_type compare
2060  = safe_comparator (ASCENDING, *this, false);
2061 
 // If last < first the data can only be descending-sorted.
2062  if (compare (elem (n-1), elem (0)))
2063  mode = DESCENDING;
2064  else
2065  mode = ASCENDING;
2066  }
2067 
2068  lsort.set_compare (safe_comparator (mode, *this, false));
2069 
2070  if (! lsort.issorted (data (), n))
2071  mode = UNSORTED;
2072 
2073  return mode;
2074 
2075 }
2076 
2077 template <typename T, typename Alloc>
2080 {
2082 
2083  octave_sort<T> lsort (safe_comparator (mode, *this, true));
2084 
2085  octave_idx_type r = rows ();
2086  octave_idx_type c = cols ();
2087 
2088  idx = Array<octave_idx_type> (dim_vector (r, 1));
2089 
2090  lsort.sort_rows (data (), idx.fortran_vec (), r, c);
2091 
2092  return idx;
2093 }
2094 
2095 template <typename T, typename Alloc>
2096 sortmode
2098 {
2099  octave_sort<T> lsort;
2100 
2101  octave_idx_type r = rows ();
2102  octave_idx_type c = cols ();
2103 
2104  if (r <= 1 || c == 0)
2105  return (mode == UNSORTED) ? ASCENDING : mode;
2106 
2107  if (mode == UNSORTED)
2108  {
2109  // Auto-detect mode.
2110  compare_fcn_type compare
2111  = safe_comparator (ASCENDING, *this, false);
2112 
2113  octave_idx_type i;
2114  for (i = 0; i < cols (); i++)
2115  {
2116  T l = elem (0, i);
2117  T u = elem (rows () - 1, i);
2118  if (compare (l, u))
2119  {
2120  if (mode == DESCENDING)
2121  {
2122  mode = UNSORTED;
2123  break;
2124  }
2125  else
2126  mode = ASCENDING;
2127  }
2128  else if (compare (u, l))
2129  {
2130  if (mode == ASCENDING)
2131  {
2132  mode = UNSORTED;
2133  break;
2134  }
2135  else
2136  mode = DESCENDING;
2137  }
2138  }
2139  if (mode == UNSORTED && i == cols ())
2140  mode = ASCENDING;
2141  }
2142 
2143  if (mode != UNSORTED)
2144  {
2145  lsort.set_compare (safe_comparator (mode, *this, false));
2146 
2147  if (! lsort.is_sorted_rows (data (), r, c))
2148  mode = UNSORTED;
2149  }
2150 
2151  return mode;
2152 
2153 }
2154 
2155 // Do a binary lookup in a sorted array.
// Returns the insertion index of VALUE in *this (treated as a sorted
// vector); with mode == UNSORTED the sort direction is auto-detected
// from the endpoints.
// NOTE(review): the return-type line (source line 2157,
// "octave_idx_type") was dropped by the doxygen extraction.
2156 template <typename T, typename Alloc>
2158 Array<T, Alloc>::lookup (const T& value, sortmode mode) const
2159 {
2160  octave_idx_type n = numel ();
2161  octave_sort<T> lsort;
2162 
2163  if (mode == UNSORTED)
2164  {
2165  // auto-detect mode
2166  if (n > 1 && lsort.descending_compare (elem (0), elem (n-1)))
2167  mode = DESCENDING;
2168  else
2169  mode = ASCENDING;
2170  }
2171 
2172  lsort.set_compare (mode);
2173 
2174  return lsort.lookup (data (), n, value);
2175 }
2176 
// Vector form of lookup: find insertion indices for all of VALUES in
// this sorted table, choosing between a binary-search and a
// merge-style algorithm based on the relative sizes.
// NOTE(review): the return-type/signature lines (source lines
// 2178-2179) were dropped by the doxygen extraction; the body below
// is intact.
2177 template <typename T, typename Alloc>
2180 {
2181  octave_idx_type n = numel ();
2182  octave_idx_type nval = values.numel ();
2183  octave_sort<T> lsort;
 // Result has the same shape as VALUES.
2184  Array<octave_idx_type> idx (values.dims ());
2185 
2186  if (mode == UNSORTED)
2187  {
2188  // auto-detect mode
2189  if (n > 1 && lsort.descending_compare (elem (0), elem (n-1)))
2190  mode = DESCENDING;
2191  else
2192  mode = ASCENDING;
2193  }
2194 
2195  lsort.set_compare (mode);
2196 
2197  // This determines the split ratio between the O(M*log2(N)) and O(M+N)
2198  // algorithms.
2199  static const double ratio = 1.0;
2200  sortmode vmode = UNSORTED;
2201 
2202  // Attempt the O(M+N) algorithm if M is large enough.
2203  if (nval > ratio * n / octave::math::log2 (n + 1.0))
2204  {
2205  vmode = values.issorted ();
2206  // The table must not contain a NaN.
2207  if ((vmode == ASCENDING && sort_isnan<T> (values(nval-1)))
2208  || (vmode == DESCENDING && sort_isnan<T> (values(0))))
2209  vmode = UNSORTED;
2210  }
2211 
 // Merge-style path when VALUES are themselves sorted; otherwise
 // fall back to per-element binary search.
2212  if (vmode != UNSORTED)
2213  lsort.lookup_sorted (data (), n, values.data (), nval,
2214  idx.fortran_vec (), vmode != mode);
2215  else
2216  lsort.lookup (data (), n, values.data (), nval, idx.fortran_vec ());
2217 
2218  return idx;
2219 }
2220 
// Count the elements of *this that differ from the default-constructed
// (zero) value of T.
// NOTE(review): the return-type/signature lines (source lines
// 2222-2223, "octave_idx_type Array<T, Alloc>::nnz () const") were
// dropped by the doxygen extraction; the body below is intact.
2221 template <typename T, typename Alloc>
2224 {
2225  const T *src = data ();
2226  octave_idx_type nel = numel ();
2227  octave_idx_type retval = 0;
 // "Zero" is T's value-initialized default, matching find() below.
2228  const T zero = T ();
2229  for (octave_idx_type i = 0; i < nel; i++)
2230  if (src[i] != zero)
2231  retval++;
2232 
2233  return retval;
2234 }
2235 
2236 template <typename T, typename Alloc>
2239 {
2240  Array<octave_idx_type> retval;
2241  const T *src = data ();
2242  octave_idx_type nel = numel ();
2243  const T zero = T ();
2244  if (n < 0 || n >= nel)
2245  {
2246  // We want all elements, which means we'll almost surely need
2247  // to resize. So count first, then allocate array of exact size.
2248  octave_idx_type cnt = 0;
2249  for (octave_idx_type i = 0; i < nel; i++)
2250  cnt += src[i] != zero;
2251 
2252  retval.clear (cnt, 1);
2253  octave_idx_type *dest = retval.fortran_vec ();
2254  for (octave_idx_type i = 0; i < nel; i++)
2255  if (src[i] != zero) *dest++ = i;
2256  }
2257  else
2258  {
2259  // We want a fixed max number of elements, usually small. So be
2260  // optimistic, alloc the array in advance, and then resize if
2261  // needed.
2262  retval.clear (n, 1);
2263  if (backward)
2264  {
2265  // Do the search as a series of successive single-element searches.
2266  octave_idx_type k = 0;
2267  octave_idx_type l = nel - 1;
2268  for (; k < n; k++)
2269  {
2270  for (; l >= 0 && src[l] == zero; l--) ;
2271  if (l >= 0)
2272  retval(k) = l--;
2273  else
2274  break;
2275  }
2276  if (k < n)
2277  retval.resize2 (k, 1);
2278  octave_idx_type *rdata = retval.fortran_vec ();
2279  std::reverse (rdata, rdata + k);
2280  }
2281  else
2282  {
2283  // Do the search as a series of successive single-element searches.
2284  octave_idx_type k = 0;
2285  octave_idx_type l = 0;
2286  for (; k < n; k++)
2287  {
2288  for (; l != nel && src[l] == zero; l++) ;
2289  if (l != nel)
2290  retval(k) = l++;
2291  else
2292  break;
2293  }
2294  if (k < n)
2295  retval.resize2 (k, 1);
2296  }
2297  }
2298 
2299  // Fixup return dimensions, for Matlab compatibility.
2300  // find (zeros (0,0)) -> zeros (0,0)
2301  // find (zeros (1,0)) -> zeros (1,0)
2302  // find (zeros (0,1)) -> zeros (0,1)
2303  // find (zeros (0,X)) -> zeros (0,1)
2304  // find (zeros (1,1)) -> zeros (0,0) !!!! WHY?
2305  // find (zeros (0,1,0)) -> zeros (0,0)
2306  // find (zeros (0,1,0,1)) -> zeros (0,0) etc
2307 
2308  if ((numel () == 1 && retval.isempty ())
2309  || (rows () == 0 && dims ().numel (1) == 0))
2310  retval.m_dimensions = dim_vector ();
2311  else if (rows () == 1 && ndims () == 2)
2312  retval.m_dimensions = dim_vector (1, retval.numel ());
2313 
2314  return retval;
2315 }
2316 
2317 template <typename T, typename Alloc>
2320 {
2321  if (dim < 0)
2322  (*current_liboctave_error_handler) ("nth_element: invalid dimension");
2323 
2324  dim_vector dv = dims ();
2325  if (dim >= dv.ndims ())
2326  dv.resize (dim+1, 1);
2327 
2328  octave_idx_type ns = dv(dim);
2329 
2330  octave_idx_type nn = n.length (ns);
2331 
2332  dv(dim) = std::min (nn, ns);
2333  dv.chop_trailing_singletons ();
2334  dim = std::min (dv.ndims (), static_cast<octave_idx_type> (dim));
2335 
2336  Array<T, Alloc> m (dv);
2337 
2338  if (m.isempty ())
2339  return m;
2340 
2341  sortmode mode = UNSORTED;
2342  octave_idx_type lo = 0;
2343 
2344  switch (n.idx_class ())
2345  {
2346  case octave::idx_vector::class_scalar:
2347  mode = ASCENDING;
2348  lo = n(0);
2349  break;
2350  case octave::idx_vector::class_range:
2351  {
2352  octave_idx_type inc = n.increment ();
2353  if (inc == 1)
2354  {
2355  mode = ASCENDING;
2356  lo = n(0);
2357  }
2358  else if (inc == -1)
2359  {
2360  mode = DESCENDING;
2361  lo = ns - 1 - n(0);
2362  }
2363  }
2364  break;
2365  case octave::idx_vector::class_vector:
2366  // This case resolves bug #51329, a fallback to allow the given index
2367  // to be a sequential vector instead of the typical scalar or range
2368  if (n(1) - n(0) == 1)
2369  {
2370  mode = ASCENDING;
2371  lo = n(0);
2372  }
2373  else if (n(1) - n(0) == -1)
2374  {
2375  mode = DESCENDING;
2376  lo = ns - 1 - n(0);
2377  }
2378  // Ensure that the vector is actually an arithmetic contiguous sequence
2379  for (octave_idx_type i = 2; i < n.length () && mode != UNSORTED; i++)
2380  if ((mode == ASCENDING && n(i) - n(i-1) != 1)
2381  || (mode == DESCENDING && n(i) - n(i-1) != -1))
2382  mode = UNSORTED;
2383  break;
2384  default:
2385  break;
2386  }
2387 
2388  if (mode == UNSORTED)
2389  (*current_liboctave_error_handler)
2390  ("nth_element: n must be a scalar or a contiguous range");
2391 
2392  octave_idx_type up = lo + nn;
2393 
2394  if (lo < 0 || up > ns)
2395  (*current_liboctave_error_handler) ("nth_element: invalid element index");
2396 
2397  octave_idx_type iter = numel () / ns;
2398  octave_idx_type stride = 1;
2399 
2400  for (int i = 0; i < dim; i++)
2401  stride *= dv(i);
2402 
2403  T *v = m.fortran_vec ();
2404  const T *ov = data ();
2405 
2406  OCTAVE_LOCAL_BUFFER (T, buf, ns);
2407 
2408  octave_sort<T> lsort;
2409  lsort.set_compare (mode);
2410 
2411  for (octave_idx_type j = 0; j < iter; j++)
2412  {
2413  octave_idx_type kl = 0;
2414  octave_idx_type ku = ns;
2415 
2416  if (stride == 1)
2417  {
2418  // copy without NaNs.
2419  for (octave_idx_type i = 0; i < ns; i++)
2420  {
2421  T tmp = ov[i];
2422  if (sort_isnan<T> (tmp))
2423  buf[--ku] = tmp;
2424  else
2425  buf[kl++] = tmp;
2426  }
2427 
2428  ov += ns;
2429  }
2430  else
2431  {
2432  octave_idx_type offset = j % stride;
2433  // copy without NaNs.
2434  for (octave_idx_type i = 0; i < ns; i++)
2435  {
2436  T tmp = ov[offset + i*stride];
2437  if (sort_isnan<T> (tmp))
2438  buf[--ku] = tmp;
2439  else
2440  buf[kl++] = tmp;
2441  }
2442 
2443  if (offset == stride-1)
2444  ov += ns*stride;
2445  }
2446 
2447  if (ku == ns)
2448  lsort.nth_element (buf, ns, lo, up);
2449  else if (mode == ASCENDING)
2450  lsort.nth_element (buf, ku, lo, std::min (ku, up));
2451  else
2452  {
2453  octave_idx_type nnan = ns - ku;
2454  octave_idx_type zero = 0;
2455  lsort.nth_element (buf, ku, std::max (lo - nnan, zero),
2456  std::max (up - nnan, zero));
2457  std::rotate (buf, buf + ku, buf + ns);
2458  }
2459 
2460  if (stride == 1)
2461  {
2462  for (octave_idx_type i = 0; i < nn; i++)
2463  v[i] = buf[lo + i];
2464 
2465  v += nn;
2466  }
2467  else
2468  {
2469  octave_idx_type offset = j % stride;
2470  for (octave_idx_type i = 0; i < nn; i++)
2471  v[offset + stride * i] = buf[lo + i];
2472  if (offset == stride-1)
2473  v += nn*stride;
2474  }
2475  }
2476 
2477  return m;
2478 }
2479 
// Stub out the whole sorting/searching API for element types that
// cannot be ordered: each specialization returns an empty/neutral
// value instead of instantiating octave_sort<T>.  API is the export
// decoration (may be empty).  Comments cannot go inside the macro
// body because of the line continuations.
2480 #define NO_INSTANTIATE_ARRAY_SORT_API(T, API) \
2481  template <> API Array<T> \
2482  Array<T>::sort (int, sortmode) const \
2483  { \
2484  return *this; \
2485  } \
2486  template <> API Array<T> \
2487  Array<T>::sort (Array<octave_idx_type> &sidx, int, sortmode) const \
2488  { \
2489  sidx = Array<octave_idx_type> (); \
2490  return *this; \
2491  } \
2492  template <> API sortmode \
2493  Array<T>::issorted (sortmode) const \
2494  { \
2495  return UNSORTED; \
2496  } \
2497  API Array<T>::compare_fcn_type \
2498  safe_comparator (sortmode, const Array<T>&, bool) \
2499  { \
2500  return nullptr; \
2501  } \
2502  template <> API Array<octave_idx_type> \
2503  Array<T>::sort_rows_idx (sortmode) const \
2504  { \
2505  return Array<octave_idx_type> (); \
2506  } \
2507  template <> API sortmode \
2508  Array<T>::is_sorted_rows (sortmode) const \
2509  { \
2510  return UNSORTED; \
2511  } \
2512  template <> API octave_idx_type \
2513  Array<T>::lookup (T const &, sortmode) const \
2514  { \
2515  return 0; \
2516  } \
2517  template <> API Array<octave_idx_type> \
2518  Array<T>::lookup (const Array<T>&, sortmode) const \
2519  { \
2520  return Array<octave_idx_type> (); \
2521  } \
2522  template <> API octave_idx_type \
2523  Array<T>::nnz () const \
2524  { \
2525  return 0; \
2526  } \
2527  template <> API Array<octave_idx_type> \
2528  Array<T>::find (octave_idx_type, bool) const \
2529  { \
2530  return Array<octave_idx_type> (); \
2531  } \
2532  template <> API Array<T> \
2533  Array<T>::nth_element (const octave::idx_vector&, int) const { \
2534  return Array<T> (); \
2535  }
2536 
// Convenience form with no export decoration.
2537 #define NO_INSTANTIATE_ARRAY_SORT(T) NO_INSTANTIATE_ARRAY_SORT_API (T,)
2538 
2539 template <typename T, typename Alloc>
2542 {
2543  dim_vector dv = dims ();
2544  octave_idx_type nd = dv.ndims ();
2546 
2547  if (nd > 2)
2548  (*current_liboctave_error_handler) ("Matrix must be 2-dimensional");
2549 
2550  octave_idx_type nnr = dv(0);
2551  octave_idx_type nnc = dv(1);
2552 
2553  if (nnr == 0 && nnc == 0)
2554  ; // do nothing for empty matrix
2555  else if (nnr != 1 && nnc != 1)
2556  {
2557  // Extract diag from matrix
2558  if (k > 0)
2559  nnc -= k;
2560  else if (k < 0)
2561  nnr += k;
2562 
2563  if (nnr > 0 && nnc > 0)
2564  {
2565  octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;
2566 
2567  d.resize (dim_vector (ndiag, 1));
2568 
2569  if (k > 0)
2570  {
2571  for (octave_idx_type i = 0; i < ndiag; i++)
2572  d.xelem (i) = elem (i, i+k);
2573  }
2574  else if (k < 0)
2575  {
2576  for (octave_idx_type i = 0; i < ndiag; i++)
2577  d.xelem (i) = elem (i-k, i);
2578  }
2579  else
2580  {
2581  for (octave_idx_type i = 0; i < ndiag; i++)
2582  d.xelem (i) = elem (i, i);
2583  }
2584  }
2585  else // Matlab returns [] 0x1 for out-of-range diagonal
2586  d.resize (dim_vector (0, 1));
2587  }
2588  else
2589  {
2590  // Create diag matrix from vector
2591  octave_idx_type roff = 0;
2592  octave_idx_type coff = 0;
2593  if (k > 0)
2594  {
2595  roff = 0;
2596  coff = k;
2597  }
2598  else if (k < 0)
2599  {
2600  roff = -k;
2601  coff = 0;
2602  }
2603 
2604  if (nnr == 1)
2605  {
2606  octave_idx_type n = nnc + std::abs (k);
2607  d = Array<T, Alloc> (dim_vector (n, n), resize_fill_value ());
2608 
2609  for (octave_idx_type i = 0; i < nnc; i++)
2610  d.xelem (i+roff, i+coff) = elem (0, i);
2611  }
2612  else
2613  {
2614  octave_idx_type n = nnr + std::abs (k);
2615  d = Array<T, Alloc> (dim_vector (n, n), resize_fill_value ());
2616 
2617  for (octave_idx_type i = 0; i < nnr; i++)
2618  d.xelem (i+roff, i+coff) = elem (i, 0);
2619  }
2620  }
2621 
2622  return d;
2623 }
2624 
2625 template <typename T, typename Alloc>
2628 {
2629  if (ndims () != 2 || (rows () != 1 && cols () != 1))
2630  (*current_liboctave_error_handler) ("cat: invalid dimension");
2631 
2632  Array<T, Alloc> retval (dim_vector (m, n), resize_fill_value ());
2633 
2634  octave_idx_type nel = std::min (numel (), std::min (m, n));
2635  for (octave_idx_type i = 0; i < nel; i++)
2636  retval.xelem (i, i) = xelem (i);
2637 
2638  return retval;
2639 }
2640 
2641 template <typename T, typename Alloc>
2644 {
2645  // Default concatenation.
2646  bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
2647 
2648  if (dim == -1 || dim == -2)
2649  {
2650  concat_rule = &dim_vector::hvcat;
2651  dim = -dim - 1;
2652  }
2653  else if (dim < 0)
2654  (*current_liboctave_error_handler) ("cat: invalid dimension");
2655 
2656  if (n == 1)
2657  return array_list[0];
2658  else if (n == 0)
2659  return Array<T, Alloc> ();
2660 
2661  // Special case:
2662  //
2663  // cat (dim, [], ..., [], A, ...)
2664  //
2665  // with dim > 2, A not 0x0, and at least three arguments to
2666  // concatenate is equivalent to
2667  //
2668  // cat (dim, A, ...)
2669  //
2670  // Note that this check must be performed here because for full-on
2671  // braindead Matlab compatibility, we need to have things like
2672  //
2673  // cat (3, [], [], A)
2674  //
2675  // succeed, but to have things like
2676  //
2677  // cat (3, cat (3, [], []), A)
2678  // cat (3, zeros (0, 0, 2), A)
2679  //
2680  // fail. See also bug report #31615.
2681 
2682  octave_idx_type istart = 0;
2683 
2684  if (n > 2 && dim > 1)
2685  {
2686  for (octave_idx_type i = 0; i < n; i++)
2687  {
2688  dim_vector dv = array_list[i].dims ();
2689 
2690  if (dv.zero_by_zero ())
2691  istart++;
2692  else
2693  break;
2694  }
2695 
2696  // Don't skip any initial arguments if they are all empty.
2697  if (istart >= n)
2698  istart = 0;
2699  }
2700 
2701  dim_vector dv = array_list[istart++].dims ();
2702 
2703  for (octave_idx_type i = istart; i < n; i++)
2704  if (! (dv.*concat_rule) (array_list[i].dims (), dim))
2705  (*current_liboctave_error_handler) ("cat: dimension mismatch");
2706 
2707  Array<T, Alloc> retval (dv);
2708 
2709  if (retval.isempty ())
2710  return retval;
2711 
2712  int nidx = std::max (dv.ndims (), static_cast<octave_idx_type> (dim + 1));
2713  Array<octave::idx_vector> idxa (dim_vector (nidx, 1), octave::idx_vector::colon);
2714  octave_idx_type l = 0;
2715 
2716  for (octave_idx_type i = 0; i < n; i++)
2717  {
2718  // NOTE: This takes some thinking, but no matter what the above rules
2719  // are, an empty array can always be skipped at this point, because
2720  // the result dimensions are already determined, and there is no way
2721  // an empty array may contribute a nonzero piece along the dimension
2722  // at this point, unless an empty array can be promoted to a non-empty
2723  // one (which makes no sense). I repeat, *no way*, think about it.
2724  if (array_list[i].isempty ())
2725  continue;
2726 
2727  octave_quit ();
2728 
2729  octave_idx_type u;
2730  if (dim < array_list[i].ndims ())
2731  u = l + array_list[i].dims ()(dim);
2732  else
2733  u = l + 1;
2734 
2735  idxa(dim) = octave::idx_vector (l, u);
2736 
2737  retval.assign (idxa, array_list[i]);
2738 
2739  l = u;
2740  }
2741 
2742  return retval;
2743 }
2744 
2745 template <typename T, typename Alloc>
2746 void
2747 Array<T, Alloc>::print_info (std::ostream& os, const std::string& prefix) const
2748 {
2749  os << prefix << "m_rep address: " << m_rep << '\n'
2750  << prefix << "m_rep->m_len: " << m_rep->m_len << '\n'
2751  << prefix << "m_rep->m_data: " << static_cast<void *> (m_rep->m_data) << '\n'
2752  << prefix << "m_rep->m_count: " << m_rep->m_count << '\n'
2753  << prefix << "m_slice_data: " << static_cast<void *> (m_slice_data) << '\n'
2754  << prefix << "m_slice_len: " << m_slice_len << '\n';
2755 
2756  // 2D info:
2757  //
2758  // << pefix << "rows: " << rows () << "\n"
2759  // << prefix << "cols: " << cols () << "\n";
2760 }
2761 
2762 template <typename T, typename Alloc>
2763 bool
2765 {
2766  bool retval = m_dimensions == dv;
2767  if (retval)
2768  m_dimensions = dv;
2769 
2770  return retval;
2771 }
2772 
2773 template <typename T, typename Alloc>
2774 void
2776 {
2777  // This guards against accidental implicit instantiations.
2778  // Array<T, Alloc> instances should always be explicit and use INSTANTIATE_ARRAY.
2779  T::__xXxXx__ ();
2780 }
2781 
// Explicitly instantiate Array<T>, first defining the (empty)
// instantiation_guard specialization that permits it.  API is the
// import/export decoration applied to the emitted symbols.
#define INSTANTIATE_ARRAY(T, API) \
  template <> API void \
  Array<T>::instantiation_guard () { } \
  \
  template class API Array<T>
2787 
// FIXME: is this used?

// Print a simple textual dump of the array A to OS: a header line with
// the number of dimensions and the size string, then the data.  Arrays
// with more than two dimensions are printed one 2-d page at a time,
// each page preceded by a "(:,:,k,...) =" header with 1-based indices.

template <typename T, typename Alloc>
std::ostream&
operator << (std::ostream& os, const Array<T, Alloc>& a)
{
  dim_vector a_dims = a.dims ();

  int n_dims = a_dims.ndims ();

  os << n_dims << "-dimensional array";

  if (n_dims)
    os << " (" << a_dims.str () << ')';

  os <<"\n\n";

  if (n_dims)
    {
      os << "data:";

      // Current element position, one (0-based) index per dimension.
      Array<octave_idx_type> ra_idx (dim_vector (n_dims, 1), 0);

      // Number of times the first 2d-array is to be displayed.

      octave_idx_type m = 1;
      for (int i = 2; i < n_dims; i++)
        m *= a_dims(i);

      if (m == 1)
        {
          // Single page: print as a 2-d matrix or a 1-d vector.

          octave_idx_type rows = 0;
          octave_idx_type cols = 0;

          switch (n_dims)
            {
            case 2:
              rows = a_dims(0);
              cols = a_dims(1);

              // One output line per row, elements separated by spaces.
              for (octave_idx_type j = 0; j < rows; j++)
                {
                  ra_idx(0) = j;
                  for (octave_idx_type k = 0; k < cols; k++)
                    {
                      ra_idx(1) = k;
                      os << ' ' << a.elem (ra_idx);
                    }
                  os << "\n";
                }
              break;

            default:
              // 1-dimensional case: all elements on a single line.
              rows = a_dims(0);

              for (octave_idx_type k = 0; k < rows; k++)
                {
                  ra_idx(0) = k;
                  os << ' ' << a.elem (ra_idx);
                }
              break;
            }

          os << "\n";
        }
      else
        {
          // Multiple pages: print each rows-by-cols page, headed by
          // the higher-dimension indices converted to 1-based form.

          octave_idx_type rows = a_dims(0);
          octave_idx_type cols = a_dims(1);

          for (int i = 0; i < m; i++)
            {
              os << "\n(:,:,";

              for (int j = 2; j < n_dims - 1; j++)
                os << ra_idx(j) + 1 << ',';

              os << ra_idx(n_dims - 1) + 1 << ") = \n";

              for (octave_idx_type j = 0; j < rows; j++)
                {
                  ra_idx(0) = j;

                  for (octave_idx_type k = 0; k < cols; k++)
                    {
                      ra_idx(1) = k;
                      os << ' ' << a.elem (ra_idx);
                    }

                  os << "\n";
                }

              os << "\n";

              // Advance the page indices (dimensions 2 and up); skip
              // the increment after the last page.
              if (i != m - 1)
                increment_index (ra_idx, a_dims, 2);
            }
        }
    }

  return os;
}
bool sort_isnan(typename ref_param< T >::type)
Definition: Array-base.cc:1774
Array< T, Alloc >::compare_fcn_type safe_comparator(sortmode mode, const Array< T, Alloc > &, bool)
Definition: Array-base.cc:2034
std::ostream & operator<<(std::ostream &os, const Array< T, Alloc > &a)
Definition: Array-base.cc:2792
octave_idx_type compute_index(octave_idx_type n, const dim_vector &dims)
Definition: Array-util.cc:177
dim_vector zero_dims_inquire(const Array< octave::idx_vector > &ia, const dim_vector &rhdv)
Definition: Array-util.cc:408
void increment_index(Array< octave_idx_type > &ra_idx, const dim_vector &dimensions, int start_dimension)
Definition: Array-util.cc:60
charNDArray max(char d, const charNDArray &m)
Definition: chNDArray.cc:230
charNDArray min(char d, const charNDArray &m)
Definition: chNDArray.cc:207
The real representation of all arrays.
Definition: Array.h:135
octave::refcount< octave_idx_type > m_count
Definition: Array.h:145
N Dimensional Array with copy-on-write semantics.
Definition: Array.h:130
T & elem(octave_idx_type n)
Size of the specified dimension.
Definition: Array.h:562
T * fortran_vec()
Size of the specified dimension.
Definition: Array-base.cc:1764
Array< T, Alloc > index(const octave::idx_vector &i) const
Indexing without resizing.
Definition: Array-base.cc:710
friend class Array
Size of the specified dimension.
Definition: Array.h:941
void assign(const octave::idx_vector &i, const Array< T, Alloc > &rhs, const T &rfv)
Indexed assignment (always with resize & fill).
Definition: Array-base.cc:1128
Array< T, Alloc > transpose() const
Size of the specified dimension.
Definition: Array-base.cc:1623
Array< T, Alloc > nth_element(const octave::idx_vector &n, int dim=0) const
Returns the n-th element in increasing order, using the same ordering as used for sort.
Definition: Array-base.cc:2319
void clear()
Definition: Array-base.cc:109
Array< T, Alloc > hermitian(T(*fcn)(const T &)=nullptr) const
Size of the specified dimension.
Definition: Array-base.cc:1666
void resize1(octave_idx_type n, const T &rfv)
Size of the specified dimension.
Definition: Array-base.cc:910
int ndims() const
Size of the specified dimension.
Definition: Array.h:671
ref_param< T >::type crefT
Definition: Array.h:238
Array< T, Alloc > column(octave_idx_type k) const
Extract column: A(:,k+1).
Definition: Array-base.cc:283
Array< T, Alloc > linear_slice(octave_idx_type lo, octave_idx_type up) const
Extract a slice from this array as a column vector: A(:)(lo+1:up).
Definition: Array-base.cc:303
bool optimize_dimensions(const dim_vector &dv)
Returns true if this->dims () == dv, and if so, replaces this->m_dimensions by a shallow copy of dv.
Definition: Array-base.cc:2764
octave_idx_type nnz() const
Count nonzero elements.
Definition: Array-base.cc:2223
octave_idx_type rows() const
Definition: Array.h:459
Array< T, Alloc > page(octave_idx_type k) const
Extract page: A(:,:,k+1).
Definition: Array-base.cc:292
void resize(const dim_vector &dv, const T &rfv)
Size of the specified dimension.
Definition: Array-base.cc:1023
Array< T, Alloc > & insert(const Array< T, Alloc > &a, const Array< octave_idx_type > &idx)
Insert an array into another at a specified position.
Definition: Array-base.cc:1608
Array< T, Alloc > squeeze() const
Chop off leading singleton dimensions.
Definition: Array-base.cc:139
const T * data() const
Size of the specified dimension.
Definition: Array.h:663
octave_idx_type dim2() const
Definition: Array.h:467
octave_idx_type columns() const
Definition: Array.h:471
void delete_elements(const octave::idx_vector &i)
Deleting elements.
Definition: Array-base.cc:1407
Array< octave_idx_type > sort_rows_idx(sortmode mode=ASCENDING) const
Sort by rows returns only indices.
Definition: Array-base.cc:2079
Array< octave_idx_type > find(octave_idx_type n=-1, bool backward=false) const
Find indices of (at most n) nonzero elements.
Definition: Array-base.cc:2238
octave_idx_type dim1() const
Definition: Array.h:456
dim_vector m_dimensions
Definition: Array.h:245
Array< T, Alloc > permute(const Array< octave_idx_type > &vec, bool inv=false) const
Size of the specified dimension.
Definition: Array-base.cc:450
Array< T, Alloc > sort(int dim=0, sortmode mode=ASCENDING) const
Size of the specified dimension.
Definition: Array-base.cc:1781
bool isempty() const
Size of the specified dimension.
Definition: Array.h:651
void print_info(std::ostream &os, const std::string &prefix) const
Size of the specified dimension.
Definition: Array-base.cc:2747
sortmode issorted(sortmode mode=UNSORTED) const
Ordering is auto-detected or can be specified.
Definition: Array-base.cc:2047
const dim_vector & dims() const
Return a const-reference so that dims ()(i) works efficiently.
Definition: Array.h:503
T & xelem(octave_idx_type n)
Size of the specified dimension.
Definition: Array.h:524
Array< T, Alloc > reshape(octave_idx_type nr, octave_idx_type nc) const
Size of the specified dimension.
Definition: Array.h:635
T & checkelem(octave_idx_type n)
Size of the specified dimension.
Definition: Array-base.cc:214
sortmode is_sorted_rows(sortmode mode=UNSORTED) const
Ordering is auto-detected or can be specified.
Definition: Array-base.cc:2097
void fill(const T &val)
Definition: Array-base.cc:95
Array< T, Alloc > diag(octave_idx_type k=0) const
Get the kth super or subdiagonal.
Definition: Array-base.cc:2541
virtual T resize_fill_value() const
Size of the specified dimension.
Definition: Array-base.cc:899
octave_idx_type compute_index(octave_idx_type i, octave_idx_type j) const
Size of the specified dimension.
Definition: Array-base.cc:192
octave_idx_type lookup(const T &value, sortmode mode=UNSORTED) const
Do a binary lookup in a sorted array.
Definition: Array-base.cc:2158
Array< T, Alloc >::ArrayRep * m_rep
Definition: Array.h:247
void resize2(octave_idx_type nr, octave_idx_type nc, const T &rfv)
Resizing (with fill).
Definition: Array-base.cc:981
static Array< T, Alloc > cat(int dim, octave_idx_type n, const Array< T, Alloc > *array_list)
Concatenation along a specified (0-based) dimension, equivalent to cat().
Definition: Array-base.cc:2643
octave_idx_type numel() const
Number of elements in the array.
Definition: Array.h:414
Vector representing the dimensions (size) of an Array.
Definition: dim-vector.h:94
bool concat(const dim_vector &dvb, int dim)
This corresponds to cat().
Definition: dim-vector.cc:140
octave_idx_type safe_numel() const
The following function will throw a std::bad_alloc () exception if the requested size is larger than ...
Definition: dim-vector.cc:98
std::string str(char sep='x') const
Definition: dim-vector.cc:68
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition: dim-vector.h:335
void chop_trailing_singletons()
Definition: dim-vector.h:164
void resize(int n, int fill_value=0)
Definition: dim-vector.h:272
static dim_vector alloc(int n)
Definition: dim-vector.h:202
bool is_nd_vector() const
Definition: dim-vector.h:400
bool any_neg() const
Definition: dim-vector.h:357
octave_idx_type ndims() const
Number of dimensions.
Definition: dim-vector.h:257
void chop_all_singletons()
Definition: dim-vector.cc:50
bool zero_by_zero() const
Definition: dim-vector.h:311
bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
Definition: dim-vector.cc:201
dim_vector redim(int n) const
Force certain dimensionality, preserving numel ().
Definition: dim-vector.cc:226
dim_vector make_nd_vector(octave_idx_type n) const
Definition: dim-vector.h:421
virtual octave_idx_type numel() const
Definition: ov-base.h:391
void set_compare(const compare_fcn_type &comp)
Definition: oct-sort.h:117
void sort_rows(const T *data, octave_idx_type *idx, octave_idx_type rows, octave_idx_type cols)
Definition: oct-sort.cc:1667
static bool descending_compare(typename ref_param< T >::type, typename ref_param< T >::type)
Definition: oct-sort.cc:1977
bool issorted(const T *data, octave_idx_type nel)
Definition: oct-sort.cc:1579
octave_idx_type lookup(const T *data, octave_idx_type nel, const T &value)
Definition: oct-sort.cc:1789
void sort(T *data, octave_idx_type nel)
Definition: oct-sort.cc:1522
void nth_element(T *data, octave_idx_type nel, octave_idx_type lo, octave_idx_type up=-1)
Definition: oct-sort.cc:1947
bool is_sorted_rows(const T *data, octave_idx_type rows, octave_idx_type cols)
Definition: oct-sort.cc:1744
void lookup_sorted(const T *data, octave_idx_type nel, const T *values, octave_idx_type nvalues, octave_idx_type *idx, bool rev=false)
Definition: oct-sort.cc:1900
if_then_else< is_class_type< T >::no, T, T const & >::result type
Definition: lo-traits.h:121
octave::idx_vector idx_vector
Definition: idx-vector.h:1022
void err_invalid_resize()
void err_nonconformant(const char *op, octave_idx_type op1_len, octave_idx_type op2_len)
void err_invalid_index(const std::string &idx, octave_idx_type nd, octave_idx_type dim, const std::string &)
void err_del_index_out_of_range(bool is1d, octave_idx_type idx, octave_idx_type ext)
Complex log2(const Complex &x)
Definition: lo-mappers.cc:141
F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE * d
F77_RET_T const F77_DBLE * x
T octave_idx_type m
Definition: mx-inlines.cc:781
octave_idx_type n
Definition: mx-inlines.cc:761
T * r
Definition: mx-inlines.cc:781
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition: oct-locbuf.h:44
#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value)
Definition: oct-locbuf.h:50
sortmode
Definition: oct-sort.h:97
@ UNSORTED
Definition: oct-sort.h:97
@ ASCENDING
Definition: oct-sort.h:97
@ DESCENDING
Definition: oct-sort.h:97
T::size_type numel(const T &str)
Definition: oct-string.cc:74
const octave_base_value const Array< octave_idx_type > & ra_idx
F77_RET_T len
Definition: xerbla.cc:61