GNU Octave 11.1.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
 
Loading...
Searching...
No Matches
data.cc
Go to the documentation of this file.
1////////////////////////////////////////////////////////////////////////
2//
3// Copyright (C) 1994-2026 The Octave Project Developers
4//
5// See the file COPYRIGHT.md in the top-level directory of this
6// distribution or <https://octave.org/copyright/>.
7//
8// This file is part of Octave.
9//
10// Octave is free software: you can redistribute it and/or modify it
11// under the terms of the GNU General Public License as published by
12// the Free Software Foundation, either version 3 of the License, or
13// (at your option) any later version.
14//
15// Octave is distributed in the hope that it will be useful, but
16// WITHOUT ANY WARRANTY; without even the implied warranty of
17// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18// GNU General Public License for more details.
19//
20// You should have received a copy of the GNU General Public License
21// along with Octave; see the file COPYING. If not, see
22// <https://www.gnu.org/licenses/>.
23//
24////////////////////////////////////////////////////////////////////////
25
26#if defined (HAVE_CONFIG_H)
27# include "config.h"
28#endif
29
30#include <cmath>
31#include <cstddef>
32#include <cstdint>
33#include <ctime>
34
35#include <algorithm>
36#include <limits>
37#include <string>
38
39#include "lo-ieee.h"
40#include "mx-base.h"
41#include "oct-base64.h"
42#include "oct-binmap.h"
43#include "oct-time.h"
44#include "quit.h"
45
46#include "cdef-utils.h"
47#include "Cell.h"
48#include "data.h"
49#include "defun.h"
50#include "error.h"
51#include "errwarn.h"
52#include "interpreter-private.h"
53#include "interpreter.h"
54#include "oct-map.h"
55#include "ov-class.h"
56#include "ov-classdef.h"
57#include "ov-complex.h"
58#include "ov-cx-mat.h"
59#include "ov-cx-sparse.h"
60#include "ov-float.h"
61#include "ov-flt-complex.h"
62#include "ov-flt-cx-mat.h"
63#include "ov.h"
64#include "ovl.h"
65#include "pager.h"
66#include "pt-mat.h"
67#include "utils.h"
68#include "variables.h"
69#include "xnorm.h"
70
72
// Decode the second argument (DIM or VECDIM) of the "all"/"any" built-ins.
//
// Inputs:
//   dimarg -- user-supplied dimension argument (scalar DIM or vector VECDIM)
//   arg    -- the data operand; may be permuted/reshaped in place when
//             VECDIM selects several (but not all) dimensions
//   fcn    -- caller name ("all" or "any") used in error messages
//
// Outputs (by reference):
//   dim      -- zero-based dimension to reduce over; set to ndims when the
//               effective VECDIM is empty so the caller returns ARG as is
//   perm_vec -- permutation applied to ARG; the caller uses it (inverted)
//               to restore the original dimension order of the result
//   do_perm  -- true when ARG was permuted/reshaped and the result needs
//               the inverse permutation
//   allflag  -- set to true when VECDIM covers every dimension of ARG
//
// Errors out on non-positive or duplicate dimensions.
static void
get_dim_vecdim_all (const octave_value& dimarg, octave_value& arg,
                    int& dim, Array<int>& perm_vec, bool& do_perm,
                    bool& allflag, const char *fcn)
{
  if (dimarg.is_scalar_type ())
    {
      // Scalar DIM: convert from 1-based user value to 0-based index.
      dim = dimarg.int_value () - 1;
      if (dim < 0)
        error ("%s: invalid dimension DIM = %d", fcn, dim + 1);
    }
  else
    {
      Array<int> vec = dimarg.int_vector_value ();
      std::vector<int> vecdim;
      dim_vector sz = arg.dims ();
      int ndims = arg.ndims ();
      // Check for invalid dims and ignore any dims larger than actual ndims
      for (int i = 0; i < vec.numel (); i++)
        {
          vec(i)--;
          if (vec(i) < 0)
            error ("%s: invalid dimension in VECDIM = %d", fcn, vec(i)+1);
          if (vec(i) < ndims)
            vecdim.push_back (vec(i));
        }
      int n = vecdim.size ();
      // If no dimensions left, set DIM = ndims to return input as is
      if (n == 0)
        {
          dim = ndims;
          return;
        }
      // Accumulated size of the collapsed VECDIM dimensions.
      octave_idx_type szvecdim = 1;
      // Check for duplicate dims and add VECDIM to permutation vector
      perm_vec.resize (dim_vector (1, ndims));
      std::sort (vecdim.begin (), vecdim.end ());
      // Check for duplicates FIRST before any array writes
      auto dup = std::adjacent_find (vecdim.begin (), vecdim.end ());
      if (dup != vecdim.end ())
        error ("%s: duplicate dimension in VECDIM = %d", fcn, *dup + 1);

      // Verified vecdim has unique entries in [0, ndims-1], hence n <= ndims
      // The VECDIM dimensions are placed at the tail of the permutation.
      int out_pos = ndims - n;
      for (int d : vecdim)
        {
          szvecdim *= sz (d);
          perm_vec(out_pos++) = d;
        }

      // Parse vecdim
      if (n == 1)
        // Only one dimension given, treat as if dim were specified instead
        dim = vecdim[0];
      else if (ndims == n)
        // vecdim contains all dimensions, treat as if "all" flag given.
        allflag = true;
      else
        {
          dim_vector new_sz;
          new_sz.resize (ndims - n + 1);
          int idx = 0;
          // Add remaining dims to permutation vector
          for (int i = 0; i < ndims; i++)
            {
              if (std::find (vecdim.begin (), vecdim.end (), i)
                  == vecdim.end ())
                {
                  perm_vec(idx) = i;
                  new_sz(idx) = sz(i);
                  idx++;
                }
            }
          // Fold all VECDIM dimensions into one trailing dimension, then
          // let the caller reduce over that single dimension.
          new_sz(idx) = szvecdim;
          arg = arg.permute (perm_vec, false);
          arg = arg.reshape (new_sz);
          do_perm = true;
          dim = idx;
        }
    }
}
154
155DEFUN (all, args, ,
156 doc: /* -*- texinfo -*-
157@deftypefn {} {@var{tf} =} all (@var{x})
158@deftypefnx {} {@var{tf} =} all (@var{x}, @var{dim})
159@deftypefnx {} {@var{tf} =} all (@var{x}, @var{vecdim})
160@deftypefnx {} {@var{tf} =} all (@var{x}, "all")
161Return true (logical 1) if all elements are nonzero or true.
162
163If @var{x} is a vector, then @code{all (@var{x})} returns true (logical 1) if
164all elements of the vector are nonzero.
165
166If @var{x} is a matrix, then @code{all (@var{x})} returns a row vector of
167logical ones and zeros where each element indicates whether all of the elements
168of the corresponding column of the matrix are nonzero.
169
170If @var{x} is an array, then @code{all(@var{x})} operates along the first
171non-singleton dimension of @var{x} and returns a logical array, whose size is
172equal to @var{x} except for the operating dimension which becomes 1.
173
174The optional input @var{dim} specifies the dimension to operate on and must be
175a positive integer. Specifying any singleton dimension in @var{x}, including
176any dimension exceeding @code{ndims (@var{x})}, will return @var{x}.
177
178Specifying multiple dimensions with input @var{vecdim}, a vector of
179non-repeating dimensions, will operate along the array slice defined by
180@var{vecdim}. If @var{vecdim} indexes all dimensions of @var{x}, then it is
181equivalent to the option @qcode{"all"}. Any dimension in @var{vecdim} greater
182than @code{ndims (@var{x})} is ignored. The size of the dimensions specified
183by @var{vecdim} become 1 in the returned logical array.
184
185Specifying the dimension as @qcode{"all"} will cause @code{all} to operate on
186all elements of @var{x}, and is equivalent to @code{all (@var{x}(:))}.
187@seealso{any}
188@end deftypefn */)
189{
190 int nargin = args.length ();
191
192 bool do_perm = false;
193 bool allflag = false;
194
195 while (nargin > 1 && args(nargin - 1).is_string ())
196 {
197 std::string str = args(nargin - 1).string_value ();
198
199 if (str == "all")
200 allflag = true;
201 else
202 error ("all: unrecognized optional argument '%s'", str.c_str ());
203
204 nargin--;
205 }
206
207 if (nargin < 1 || nargin > 2)
208 print_usage ();
209 if (allflag && nargin > 1)
210 error ("all: cannot set DIM or VECDIM with 'all' flag");
211
212 octave_value arg = args(0);
213
214 // Handle DIM, VECDIM
215 int dim = -1;
216 Array<int> perm_vec;
217 if (nargin == 2)
218 {
219 octave_value dimarg = args(1);
220 get_dim_vecdim_all (dimarg, arg, dim, perm_vec, do_perm, allflag, "all");
221 }
222
223 // Handle allflag
224 if (allflag)
225 arg = arg.reshape (dim_vector (arg.numel (), 1));
226
227 octave_value retval = arg.all (dim);
228
229 if (do_perm)
230 retval = retval.permute (perm_vec, true);
231
232 return retval;
233}
234
235/*
236%!test
237%! x = ones (3);
238%! x(1,1) = 0;
239%! assert (all (all (rand (3) + 1) == [1, 1, 1]) == 1);
240%! assert (all (all (x) == [0, 1, 1]) == 1);
241%! assert (all (x, 1) == [0, 1, 1]);
242%! assert (all (x, 2) == [0; 1; 1]);
243
244%!test
245%! x = ones (3, "single");
246%! x(1,1) = 0;
247%! assert (all (all (single (rand (3) + 1)) == [1, 1, 1]) == 1);
248%! assert (all (all (x) == [0, 1, 1]) == 1);
249%! assert (all (x, 1) == [0, 1, 1]);
250%! assert (all (x, 2) == [0; 1; 1]);
251
252%!test
253%! x = ones (3, 3, 3);
254%! x(3) = 0;
255%! y = all (x);
256%! assert (y(:,:,1), logical ([0, 1, 1]));
257%! assert (y(:,:,2), logical ([1, 1, 1]));
258%! assert (y(:,:,3), logical ([1, 1, 1]));
259%! y = all (x, [1, 2]);
260%! assert (y(:,:,1), false);
261%! assert (y(:,:,2), true);
262%! assert (y(:,:,3), true);
263%! assert (all (x, [1, 3]), logical ([0, 1, 1]));
264%! assert (all (x, [2, 3]), logical ([1; 1; 0]));
265%! assert (all (x, "all"), false);
266
267%!assert (all (ones (2), 3), logical (ones (2)))
268%!assert (all (ones (2), [3, 5]), logical (ones (2)))
269
270## Test empty matrices
271%!assert (all ([]), true)
272%!assert (all ([], 1), true (1, 0))
273%!assert (all ([], 2), true (0, 1))
274%!assert (all ([], 3), true (0, 0))
275%!assert (all (ones (1,0)), true)
276%!assert (all (ones (1,0), 1), true (1, 0))
277%!assert (all (ones (1,0), 2), true)
278%!assert (all (ones (1,0), 3), true (1, 0))
279%!assert (all (ones (0,1)), true)
280%!assert (all (ones (0,1), 1), true)
281%!assert (all (ones (0,1), 2), true (0, 1))
282%!assert (all (ones (0,1), 3), true (0, 1))
283
284## Test sparse matrices
285%!assert (all (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 0, NaN]), 2),
286%! sparse ([true; false]))
287%!assert (all (sparse ([NaN, 0, 1, 4, 2; 1, 2, 1, 2, NaN]), 2),
288%! sparse ([false; true]))
289
290## Test empty sparse matrices
291%!assert (all (sparse ([])), sparse (true))
292%!assert (all (sparse ([]), 1), sparse (true (1, 0)))
293%!assert (all (sparse ([]), 2), sparse (true (0, 1)))
294%!assert (all (sparse ([]), 3), sparse (true (0, 0)))
295%!assert (all (sparse (ones (1,0))), sparse (true))
296%!assert (all (sparse (ones (1,0)), 1), sparse (true (1, 0)))
297%!assert (all (sparse (ones (1,0)), 2), sparse (true))
298%!assert (all (sparse (ones (1,0)), 3), sparse (true (1, 0)))
299%!assert (all (sparse (ones (0,1))), sparse (true))
300%!assert (all (sparse (ones (0,1)), 1), sparse (true))
301%!assert (all (sparse (ones (0,1)), 2), sparse (true (0, 1)))
302%!assert (all (sparse (ones (0,1)), 3), sparse (true (0, 1)))
303
304## Test input validation
305%!error <Invalid call> all ()
306%!error <Invalid call> all (1,2,3)
307%!error <unrecognized optional argument 'foobar'> all (1, "foobar")
308%!error <cannot set DIM or VECDIM with 'all' flag>
309%! all (ones (3,3), 1, "all");
310%!error <cannot set DIM or VECDIM with 'all' flag>
311%! all (ones (3,3), [1, 2], "all");
312%!error <invalid dimension DIM = 0> all (ones (3,3), 0)
313%!error <invalid dimension DIM = -1> all (ones (3,3), -1)
314%!error <invalid dimension in VECDIM = -2> all (ones (3,3), [1 -2])
315%!error <duplicate dimension in VECDIM = 2> all (ones (3,3), [1 2 2])
316%!error <duplicate dimension in VECDIM = 1> all (ones (3,3), [1 1 2])
317*/
318
319DEFUN (any, args, ,
320 doc: /* -*- texinfo -*-
321@deftypefn {} {@var{tf} =} any (@var{x})
322@deftypefnx {} {@var{tf} =} any (@var{x}, @var{dim})
323@deftypefnx {} {@var{tf} =} any (@var{x}, @var{vecdim})
324@deftypefnx {} {@var{tf} =} any (@var{x}, "all")
325Return true (logical 1) if any elements are nonzero or true.
326
327If @var{x} is a vector, then @code{any (@var{x})} returns true (logical 1) if
328any elements of the vector are nonzero.
329
330If @var{x} is a matrix, then @code{any (@var{x})} returns a row vector of
331logical ones and zeros where each element indicates whether any of the elements
332of the corresponding column of the matrix are nonzero.
333
334If @var{x} is an array, then @code{any(@var{x})} operates along the first
335non-singleton dimension of @var{x} and returns a logical array, whose size is
336equal to @var{x} except for the operating dimension which becomes 1.
337
338The optional input @var{dim} specifies the dimension to operate on and must be
339a positive integer. Specifying any singleton dimension in @var{x}, including
340any dimension exceeding @code{ndims (@var{x})}, will return @var{x}.
341
342Specifying multiple dimensions with input @var{vecdim}, a vector of
343non-repeating dimensions, will operate along the array slice defined by
344@var{vecdim}. If @var{vecdim} indexes all dimensions of @var{x}, then it is
345equivalent to the option @qcode{"all"}. Any dimension in @var{vecdim} greater
346than @code{ndims (@var{x})} is ignored. The size of the dimensions specified
347by @var{vecdim} become 1 in the returned logical array.
348
349Specifying the dimension as @qcode{"all"} will cause @code{any} to operate on
350all elements of @var{x}, and is equivalent to @code{any (@var{x}(:))}.
351@seealso{all}
352@end deftypefn */)
353{
354 int nargin = args.length ();
355
356 bool do_perm = false;
357 bool allflag = false;
358
359 while (nargin > 1 && args(nargin - 1).is_string ())
360 {
361 std::string str = args(nargin - 1).string_value ();
362
363 if (str == "all")
364 allflag = true;
365 else
366 error ("any: unrecognized optional argument '%s'", str.c_str ());
367
368 nargin--;
369 }
370
371 if (nargin < 1 || nargin > 2)
372 print_usage ();
373 if (allflag && nargin > 1)
374 error ("any: cannot set DIM or VECDIM with 'all' flag");
375
376 octave_value arg = args(0);
377
378 // Handle DIM, VECDIM
379 int dim = -1;
380 Array<int> perm_vec;
381 if (nargin == 2)
382 {
383 octave_value dimarg = args(1);
384 get_dim_vecdim_all (dimarg, arg, dim, perm_vec, do_perm, allflag, "any");
385 }
386
387 // Handle allflag
388 if (allflag)
389 arg = arg.reshape (dim_vector (arg.numel (), 1));
390
391 octave_value retval = arg.any (dim);
392
393 if (do_perm)
394 retval = retval.permute (perm_vec, true);
395
396 return retval;
397}
398
399/*
400%!test
401%! x = zeros (3);
402%! x(3,3) = 1;
403%! assert (all (any (x) == [0, 0, 1]) == 1);
404%! assert (all (any (ones (3)) == [1, 1, 1]) == 1);
405%! assert (any (x, 1) == [0, 0, 1]);
406%! assert (any (x, 2) == [0; 0; 1]);
407
408%!test
409%! x = zeros (3, "single");
410%! x(3,3) = 1;
411%! assert (all (any (x) == [0, 0, 1]) == 1);
412%! assert (all (any (ones (3, "single")) == [1, 1, 1]) == 1);
413%! assert (any (x, 1) == [0, 0, 1]);
414%! assert (any (x, 2) == [0; 0; 1]);
415
416%!test
417%! x = zeros (3, 3, 3);
418%! x(3) = 1;
419%! y = any (x);
420%! assert (y(:,:,1), logical ([1, 0, 0]));
421%! assert (y(:,:,2), logical ([0, 0, 0]));
422%! assert (y(:,:,3), logical ([0, 0, 0]));
423%! y = any (x, [1, 2]);
424%! assert (y(:,:,1), true);
425%! assert (y(:,:,2), false);
426%! assert (y(:,:,3), false);
427%! assert (any (x, [1, 3]), logical ([1, 0, 0]));
428%! assert (any (x, [2, 3]), logical ([0; 0; 1]));
429%! assert (any (x, "all"), true);
430
431## Test empty matrices
432%!assert (any ([]), false)
433%!assert (any ([], 1), false (1, 0))
434%!assert (any ([], 2), false (0, 1))
435%!assert (any ([], 3), false (0, 0))
436%!assert (any (ones (1,0)), false)
437%!assert (any (ones (1,0), 1), false (1, 0))
438%!assert (any (ones (1,0), 2), false)
439%!assert (any (ones (1,0), 3), false (1, 0))
440%!assert (any (ones (0,1)), false)
441%!assert (any (ones (0,1), 1), false)
442%!assert (any (ones (0,1), 2), false (0, 1))
443%!assert (any (ones (0,1), 3), false (0, 1))
444
445## Test sparse matrices
446%!assert (any (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 0, NaN]), 2),
447%! sparse ([true; true]))
448%!assert (any (sparse ([0, 0, 0, 0, 0; 1, 2, 1, 2, NaN]), 2),
449%! sparse ([false; true]))
450%!assert (any (sparse ([0, 0, 0, 0i, 0; 1, 2, 1, 2, NaN]), 2),
451%! sparse ([false; true]))
452%!assert (any (sparse ([0, 0, 0, 0+i, 0; 1, 2, 1, 2, NaN]), 2),
453%! sparse ([true; true]))
454
455## Test empty sparse matrices
456%!assert (any (sparse ([])), sparse (false))
457%!assert (any (sparse ([]), 1), sparse (false (1, 0)))
458%!assert (any (sparse ([]), 2), sparse (false (0, 1)))
459%!assert (any (sparse ([]), 3), sparse (false (0, 0)))
460%!assert (any (sparse (ones (1,0))), sparse (false))
461%!assert (any (sparse (ones (1,0)), 1), sparse (false (1, 0)))
462%!assert (any (sparse (ones (1,0)), 2), sparse (false))
463%!assert (any (sparse (ones (1,0)), 3), sparse (false (1, 0)))
464%!assert (any (sparse (ones (0,1))), sparse (false))
465%!assert (any (sparse (ones (0,1)), 1), sparse (false))
466%!assert (any (sparse (ones (0,1)), 2), sparse (false (0, 1)))
467%!assert (any (sparse (ones (0,1)), 3), sparse (false (0, 1)))
468
469## Test input validation
470%!error <Invalid call> any ()
471%!error <Invalid call> any (1,2,3)
472%!error <unrecognized optional argument 'foobar'> any (1, "foobar")
473%!error <cannot set DIM or VECDIM with 'all' flag>
474%! any (ones (3,3), 1, "all");
475%!error <cannot set DIM or VECDIM with 'all' flag>
476%! any (ones (3,3), [1, 2], "all");
477%!error <invalid dimension DIM = 0> any (ones (3,3), 0)
478%!error <invalid dimension DIM = -1> any (ones (3,3), -1)
479%!error <invalid dimension in VECDIM = -2> any (ones (3,3), [1 -2])
480%!error <duplicate dimension in VECDIM = 2> any (ones (3,3), [1 2 2])
481%!error <duplicate dimension in VECDIM = 1> any (ones (3,3), [1 1 2])
482*/
483
484// These mapping functions may also be useful in other places, eh?
485
DEFUN (atan2, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{angle} =} atan2 (@var{y}, @var{x})
Compute atan (@var{y} / @var{x}) for corresponding elements of @var{y} and
@var{x}.

@var{y} and @var{x} must match in size and orientation. The signs of elements
of @var{y} and @var{x} are used to determine the quadrants of each resulting
value.

This function is equivalent to @code{arg (complex (@var{x}, @var{y}))}.
@seealso{tan, tand, tanh, atanh}
@end deftypefn */)
{
  if (args.length () != 2)
    print_usage ();

  octave_value retval;

  // Both operands must be real numeric values.
  if (! args(0).isnumeric ())
    err_wrong_type_arg ("atan2", args(0));

  if (! args(1).isnumeric ())
    err_wrong_type_arg ("atan2", args(1));

  if (args(0).iscomplex () || args(1).iscomplex ())
    error ("atan2: not defined for complex numbers");

  if (args(0).is_single_type () || args(1).is_single_type ())
    {
      // Single-precision result if either input is single.
      if (args(0).is_scalar_type () && args(1).is_scalar_type ())
        retval = atan2f (args(0).float_value (), args(1).float_value ());
      else
        {
          FloatNDArray a0 = args(0).float_array_value ();
          FloatNDArray a1 = args(1).float_array_value ();
          retval = binmap<float> (a0, a1, std::atan2, "atan2");
        }
    }
  else
    {
      if (args(0).is_scalar_type () && args(1).is_scalar_type ())
        retval = atan2 (args(0).scalar_value (), args(1).scalar_value ());
      else if (args(0).issparse ())
        {
          // NOTE: only the first argument selects this sparse path, so
          // atan2 (full, sparse) yields a full result (see BIST tests).
          SparseMatrix m0 = args(0).sparse_matrix_value ();
          SparseMatrix m1 = args(1).sparse_matrix_value ();
          retval = binmap<double> (m0, m1, std::atan2, "atan2");
        }
      else
        {
          NDArray a0 = args(0).array_value ();
          NDArray a1 = args(1).array_value ();
          retval = binmap<double> (a0, a1, std::atan2, "atan2");
        }
    }

  return retval;
}
545
546/*
547%!assert (size (atan2 (zeros (0, 2), zeros (0, 2))), [0, 2])
548%!assert (size (atan2 (rand (2, 3, 4), zeros (2, 3, 4))), [2, 3, 4])
549%!assert (size (atan2 (rand (2, 3, 4), 1)), [2, 3, 4])
550%!assert (size (atan2 (1, rand (2, 3, 4))), [2, 3, 4])
551%!assert (size (atan2 (1, 2)), [1, 1])
552
553%!test
554%! rt2 = sqrt (2);
555%! rt3 = sqrt (3);
556%! v = [0, pi/6, pi/4, pi/3, -pi/3, -pi/4, -pi/6, 0];
557%! y = [0, rt3, 1, rt3, -rt3, -1, -rt3, 0];
558%! x = [1, 3, 1, 1, 1, 1, 3, 1];
559%! assert (atan2 (y, x), v, sqrt (eps));
560
561%!test
562%! rt2 = sqrt (2);
563%! rt3 = sqrt (3);
564%! v = single ([0, pi/6, pi/4, pi/3, -pi/3, -pi/4, -pi/6, 0]);
565%! y = single ([0, rt3, 1, rt3, -rt3, -1, -rt3, 0]);
566%! x = single ([1, 3, 1, 1, 1, 1, 3, 1]);
567%! assert (atan2 (y, x), v, sqrt (eps ("single")));
568
569%!assert (any (ones (2), 3), logical (ones (2)))
570%!assert (any (ones (2), [3, 5]), logical (ones (2)))
571
572## Test sparse implementations
573%!shared xs
574%! xs = sparse (0:3);
575%!test
576%! y = atan2 (1, xs);
577%! assert (issparse (y), false);
578%! assert (nnz (y), 4);
579%! assert (y, atan2 (1, 0:3));
580%!test
581%! y = atan2 (0, xs);
582%! assert (issparse (y), false);
583%! assert (nnz (y), 0);
584%! assert (y, zeros (1,4));
585%!test
586%! y = atan2 (xs, 1);
587%! assert (issparse (y));
588%! assert (nnz (y), 3);
589%! assert (y, sparse (atan2 (0:3, 1)));
590%!test
591%! y = atan2 (xs, 0);
592%! assert (issparse (y));
593%! assert (nnz (y), 3);
594%! assert (y, sparse (atan2 (0:3, 0)));
595%!test
596%! y = atan2 (xs, sparse (ones (1, 4)));
597%! assert (issparse (y));
598%! assert (nnz (y), 3);
599%! assert (y, sparse (atan2 (0:3, ones (1,4))));
600%!test
601%! y = atan2 (xs, sparse (zeros (1,4)));
602%! assert (issparse (y));
603%! assert (nnz (y), 3);
604%! assert (y, sparse (atan2 (0:3, zeros (1,4))));
605
606%!error atan2 ()
607%!error atan2 (1, 2, 3)
608*/
609
610static octave_value
611do_hypot (const octave_value& x, const octave_value& y)
612{
613 octave_value retval;
614
615 octave_value arg0 = x;
616 octave_value arg1 = y;
617 if (! arg0.isnumeric ())
618 err_wrong_type_arg ("hypot", arg0);
619 if (! arg1.isnumeric ())
620 err_wrong_type_arg ("hypot", arg1);
621
622 if (arg0.iscomplex ())
623 arg0 = arg0.abs ();
624 if (arg1.iscomplex ())
625 arg1 = arg1.abs ();
626
627 if (arg0.is_single_type () || arg1.is_single_type ())
628 {
629 if (arg0.is_scalar_type () && arg1.is_scalar_type ())
630 retval = hypotf (arg0.float_value (), arg1.float_value ());
631 else
632 {
633 FloatNDArray a0 = arg0.float_array_value ();
634 FloatNDArray a1 = arg1.float_array_value ();
635 retval = binmap<float> (a0, a1, std::hypot, "hypot");
636 }
637 }
638 else
639 {
640 if (arg0.is_scalar_type () && arg1.is_scalar_type ())
641 retval = hypot (arg0.scalar_value (), arg1.scalar_value ());
642 else if (arg0.issparse () || arg1.issparse ())
643 {
646 retval = binmap<double> (m0, m1, std::hypot, "hypot");
647 }
648 else
649 {
650 NDArray a0 = arg0.array_value ();
651 NDArray a1 = arg1.array_value ();
652 retval = binmap<double> (a0, a1, std::hypot, "hypot");
653 }
654 }
655
656 return retval;
657}
658
659DEFUN (hypot, args, ,
660 doc: /* -*- texinfo -*-
661@deftypefn {} {@var{h} =} hypot (@var{x}, @var{y})
662@deftypefnx {} {@var{h} =} hypot (@var{x}, @var{y}, @var{z}, @dots{})
663Compute the element-by-element square root of the sum of the squares of
664@var{x} and @var{y}.
665
666This is equivalent to
667@code{sqrt (@var{x}.^2 + @var{y}.^2)}, but is calculated in a manner that
668avoids overflows for large values of @var{x} or @var{y}.
669
670@code{hypot} can also be called with more than 2 arguments; in this case,
671the arguments are accumulated from left to right:
672
673@example
674@group
675hypot (hypot (@var{x}, @var{y}), @var{z})
676hypot (hypot (hypot (@var{x}, @var{y}), @var{z}), @var{w}), etc.
677@end group
678@end example
679@end deftypefn */)
680{
681 int nargin = args.length ();
682
683 if (nargin < 2)
684 print_usage ();
685
686 octave_value retval;
687
688 if (nargin == 2)
689 retval = do_hypot (args(0), args(1));
690 else
691 {
692 retval = args(0);
693
694 for (int i = 1; i < nargin; i++)
695 retval = do_hypot (retval, args(i));
696 }
697
698 return retval;
699}
700
701/*
702%!assert (size (hypot (zeros (0, 2), zeros (0, 2))), [0, 2])
703%!assert (size (hypot (rand (2, 3, 4), zeros (2, 3, 4))), [2, 3, 4])
704%!assert (size (hypot (rand (2, 3, 4), 1)), [2, 3, 4])
705%!assert (size (hypot (1, rand (2, 3, 4))), [2, 3, 4])
706%!assert (size (hypot (1, 2)), [1, 1])
707%!assert (hypot (1:10, 1:10), sqrt (2) * [1:10], 16*eps)
708%!assert (hypot (single (1:10), single (1:10)), single (sqrt (2) * [1:10]))
709
710## Test sparse implementations
711%!shared xs
712%! xs = sparse (0:3);
713%!test
714%! y = hypot (1, xs);
715%! assert (nnz (y), 4);
716%! assert (y, sparse (hypot (1, 0:3)));
717%!test
718%! y = hypot (0, xs);
719%! assert (nnz (y), 3);
720%! assert (y, xs);
721%!test
722%! y = hypot (xs, 1);
723%! assert (nnz (y), 4);
724%! assert (y, sparse (hypot (0:3, 1)));
725%!test
726%! y = hypot (xs, 0);
727%! assert (nnz (y), 3);
728%! assert (y, xs);
729%!test
730%! y = hypot (sparse ([0 0]), sparse ([0 1]));
731%! assert (nnz (y), 1);
732%! assert (y, sparse ([0 1]));
733%!test
734%! y = hypot (sparse ([0 1]), sparse ([0 0]));
735%! assert (nnz (y), 1);
736%! assert (y, sparse ([0 1]));
737
738*/
739
740template <typename T, typename ET>
741void
743{
744 f = Array<T> (x.dims ());
745 e = Array<ET> (x.dims ());
746 for (octave_idx_type i = 0; i < x.numel (); i++)
747 {
748 int exp;
749 f.xelem (i) = math::log2 (x(i), exp);
750 e.xelem (i) = exp;
751 }
752}
753
754DEFUN (log2, args, nargout,
755 doc: /* -*- texinfo -*-
756@deftypefn {} {@var{y} =} log2 (@var{x})
757@deftypefnx {} {[@var{f}, @var{e}] =} log2 (@var{x})
758Compute the base-2 logarithm of each element of @var{x}.
759
760If called with one output, compute the base-2 logarithm such that
761@tex
762$2^y = x$.
763@end tex
764@ifnottex
765@code{2^@var{y} = @var{x}}.
766@end ifnottex
767
768If called with two output arguments, split @var{x} into binary mantissa
769(@var{f}) and exponent (@var{e}) such that
770@tex
771$x = f \cdot 2^e$
772@end tex
773@ifnottex
774@code{@var{x} = @var{f} * 2^@var{e}}
775@end ifnottex
776where
777@tex
778${1 \over 2} \le \left| f \right| < 1$
779@end tex
780@ifnottex
781@w{@code{1/2 <= abs (@var{f}) < 1}}
782@end ifnottex
783and @var{e} is an integer. If
784@tex
785$x = 0$, $f = e = 0$.
786@end tex
787@ifnottex
788@w{@code{x = 0}}, @w{@code{f = e = 0}}.
789@end ifnottex
790@seealso{pow2, log, log10, exp}
791@end deftypefn */)
792{
793 if (args.length () != 1)
794 print_usage ();
795
796 octave_value_list retval;
797
798 if (nargout < 2)
799 retval = ovl (args(0).log2 ());
800 else if (args(0).is_single_type ())
801 {
802 if (args(0).isreal ())
803 {
805 FloatNDArray x = args(0).float_array_value ();
806 // FIXME: should E be an int value?
807 FloatMatrix e;
808 map_2_xlog2 (x, f, e);
809 retval = ovl (f, e);
810 }
811 else if (args(0).iscomplex ())
812 {
814 FloatComplexNDArray x = args(0).float_complex_array_value ();
815 // FIXME: should E be an int value?
816 FloatNDArray e;
817 map_2_xlog2 (x, f, e);
818 retval = ovl (f, e);
819 }
820 }
821 else if (args(0).isreal ())
822 {
823 NDArray f;
824 NDArray x = args(0).array_value ();
825 // FIXME: should E be an int value?
826 Matrix e;
827 map_2_xlog2 (x, f, e);
828 retval = ovl (f, e);
829 }
830 else if (args(0).iscomplex ())
831 {
833 ComplexNDArray x = args(0).complex_array_value ();
834 // FIXME: should E be an int value?
835 NDArray e;
836 map_2_xlog2 (x, f, e);
837 retval = ovl (f, e);
838 }
839 else
840 err_wrong_type_arg ("log2", args(0));
841
842 return retval;
843}
844
845/*
846%!assert (log2 ([1/4, 1/2, 1, 2, 4]), [-2, -1, 0, 1, 2])
847%!assert (log2 (Inf), Inf)
848%!assert (isnan (log2 (NaN)))
849%!assert (log2 (4*i), 2 + log2 (1*i))
850%!assert (log2 (complex (0,Inf)), Inf + log2 (i))
851
852%!test
853%! [f, e] = log2 ([0,-1; 2,-4; Inf,-Inf]);
854%! assert (f, [0,-0.5; 0.5,-0.5; Inf,-Inf]);
855%! assert (e(1:2,:), [0,1;2,3]);
856
857%!test
858%! [f, e] = log2 (complex (zeros (3, 2), [0,-1; 2,-4; Inf,-Inf]));
859%! assert (f, complex (zeros (3, 2), [0,-0.5; 0.5,-0.5; Inf,-Inf]));
860%! assert (e(1:2,:), [0,1; 2,3]);
861
862%!assert <*42583> (all (log2 (pow2 (-1074:1023)) == -1074:1023))
863*/
864
865DEFUN (rem, args, ,
866 doc: /* -*- texinfo -*-
867@deftypefn {} {@var{r} =} rem (@var{x}, @var{y})
868Return the remainder of the division @code{@var{x} / @var{y}}.
869
870The remainder is computed using the expression
871
872@example
873x - y .* fix (x ./ y)
874@end example
875
876An error message is printed if the dimensions of the arguments do not agree,
877or if either argument is complex.
878
879Programming Notes: When calculating with floating point numbers (double,
880single), values within a few eps of an integer will be rounded to that
881integer before computation for compatibility with @sc{matlab}. Any floating
882point integers greater than @code{flintmax} (2^53 for double) will not compute
883correctly. For larger integer values convert the input to @code{uint64} before
884calling this function.
885
886By convention,
887
888@example
889@group
890rem (@var{x}, 0) = NaN if @var{x} is a floating point variable
891rem (@var{x}, 0) = 0 if @var{x} is an integer variable
892rem (@var{x}, @var{y}) returns a value with the signbit from @var{x}
893@end group
894@end example
895
896For the opposite conventions see the @code{mod} function. In general,
897@code{rem} is best when computing the remainder after division of two
898@emph{positive} numbers. For negative numbers, or when the values are
899periodic, @code{mod} is a better choice.
900@seealso{mod}
901@end deftypefn */)
902{
903 if (args.length () != 2)
904 print_usage ();
905
906 octave_value retval;
907
908 if (! args(0).isnumeric ())
909 err_wrong_type_arg ("rem", args(0));
910
911 if (! args(1).isnumeric ())
912 err_wrong_type_arg ("rem", args(1));
913
914 if (args(0).iscomplex () || args(1).iscomplex ())
915 error ("rem: not defined for complex numbers");
916
917 if (args(0).isinteger () || args(1).isinteger ())
918 {
919 builtin_type_t btyp0 = args(0).builtin_type ();
920 builtin_type_t btyp1 = args(1).builtin_type ();
921 if (btyp0 == btyp_double || btyp0 == btyp_float)
922 btyp0 = btyp1;
923 if (btyp1 == btyp_double || btyp1 == btyp_float)
924 btyp1 = btyp0;
925
926 if (btyp0 != btyp1)
927 error ("rem: cannot combine %s and %s",
928 args(0).class_name ().c_str (),
929 args(1).class_name ().c_str ());
930
931 switch (btyp0)
932 {
933#define MAKE_INT_BRANCH(X) \
934 case btyp_ ## X: \
935 { \
936 X##NDArray a0 = args(0).X##_array_value (); \
937 X##NDArray a1 = args(1).X##_array_value (); \
938 retval = binmap<octave_##X,octave_##X,octave_##X> (a0, a1, rem, "rem"); \
939 } \
940 break
941
942 MAKE_INT_BRANCH (int8);
943 MAKE_INT_BRANCH (int16);
944 MAKE_INT_BRANCH (int32);
945 MAKE_INT_BRANCH (int64);
946 MAKE_INT_BRANCH (uint8);
947 MAKE_INT_BRANCH (uint16);
948 MAKE_INT_BRANCH (uint32);
949 MAKE_INT_BRANCH (uint64);
950
951#undef MAKE_INT_BRANCH
952
953 case btyp_double:
954 case btyp_float:
955 case btyp_complex:
957 case btyp_bool:
958 case btyp_char:
959 case btyp_struct:
960 case btyp_cell:
961 case btyp_func_handle:
962 case btyp_unknown:
963 error ("rem: unexpected: found %s instead of integer - please report this bug", btyp_class_name[btyp0].c_str ());
964 break;
965
966 // We should have handled all possible enum values above.
967 // Rely on compiler diagnostics to warn if we haven't. For
968 // example, GCC's -Wswitch option, enabled by -Wall, will
969 // provide a warning.
970 }
971 }
972 else if (args(0).is_single_type () || args(1).is_single_type ())
973 {
974 if (args(0).is_scalar_type () && args(1).is_scalar_type ())
975 retval = math::rem (args(0).float_value (), args(1).float_value ());
976 else
977 {
978 FloatNDArray a0 = args(0).float_array_value ();
979 FloatNDArray a1 = args(1).float_array_value ();
980 retval = binmap<float> (a0, a1, math::rem<float>, "rem");
981 }
982 }
983 else
984 {
985 if (args(0).is_scalar_type () && args(1).is_scalar_type ())
986 retval = math::rem (args(0).scalar_value (), args(1).scalar_value ());
987 else if (args(0).issparse () || args(1).issparse ())
988 {
989 SparseMatrix m0 = args(0).sparse_matrix_value ();
990 SparseMatrix m1 = args(1).sparse_matrix_value ();
991 retval = binmap<double> (m0, m1, math::rem<double>, "rem");
992 }
993 else
994 {
995 NDArray a0 = args(0).array_value ();
996 NDArray a1 = args(1).array_value ();
997 retval = binmap<double> (a0, a1, math::rem<double>, "rem");
998 }
999 }
1000
1001 return retval;
1002}
1003
1004/*
1005%!assert (size (rem (zeros (0, 2), zeros (0, 2))), [0, 2])
1006%!assert (size (rem (rand (2, 3, 4), zeros (2, 3, 4))), [2, 3, 4])
1007%!assert (size (rem (rand (2, 3, 4), 1)), [2, 3, 4])
1008%!assert (size (rem (1, rand (2, 3, 4))), [2, 3, 4])
1009%!assert (size (rem (1, 2)), [1, 1])
1010
1011%!assert (rem ([1, 2, 3; -1, -2, -3], 2), [1, 0, 1; -1, 0, -1])
1012%!assert (rem ([1, 2, 3; -1, -2, -3], 2 * ones (2, 3)),[1, 0, 1; -1, 0, -1])
1013%!assert (rem ([0, 1, 2], [0, 0, 1]), [NaN, NaN, 0])
1014%!assert (rem (uint8 ([1, 2, 3; -1, -2, -3]), uint8 (2)),
1015%! uint8 ([1, 0, 1; -1, 0, -1]))
1016%!assert (uint8 (rem ([1, 2, 3; -1, -2, -3], 2 * ones (2, 3))),
1017%! uint8 ([1, 0, 1; -1, 0, -1]))
1018%!assert (rem (uint8 ([0, 1, 2]), [0, 0, 1]), uint8 ([0, 0, 0]))
1019
1020## Test sparse implementations
1021%!shared xs
1022%! xs = sparse (0:3);
1023%!test
1024%! y = rem (11, xs);
1025%! assert (isnan (y(1)));
1026%! assert (y, sparse (rem (11, 0:3)));
1027%!test
1028%! y = rem (0, xs);
1029%! assert (nnz (y), 1);
1030%! assert (y, sparse ([NaN 0 0 0]));
1031%!test
1032%! y = rem (xs, 2);
1033%! assert (nnz (y), 2);
1034%! assert (y, sparse (rem (0:3, 2)));
1035%!test
1036%! y = rem (xs, 1);
1037%! assert (nnz (y), 0);
1038%! assert (y, sparse (rem (0:3, 1)));
1039%!test
1040%! y = rem (sparse ([11 11 11 11]), xs);
1041%! assert (nnz (y), 3);
1042%! assert (y, sparse (rem (11, 0:3)));
1043%!test
1044%! y = rem (sparse ([0 0 0 0]), xs);
1045%! assert (nnz (y), 1);
1046%! assert (y, sparse ([NaN 0 0 0]));
1047
1048%!assert <*45587> (signbit (rem (-0, 1)))
1049%!assert <*45587> (! signbit (rem (0, 1)))
1050
1051%!assert <*42627> (rem (0.94, 0.01), 0.0)
1052
1053## Test rem (x-1, x) for x close to flintmax. Should return x-1 and not zero.
1054%!test <*67339>
1055%! x = flintmax - (10:-1:1);
1056%! assert (rem (x-1, x), x-1);
1057%! x = flintmax ("single") - (10:-1:1);
1058%! assert (rem (x-1, x), x-1);
1059
%!error rem (uint8 (8), int8 (5))
1061%!error rem (uint8 ([1, 2]), uint8 ([3, 4, 5]))
1062%!error rem ()
1063%!error rem (1, 2, 3)
1064%!error rem ([1, 2], [3, 4, 5])
1065%!error rem (i, 1)
1066*/
1067
DEFUN (mod, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{m} =} mod (@var{x}, @var{y})
Compute the modulo of @var{x} and @var{y}.

Conceptually this is given by

@example
x - y .* floor (x ./ y)
@end example

@noindent
and is written such that the correct modulus is returned for integer types.
This function handles negative values correctly. That is,
@w{@code{mod (-1, 3)}}@ is 2, not -1, as @w{@code{rem (-1, 3)}}@ returns.

An error results if the dimensions of the arguments do not agree, or if
either of the arguments is complex.

Programming Notes: When calculating with floating point numbers (double,
single), values within a few eps of an integer will be rounded to that
integer before computation for compatibility with @sc{matlab}. Any floating
point integers greater than @code{flintmax} (2^53 for double) will not compute
correctly. For larger integer values convert the input to @code{uint64} before
calling this function.

By convention,

@example
@group
mod (@var{x}, 0) = @var{x}
mod (@var{x}, @var{y}) returns a value with the signbit from @var{y}
@end group
@end example

For the opposite conventions see the @code{rem} function. In general,
@code{mod} is a better choice than @code{rem} when any of the inputs are
negative numbers or when the values are periodic.
@seealso{rem}
@end deftypefn */)
{
  if (args.length () != 2)
    print_usage ();

  octave_value retval;

  // Only real numeric inputs are supported; reject anything else up front.
  if (! args(0).isnumeric ())
    err_wrong_type_arg ("mod", args(0));

  if (! args(1).isnumeric ())
    err_wrong_type_arg ("mod", args(1));

  if (args(0).iscomplex () || args(1).iscomplex ())
    error ("mod: not defined for complex numbers");

  if (args(0).isinteger () || args(1).isinteger ())
    {
      // Mixed integer/floating input: the floating operand adopts the
      // integer type of the other operand.  Two *different* integer types
      // cannot be combined.
      builtin_type_t btyp0 = args(0).builtin_type ();
      builtin_type_t btyp1 = args(1).builtin_type ();
      if (btyp0 == btyp_double || btyp0 == btyp_float)
        btyp0 = btyp1;
      if (btyp1 == btyp_double || btyp1 == btyp_float)
        btyp1 = btyp0;

      if (btyp0 != btyp1)
        error ("mod: cannot combine %s and %s",
               args(0).class_name ().c_str (),
               args(1).class_name ().c_str ());

      switch (btyp0)
        {
          // Expand one case per integer type: convert both operands to
          // that integer array type and apply octave_int mod element-wise
          // via binmap (handles scalar/array broadcasting and size checks).
#define MAKE_INT_BRANCH(X) \
          case btyp_ ## X: \
            { \
              X##NDArray a0 = args(0).X##_array_value (); \
              X##NDArray a1 = args(1).X##_array_value (); \
              retval = binmap<octave_##X,octave_##X,octave_##X> (a0, a1, mod, "mod"); \
            } \
            break

          MAKE_INT_BRANCH (int8);
          MAKE_INT_BRANCH (int16);
          MAKE_INT_BRANCH (int32);
          MAKE_INT_BRANCH (int64);
          MAKE_INT_BRANCH (uint8);
          MAKE_INT_BRANCH (uint16);
          MAKE_INT_BRANCH (uint32);
          MAKE_INT_BRANCH (uint64);

#undef MAKE_INT_BRANCH

          // After the coercion above only integer types can reach this
          // switch; any other type here is an internal inconsistency.
        case btyp_double:
        case btyp_float:
        case btyp_complex:
        case btyp_float_complex:
        case btyp_bool:
        case btyp_char:
        case btyp_struct:
        case btyp_cell:
        case btyp_func_handle:
        case btyp_unknown:
          error ("mod: unexpected: found %s instead of integer - please report this bug", btyp_class_name[btyp0].c_str ());
          break;

          // We should have handled all possible enum values above.
          // Rely on compiler diagnostics to warn if we haven't.  For
          // example, GCC's -Wswitch option, enabled by -Wall, will
          // provide a warning.
        }
    }
  else if (args(0).is_single_type () || args(1).is_single_type ())
    {
      // Single precision: the result is single.  Fast path for two scalars.
      if (args(0).is_scalar_type () && args(1).is_scalar_type ())
        retval = math::mod (args(0).float_value (), args(1).float_value ());
      else
        {
          FloatNDArray a0 = args(0).float_array_value ();
          FloatNDArray a1 = args(1).float_array_value ();
          retval = binmap<float> (a0, a1, math::mod<float>, "mod");
        }
    }
  else
    {
      // Double precision (dense or sparse).  Fast path for two scalars.
      if (args(0).is_scalar_type () && args(1).is_scalar_type ())
        retval = math::mod (args(0).scalar_value (), args(1).scalar_value ());
      else if (args(0).issparse () || args(1).issparse ())
        {
          SparseMatrix m0 = args(0).sparse_matrix_value ();
          SparseMatrix m1 = args(1).sparse_matrix_value ();
          retval = binmap<double> (m0, m1, math::mod<double>, "mod");
        }
      else
        {
          NDArray a0 = args(0).array_value ();
          NDArray a1 = args(1).array_value ();
          retval = binmap<double> (a0, a1, math::mod<double>, "mod");
        }
    }

  return retval;
}
1209
1210/*
1211## empty input test
1212%!assert (isempty (mod ([], [])))
1213
1214## x mod y, y != 0 tests
1215%!assert (mod (5, 3), 2)
1216%!assert (mod (-5, 3), 1)
1217%!assert (mod (0, 3), 0)
1218%!assert (mod ([-5, 5, 0], [3, 3, 3]), [1, 2, 0])
1219%!assert (mod ([-5; 5; 0], [3; 3; 3]), [1; 2; 0])
1220%!assert (mod ([-5, 5; 0, 3], [3, 3 ; 3, 1]), [1, 2 ; 0, 0])
1221
1222## x mod 0 tests
1223%!assert (mod (5, 0), 5)
1224%!assert (mod (-5, 0), -5)
1225%!assert (mod ([-5, 5, 0], [3, 0, 3]), [1, 5, 0])
1226%!assert (mod ([-5; 5; 0], [3; 0; 3]), [1; 5; 0])
1227%!assert (mod ([-5, 5; 0, 3], [3, 0 ; 3, 1]), [1, 5 ; 0, 0])
1228%!assert (mod ([-5, 5; 0, 3], [0, 0 ; 0, 0]), [-5, 5; 0, 3])
1229
1230## mixed scalar/matrix tests
1231%!assert (mod ([-5, 5; 0, 3], 0), [-5, 5; 0, 3])
1232%!assert (mod ([-5, 5; 0, 3], 3), [1, 2; 0, 0])
1233%!assert (mod (-5, [0,0; 0,0]), [-5, -5; -5, -5])
1234%!assert (mod (-5, [3,0; 3,1]), [1, -5; 1, 0])
1235%!assert (mod (-5, [3,2; 3,1]), [1, 1; 1, 0])
1236
1237## integer types
1238%!assert (mod (uint8 (5), uint8 (4)), uint8 (1))
1239%!assert (mod (uint8 ([1:5]), uint8 (4)), uint8 ([1,2,3,0,1]))
1240%!assert (mod (uint8 ([1:5]), uint8 (0)), uint8 ([1:5]))
1241%!error mod (uint8 (5), int8 (4))
1242
1243## mixed integer/real types
1244%!assert (mod (uint8 (5), 4), uint8 (1))
1245%!assert (mod (5, uint8 (4)), uint8 (1))
1246%!assert (mod (uint8 ([1:5]), 4), uint8 ([1,2,3,0,1]))
1247
1248## non-integer real numbers
1249%!assert (mod (2.1, 0.1), 0)
1250%!assert (mod (2.1, 0.2), 0.1, eps)
1251
1252%!assert <*45587> (signbit (mod (-0, 0)))
1253%!assert <*45587> (! signbit (mod (0, -0)))
1254
1255%!assert <*42627> (mod (0.94, 0.01), 0.0)
1256
1257%!assert <*54602> (mod (int8 (125), int8 (-25)), int8 (0))
1258%!assert <*54602> (mod (int8 (-125), int8 (-25)), int8 (0))
1259%!assert <*54602> (mod (int8 (-125), int8 (0)), int8 (-125))
1260%!assert <*54602> (mod (int8 (0), int8 (-25)), int8 (0))
1261
1262## Test mod (x-1, x) for x close to flintmax. Should return x-1 and not zero.
1263%!test <*67339>
1264%! x = flintmax - (10:-1:1);
1265%! assert (mod (x-1, x), x-1);
1266%! x = flintmax ("single") - (10:-1:1);
1267%! assert (mod (x-1, x), x-1);
1268*/
1269
DEFUN (cumprod, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{y} =} cumprod (@var{x})
@deftypefnx {} {@var{y} =} cumprod (@var{x}, @var{dim})
@deftypefnx {} {@var{y} =} cumprod (@var{x}, @var{vecdim})
@deftypefnx {} {@var{y} =} cumprod (@dots{}, "all")
@deftypefnx {} {@var{y} =} cumprod (@dots{}, @var{direction})
@deftypefnx {} {@var{y} =} cumprod (@dots{}, @var{nanflag})
Compute the cumulative product of elements in @var{x}.

If @var{x} is a vector, then @code{cumprod (@var{x})} returns a vector of the
same size with the cumulative product of @var{x}.

If @var{x} is a matrix, then @code{cumprod (@var{x})} returns a matrix of the
same size with the cumulative product along each column of @var{x}.

If @var{x} is an array, then @code{cumprod(@var{x})} returns an array of the
same size with the cumulative product along the first non-singleton dimension
of @var{x}.

The class of output @var{y} is the same as the class of input @var{x}, unless
@var{x} is logical, in which case @var{y} is double.

The optional input @var{dim} specifies the dimension to operate on and must be
a positive integer. Specifying any singleton dimension in @var{x}, including
any dimension exceeding @code{ndims (@var{x})}, will return @var{x}.

Specifying multiple dimensions with input @var{vecdim}, a vector of
non-repeating dimensions, will operate along the array slice defined by
@var{vecdim}. If @var{vecdim} indexes all dimensions of @var{x}, then it is
equivalent to the option @qcode{"all"}. Any dimension in @var{vecdim} greater
than @code{ndims (@var{x})} is ignored.

Specifying the dimension as @qcode{"all"} will cause @code{cumprod} to operate
on all elements of @var{x}, and is equivalent to @code{cumprod (@var{x}(:))}.

The optional input @var{direction} specifies how the operating dimension is
traversed and can take the following values:

@table @asis
@item @qcode{"forward"} (default)

The cumulative product is computed from beginning (index 1) to end along the
operating dimension.

@item @qcode{"reverse"}

The cumulative product is computed from end to beginning along the operating
dimension.
@end table

The optional variable @var{nanflag} specifies whether to include or exclude
NaN values from the calculation using any of the previously specified input
argument combinations. The default value for @var{nanflag} is
@qcode{"includenan"} which keeps NaN values in the calculation. To exclude
NaN values set the value of @var{nanflag} to @qcode{"omitnan"}. The output
will still contain NaN values if @var{x} consists of all NaN values in the
operating dimension.
@seealso{prod, cumsum}
@end deftypefn */)
{
  int nargin = args.length ();

  bool direction = false;   // true => "reverse": accumulate from end to start
  bool do_perm = false;     // true => result needs un-permuting (VECDIM case)
  bool allflag = false;     // true => "all": operate on x(:)
  bool nanflag = false;     // true => "omitnan": skip NaN values

  // Strip trailing string options.  They may appear in any order; for
  // mutually exclusive options the last one given wins.
  while (nargin > 1 && args(nargin - 1).is_string ())
    {
      std::string str = args(nargin - 1).string_value ();

      if (str == "forward")
        direction = false;
      else if (str == "reverse")
        direction = true;
      else if (str == "all")
        allflag = true;
      else if (str == "omitnan" || str == "omitmissing")
        {
          // NaN exists only for floating point input; silently ignore the
          // flag for other types.
          if (args(0).is_double_type () || args(0).is_single_type ())
            nanflag = true;
        }
      else if (str == "includenan" || str == "includemissing")
        nanflag = false;
      else
        error ("cumprod: unrecognized optional argument '%s'", str.c_str ());

      nargin--;
    }

  if (nargin < 1 || nargin > 2)
    print_usage ();
  if (allflag && nargin > 1)
    error ("cumprod: cannot set DIM or VECDIM with 'all' flag");

  octave_value arg = args(0);

  // Handle DIM, VECDIM.  For a VECDIM argument, get_dim_vecdim_all sets up
  // a permutation (perm_vec/do_perm) reducing the operation to a single DIM.
  int dim = -1;
  Array<int> perm_vec;
  if (nargin == 2)
    {
      octave_value dimarg = args(1);
      get_dim_vecdim_all (dimarg, arg, dim, perm_vec, do_perm, allflag, "cumprod");
    }

  // Handle allflag: operate on x(:).
  if (allflag)
    arg = arg.reshape (dim_vector (arg.numel (), 1));

  octave_value retval;

  // Dispatch on the builtin type.  "reverse" is implemented as
  // flip-cumprod-flip; sparse matrices do not support it.
  switch (arg.builtin_type ())
    {
    case btyp_double:
      if (arg.issparse ())
        {
          if (direction)
            error ("cumprod: DIRECTION is not supported for sparse matrices");
          else
            retval = arg.sparse_matrix_value ().cumprod (dim, nanflag);
        }
      else
        {
          if (direction)
            retval = arg.array_value ().flip (dim).cumprod (dim, nanflag).flip (dim);
          else
            retval = arg.array_value ().cumprod (dim, nanflag);
        }
      break;

    case btyp_complex:
      if (arg.issparse ())
        {
          if (direction)
            error ("cumprod: DIRECTION is not supported for sparse matrices");
          else
            retval = arg.sparse_complex_matrix_value ().cumprod (dim, nanflag);
        }
      else
        {
          if (direction)
            retval = arg.complex_array_value ().flip (dim).cumprod (dim, nanflag).flip (dim);
          else
            retval = arg.complex_array_value ().cumprod (dim, nanflag);
        }
      break;

    case btyp_float:
      if (direction)
        retval = arg.float_array_value ().flip (dim).cumprod (dim, nanflag).flip (dim);
      else
        retval = arg.float_array_value ().cumprod (dim, nanflag);
      break;

    case btyp_float_complex:
      if (direction)
        retval = arg.float_complex_array_value ().flip (dim).cumprod (dim, nanflag).flip (dim);
      else
        retval = arg.float_complex_array_value ().cumprod (dim, nanflag);
      break;

      // Integer types: NaN is not representable, so nanflag is not used.
#define MAKE_INT_BRANCH(X) \
    case btyp_ ## X: \
      if (direction) \
        retval = arg.X ## _array_value ().flip (dim).cumprod (dim).flip (dim); \
      else \
        retval = arg.X ## _array_value ().cumprod (dim); \
      break;

      MAKE_INT_BRANCH (int8);
      MAKE_INT_BRANCH (int16);
      MAKE_INT_BRANCH (int32);
      MAKE_INT_BRANCH (int64);
      MAKE_INT_BRANCH (uint8);
      MAKE_INT_BRANCH (uint16);
      MAKE_INT_BRANCH (uint32);
      MAKE_INT_BRANCH (uint64);

#undef MAKE_INT_BRANCH

    case btyp_bool:
      // Logical input: the result is double (per the docstring above).
      if (arg.issparse ())
        {
          if (direction)
            error ("cumprod: DIRECTION is not supported for sparse matrices");
          retval = arg.sparse_matrix_value ().cumprod (dim);
        }
      else
        {
          if (direction)
            retval = arg.array_value ().flip (dim).cumprod (dim).flip (dim);
          else
            retval = arg.array_value ().cumprod (dim);
        }
      break;

    default:
      err_wrong_type_arg ("cumprod", arg);
    }

  // Undo the permutation applied for VECDIM handling.
  if (do_perm)
    retval = retval.permute (perm_vec, true);

  return retval;
}
1477
1478/*
1479%!assert (cumprod ([1, 2, 3]), [1, 2, 6])
1480%!assert (cumprod ([-1; -2; -3]), [-1; 2; -6])
1481%!assert (cumprod ([i, 2+i, -3+2i, 4]), [i, -1+2i, -1-8i, -4-32i])
1482%!assert (cumprod ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i]),
1483%! [1, 2, 3; i, 4i, 9i; -1+i, -8+8i, -27+27i])
1484
1485%!assert (cumprod (single ([1, 2, 3])), single ([1, 2, 6]))
1486%!assert (cumprod (single ([-1; -2; -3])), single ([-1; 2; -6]))
1487%!assert (cumprod (single ([i, 2+i, -3+2i, 4])),
1488%! single ([i, -1+2i, -1-8i, -4-32i]))
1489%!assert (cumprod (single ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i])),
1490%! single ([1, 2, 3; i, 4i, 9i; -1+i, -8+8i, -27+27i]))
1491
1492%!assert (cumprod ([2, 3; 4, 5], 1), [2, 3; 8, 15])
1493%!assert (cumprod ([2, 3; 4, 5], 2), [2, 6; 4, 20])
1494
1495%!assert (cumprod (single ([2, 3; 4, 5]), 1), single ([2, 3; 8, 15]))
1496%!assert (cumprod (single ([2, 3; 4, 5]), 2), single ([2, 6; 4, 20]))
1497
1498%!test
1499%! x = reshape ([1:8], 2, 2, 2);
1500%! y = cumprod (x);
1501%! assert (y(:,:,1), [1, 3; 2, 12]);
1502%! assert (y(:,:,2), [5, 7; 30, 56]);
1503%! assert (flip (cumprod (flip (x))), cumprod (x, "reverse"));
1504%! y = cumprod (x, 2);
1505%! assert (y(:,:,1), [1, 3; 2, 8]);
1506%! assert (y(:,:,2), [5, 35; 6, 48]);
1507%! y = cumprod (x, 2, "reverse");
1508%! assert (y(:,:,1), [3, 3; 8, 4]);
1509%! assert (y(:,:,2), [35, 7; 48, 8]);
1510%! assert (flip (cumprod (flip (x, 2), 2), 2), cumprod (x, 2, "reverse"));
1511%! y = cumprod (x, [1, 2]);
1512%! assert (y(:,:,1), [1; 2; 6; 24]);
1513%! assert (y(:,:,2), [5; 30; 210; 1680]);
1514%! y = cumprod (x, [1, 3]);
1515%! assert (y(:,1), [1; 2; 10; 60]);
1516%! assert (y(:,2), [3; 12; 84; 672]);
1517%! y = cumprod (x, [2, 3]);
1518%! assert (y(1,:), [1, 3, 15, 105]);
1519%! assert (y(2,:), [2, 8, 48, 384]);
1520%! assert (cumprod (x, [1, 2, 3]), cumprod (x, "all"));
1521
## Test NaN handling with "omitnan" and "reverse" options
1523%!test
1524%! x = reshape ([1:8], 2, 2, 2);
1525%! x(3) = NaN;
1526%! y = cumprod (x);
1527%! assert (y(:,:,1), [1, NaN; 2, NaN]);
1528%! y = cumprod (x, "omitnan");
1529%! assert (y(:,:,1), [1, 1; 2, 4]);
1530%! assert (flip (cumprod (flip (x))), cumprod (x, "reverse"));
1531%! assert (flip (cumprod (flip (x), "omitnan")),
1532%! cumprod (x, "reverse", "omitnan"));
1533%! y = cumprod (x, 2);
1534%! assert (y(:,:,1), [1, NaN; 2, 8]);
1535%! y = cumprod (x, 2, "omitnan");
1536%! assert (y(:,:,1), [1, 1; 2, 8]);
1537%! y = cumprod (x, 2, "reverse");
1538%! assert (y(:,:,1), [NaN, NaN; 8, 4]);
1539%! y = cumprod (x, 2, "reverse", "omitnan");
1540%! assert (y(:,:,1), [1, 1; 8, 4]);
1541%! assert (flip (cumprod (flip (x, 3), 3), 3), cumprod (x, 3, "reverse"));
1542%! assert (flip (cumprod (flip (x, 3), 3, "omitnan"), 3),
1543%! cumprod (x, 3, "reverse", "omitnan"));
1544%! y = cumprod (x, [1, 2]);
1545%! assert (y(:,:,1), [1; 2; NaN; NaN]);
1546%! y = cumprod (x, [1, 2], "omitnan");
1547%! assert (y(:,:,1), [1; 2; 2; 8]);
1548%! y = cumprod (x, [1, 2], "reverse");
1549%! assert (y(:,:,1), [NaN; NaN; NaN; 4]);
1550%! assert (y(:,:,2), [1680; 336; 56; 8]);
1551%! y = cumprod (x, [1, 2], "reverse", "omitnan");
1552%! assert (y(:,:,1), [8; 8; 4; 4]);
1553%! y = cumprod (x, [1, 3]);
1554%! assert (y(:,1), [1; 2; 10; 60]);
1555%! assert (y(:,2), nan (4, 1));
1556%! y = cumprod (x, [1, 3], "omitnan");
1557%! assert (y(:,2), [1; 4; 28; 224]);
1558%! y = cumprod (x, [1, 3], "omitnan", "reverse");
1559%! assert (y(:,1), [60; 60; 30; 6]);
1560%! assert (y(:,2), [224; 224; 56; 8]);
1561%! y = cumprod (x, [2, 3], "omitnan");
1562%! assert (y(1,:), [1, 1, 5, 35]);
1563%! y = cumprod (x, [2, 3], "omitnan", "reverse");
1564%! assert (y(1,:), [35, 35, 35, 7]);
1565%! assert (y(2,:), [384, 192, 48, 8]);
1566%! assert (cumprod (x, [1, 2, 3]), cumprod (x, "all"));
1567
1568## Test exceeding dimensions
1569%!test
1570%! x = reshape ([1:8], 2, 2, 2);
1571%! assert (cumprod (x, 4), x);
1572%! assert (cumprod (x, 2), cumprod (x, [2, 4]));
1573%! assert (cumprod (x, 2, "reverse"), cumprod (x, [2, 4], "reverse"));
1574%! x(3) = NaN;
1575%! assert (cumprod (x, 4), x);
1576%! y = x;
1577%! y(3) = 1;
1578%! assert (cumprod (x, 4, "omitnan"), y);
1579%! assert (cumprod (x, 2, "omitnan"), cumprod (x, [2, 4], "omitnan"));
1580%! assert (cumprod (x, 2, "reverse", "omitnan"),
1581%! cumprod (x, [2, 4], "reverse", "omitnan"));
1582%!assert (cumprod (sparse ([1, 2; 3, 4]), 3), sparse ([1, 2; 3, 4]))
1583%!assert (cumprod (sparse ([1, 2i; 3, 4]), 3), sparse ([1, 2i; 3, 4]))
1584
1585%!test
1586%! x = ones (3);
1587%! assert (class (cumprod (uint8 (x))), "uint8");
1588%! assert (cumprod (x), cumprod (x, "omitnan"));
1589%! assert (class (cumprod (uint16 (x))), "uint16");
1590%! assert (class (cumprod (uint32 (x))), "uint32");
1591%! assert (class (cumprod (uint64 (x))), "uint64");
1592%! assert (class (cumprod (int8 (x))), "int8");
1593%! assert (class (cumprod (int16 (x))), "int16");
1594%! assert (class (cumprod (int32 (x))), "int32");
1595%! assert (class (cumprod (int64 (x))), "int64");
1596%!assert (class (cumprod ([true, false])), "double")
1597%!assert (cumprod ([true, false]), [1, 0])
1598%!assert (cumprod ([true, false], "reverse"), [0, 0])
1599
1600%!assert (cumprod (ones (2), 4), ones (2))
1601%!assert (cumprod (ones (2), [4, 5]), ones (2))
1602%!assert (cumprod (single (ones (2)), 4),single (ones (2)))
1603%!assert (cumprod (single (ones (2)), [4, 5]),single (ones (2)))
1604
1605%!assert (cumprod ([NaN, NaN], "omitnan"), [1, 1])
1606
1607## Test sparse matrices
1608%!assert (cumprod (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN])),
1609%! sparse ([NaN, NaN, 1, 4, 2; NaN, NaN, 1, 8, NaN]))
1610%!assert (cumprod (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), "omitnan"),
1611%! sparse ([1, 1, 1, 4, 2; 1, 2, 1, 8, 2]))
1612%!assert (cumprod (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), 2),
1613%! sparse ([NaN, NaN, NaN, NaN, NaN; 1, 2, 2, 4, NaN]))
1614%!assert (cumprod (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
1615%! sparse ([1, 1, 1, 4, 8; 1, 2, 2, 4, 4]))
1616%!assert (cumprod (sparse ([NaN, NaN, 1i, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
1617%! sparse ([1, 1, 1i, 4i, 8i; 1, 2, 2, 4, 4]))
1618%!assert (cumprod (sparse ([NaN, 0i, 1, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
1619%! sparse ([1, 0, 0, 0, 0; 1, 2, 2, 4, 4]))
1620
1621## Test input validation
1622%!error <Invalid call> cumprod ()
1623%!error <Invalid call> cumprod (1,2,3)
1624%!error <unrecognized optional argument 'foobar'> cumprod (1, "foobar")
1625%!error <cannot set DIM or VECDIM with 'all' flag>
1626%! cumprod (ones (3,3), 1, "all");
1627%!error <cannot set DIM or VECDIM with 'all' flag>
1628%! cumprod (ones (3,3), [1, 2], "all");
1629%!error <invalid dimension DIM = 0> cumprod (ones (3,3), 0)
1630%!error <invalid dimension DIM = -1> cumprod (ones (3,3), -1)
1631%!error <invalid dimension in VECDIM = -2> cumprod (ones (3), [1 -2])
1632%!error <duplicate dimension in VECDIM = 2> cumprod (ones (3), [1 2 2])
1633%!error <duplicate dimension in VECDIM = 1> cumprod (ones (3), [1 1 2])
1634%!error <DIRECTION is not supported for sparse matrices>
1635%! cumprod (sparse (ones (3,3)), "reverse");
1636*/
1637
1638DEFUN (cumsum, args, ,
1639 doc: /* -*- texinfo -*-
1640@deftypefn {} {@var{y} =} cumsum (@var{x})
1641@deftypefnx {} {@var{y} =} cumsum (@var{x}, @var{dim})
1642@deftypefnx {} {@var{y} =} cumsum (@var{x}, @var{vecdim})
1643@deftypefnx {} {@var{y} =} cumsum (@dots{}, "all")
1644@deftypefnx {} {@var{y} =} cumsum (@dots{}, @var{direction})
1645@deftypefnx {} {@var{y} =} cumsum (@dots{}, @var{nanflag})
1646Compute the cumulative sum of elements in @var{x}.
1647
1648If @var{x} is a vector, then @code{cumsum (@var{x})} returns a vector of the
1649same size with the cumulative sum of @var{x}.
1650
1651If @var{x} is a matrix, then @code{cumsum (@var{x})} returns a matrix of the
1652same size with the cumulative sum along each column of @var{x}.
1653
1654If @var{x} is an array, then @code{cumsum(@var{x})} returns an array of the
1655same size with the cumulative sum along the first non-singleton dimension of
1656@var{x}.
1657
1658The class of output @var{y} is the same as the class of input @var{x}, unless
1659@var{x} is logical, in which case @var{y} is double.
1660
1661The optional input @var{dim} specifies the dimension to operate on and must be
1662a positive integer. Specifying any singleton dimension in @var{x}, including
1663any dimension exceeding @code{ndims (@var{x})}, will return @var{x}.
1664
1665Specifying multiple dimensions with input @var{vecdim}, a vector of
1666non-repeating dimensions, will operate along the array slice defined by
1667@var{vecdim}. If @var{vecdim} indexes all dimensions of @var{x}, then it is
1668equivalent to the option @qcode{"all"}. Any dimension in @var{vecdim} greater
1669than @code{ndims (@var{x})} is ignored.
1670
1671Specifying the dimension as @qcode{"all"} will cause @code{cumsum} to operate
1672on all elements of @var{x}, and is equivalent to @code{cumsum (@var{x}(:))}.
1673
1674The optional input @var{direction} specifies how the operating dimension is
1675traversed and can take the following values:
1676
1677@table @asis
1678@item @qcode{"forward"} (default)
1679
1680The cumulative sum is computed from beginning (index 1) to end along the
1681operating dimension.
1682
1683@item @qcode{"reverse"}
1684
1685The cumulative sum is computed from end to beginning along the operating
1686dimension.
1687@end table
1688
1689The optional variable @var{nanflag} specifies whether to include or exclude
1690NaN values from the calculation using any of the previously specified input
1691argument combinations. The default value for @var{nanflag} is
1692@qcode{"includenan"} which keeps NaN values in the calculation. To exclude
1693NaN values set the value of @var{nanflag} to @qcode{"omitnan"}. The output
1694will still contain NaN values if @var{x} consists of all NaN values in the
1695operating dimension.
1696@seealso{sum, cumprod}
1697@end deftypefn */)
1698{
1699 int nargin = args.length ();
1700
1701 bool direction = false;
1702 bool do_perm = false;
1703 bool allflag = false;
1704 bool nanflag = false;
1705
1706 while (nargin > 1 && args(nargin - 1).is_string ())
1707 {
1708 std::string str = args(nargin - 1).string_value ();
1709
1710 if (str == "forward")
1711 direction = false;
1712 else if (str == "reverse")
1713 direction = true;
1714 else if (str == "all")
1715 allflag = true;
1716 else if (str == "omitnan" || str == "omitmissing")
1717 {
1718 if (args(0).is_double_type () || args(0).is_single_type ())
1719 nanflag = true;
1720 }
1721 else if (str == "includenan" || str == "includemissing")
1722 nanflag = false;
1723 else
1724 error ("cumsum: unrecognized optional argument '%s'", str.c_str ());
1725
1726 nargin--;
1727 }
1728
1729 if (nargin < 1 || nargin > 2)
1730 print_usage ();
1731 if (allflag && nargin > 1)
1732 error ("cumsum: cannot set DIM or VECDIM with 'all' flag");
1733
1734 octave_value arg = args(0);
1735
1736 // Handle DIM, VECDIM
1737 int dim = -1;
1738 Array<int> perm_vec;
1739 if (nargin == 2)
1740 {
1741 octave_value dimarg = args(1);
1742 get_dim_vecdim_all (dimarg, arg, dim, perm_vec, do_perm, allflag, "cumsum");
1743 }
1744
1745 // Handle allflag
1746 if (allflag)
1747 arg = arg.reshape (dim_vector (arg.numel (), 1));
1748
1749 octave_value retval;
1750
1751 switch (arg.builtin_type ())
1752 {
1753 case btyp_double:
1754 if (arg.issparse ())
1755 {
1756 if (direction)
1757 error ("cumsum: DIRECTION is not supported for sparse matrices");
1758 else
1759 retval = arg.sparse_matrix_value ().cumsum (dim, nanflag);
1760 }
1761 else
1762 {
1763 if (direction)
1764 retval = arg.array_value ().flip (dim).cumsum (dim, nanflag).flip (dim);
1765 else
1766 retval = arg.array_value ().cumsum (dim, nanflag);
1767 }
1768 break;
1769
1770 case btyp_complex:
1771 if (arg.issparse ())
1772 {
1773 if (direction)
1774 error ("cumsum: DIRECTION is not supported for sparse matrices");
1775 else
1776 retval = arg.sparse_complex_matrix_value ().cumsum (dim, nanflag);
1777 }
1778 else
1779 {
1780 if (direction)
1781 retval = arg.complex_array_value ().flip (dim).cumsum (dim, nanflag).flip (dim);
1782 else
1783 retval = arg.complex_array_value ().cumsum (dim, nanflag);
1784 }
1785 break;
1786
1787 case btyp_float:
1788 if (direction)
1789 retval = arg.float_array_value ().flip (dim).cumsum (dim, nanflag).flip (dim);
1790 else
1791 retval = arg.float_array_value ().cumsum (dim, nanflag);
1792 break;
1793
1794 case btyp_float_complex:
1795 if (direction)
1796 retval = arg.float_complex_array_value ().flip (dim).cumsum (dim, nanflag).flip (dim);
1797 else
1798 retval = arg.float_complex_array_value ().cumsum (dim, nanflag);
1799 break;
1800
1801#define MAKE_INT_BRANCH(X) \
1802 case btyp_ ## X: \
1803 if (direction) \
1804 retval = arg.X ## _array_value ().flip (dim).cumsum (dim).flip (dim); \
1805 else \
1806 retval = arg.X ## _array_value ().cumsum (dim); \
1807 break;
1808
1809 MAKE_INT_BRANCH (int8);
1810 MAKE_INT_BRANCH (int16);
1811 MAKE_INT_BRANCH (int32);
1812 MAKE_INT_BRANCH (int64);
1813 MAKE_INT_BRANCH (uint8);
1814 MAKE_INT_BRANCH (uint16);
1815 MAKE_INT_BRANCH (uint32);
1816 MAKE_INT_BRANCH (uint64);
1817
1818#undef MAKE_INT_BRANCH
1819
1820 case btyp_bool:
1821 if (arg.issparse ())
1822 {
1823 if (direction)
1824 error ("cumsum: DIRECTION is not supported for sparse matrices");
1825 retval = arg.sparse_matrix_value ().cumsum (dim);
1826 }
1827 else
1828 {
1829 // OPTIMIZED: Direct boolean cumulative count
1830 boolNDArray m = arg.bool_array_value ();
1831
1832 if (dim < 0)
1833 dim = m.dims ().first_non_singleton ();
1834
1835 if (dim >= m.ndims ())
1836 {
1837 retval = NDArray (m);
1838 break;
1839 }
1840
1841 // Calculate extent triplet (l, n, u) for column-major indexing
1842 dim_vector dv = m.dims ();
1843 octave_idx_type l = 1;
1844 for (int i = 0; i < dim; i++)
1845 l *= dv(i);
1846
1847 octave_idx_type n = dv(dim);
1848
1849 octave_idx_type u = 1;
1850 for (int i = dim + 1; i < dv.ndims (); i++)
1851 u *= dv(i);
1852
1853 NDArray result (dv);
1854 const bool *data = m.data ();
1855 double *r = result.rwdata ();
1856
1857 if (direction)
1858 {
1859 // Reverse direction
1860 for (octave_idx_type outer = 0; outer < u; outer++)
1861 {
1862 const octave_idx_type outer_base = outer * n * l;
1863 for (octave_idx_type inner = 0; inner < l; inner++)
1864 {
1865 double cumval = 0.0;
1866 octave_idx_type base = outer_base + inner;
1867
1868 // walk from last to first
1869 for (octave_idx_type j = n - 1; j >= 0; j--)
1870 {
1871 octave_idx_type idx = base + j * l;
1872 cumval += data[idx];
1873 r[idx] = cumval;
1874 }
1875 }
1876 }
1877 }
1878 else
1879 {
1880 // Forward direction
1881 for (octave_idx_type outer = 0; outer < u; outer++)
1882 {
1883 const octave_idx_type outer_base = outer * n * l;
1884 for (octave_idx_type inner = 0; inner < l; inner++)
1885 {
1886 double cumval = 0.0;
1887 octave_idx_type base = outer_base + inner;
1888 for (octave_idx_type j = 0; j < n; j++)
1889 {
1890 octave_idx_type idx = base + j * l;
1891 cumval += data[idx];
1892 r[idx] = cumval;
1893 }
1894 }
1895 }
1896 }
1897
1898 retval = result;
1899 }
1900 break;
1901
1902 default:
1903 err_wrong_type_arg ("cumsum", arg);
1904 }
1905
1906 if (do_perm)
1907 retval = retval.permute (perm_vec, true);
1908
1909 return retval;
1910}
1911
1912/*
1913%!assert (cumsum ([1, 2, 3]), [1, 3, 6])
1914%!assert (cumsum ([-1; -2; -3]), [-1; -3; -6])
1915%!assert (cumsum ([i, 2+i, -3+2i, 4]), [i, 2+2i, -1+4i, 3+4i])
1916%!assert (cumsum ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i]),
1917%! [1, 2, 3; 1+i, 2+2i, 3+3i; 2+2i, 4+4i, 6+6i])
1918
1919%!assert (cumsum (single ([1, 2, 3])), single ([1, 3, 6]))
1920%!assert (cumsum (single ([-1; -2; -3])), single ([-1; -3; -6]))
1921%!assert (cumsum (single ([i, 2+i, -3+2i, 4])),
1922%! single ([i, 2+2i, -1+4i, 3+4i]))
1923%!assert (cumsum (single ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i])),
1924%! single ([1, 2, 3; 1+i, 2+2i, 3+3i; 2+2i, 4+4i, 6+6i]))
1925
1926%!assert (cumsum ([1, 2; 3, 4], 1), [1, 2; 4, 6])
1927%!assert (cumsum ([1, 2; 3, 4], 2), [1, 3; 3, 7])
1928
1929%!assert (cumsum (single ([1, 2; 3, 4]), 1), single ([1, 2; 4, 6]))
1930%!assert (cumsum (single ([1, 2; 3, 4]), 2), single ([1, 3; 3, 7]))
1931
1932%!test
1933%! x = reshape ([1:8], 2, 2, 2);
1934%! y = cumsum (x);
1935%! assert (y(:,:,1), [1, 3; 3, 7]);
1936%! assert (y(:,:,2), [5, 7; 11, 15]);
1937%! assert (flip (cumsum (flip (x))), cumsum (x, "reverse"));
1938%! y = cumsum (x, 2);
1939%! assert (y(:,:,1), [1, 4; 2, 6]);
1940%! assert (y(:,:,2), [5, 12; 6, 14]);
1941%! y = cumsum (x, 2, "reverse");
1942%! assert (y(:,:,1), [4, 3; 6, 4]);
1943%! assert (y(:,:,2), [12, 7; 14, 8]);
1944%! assert (flip (cumsum (flip (x, 2), 2), 2), cumsum (x, 2, "reverse"));
1945%! y = cumsum (x, [1, 2]);
1946%! assert (y(:,:,1), [1; 3; 6; 10]);
1947%! assert (y(:,:,2), [5; 11; 18; 26]);
1948%! y = cumsum (x, [1, 3]);
1949%! assert (y(:,1), [1; 3; 8; 14]);
1950%! assert (y(:,2), [3; 7; 14; 22]);
1951%! y = cumsum (x, [2, 3]);
1952%! assert (y(1,:), [1, 4, 9, 16]);
1953%! assert (y(2,:), [2, 6, 12, 20]);
1954%! assert (cumsum (x, [1, 2, 3]), cumsum (x, "all"));
1955
## Test NaN inputs with "omitnan" and "reverse" options
1957%!test
1958%! x = reshape ([1:8], 2, 2, 2);
1959%! x(3) = NaN;
1960%! y = cumsum (x);
1961%! assert (y(:,:,1), [1, NaN; 3, NaN]);
1962%! y = cumsum (x, "omitnan");
1963%! assert (y(:,:,1), [1, 0; 3, 4]);
1964%! assert (flip (cumsum (flip (x))), cumsum (x, "reverse"));
1965%! assert (flip (cumsum (flip (x), "omitnan")),
1966%! cumsum (x, "reverse", "omitnan"));
1967%! y = cumsum (x, 2);
1968%! assert (y(:,:,1), [1, NaN; 2, 6]);
1969%! y = cumsum (x, 2, "omitnan");
1970%! assert (y(:,:,1), [1, 1; 2, 6]);
1971%! y = cumsum (x, 2, "reverse");
1972%! assert (y(:,:,1), [NaN, NaN; 6, 4]);
1973%! y = cumsum (x, 2, "reverse", "omitnan");
1974%! assert (y(:,:,1), [1, 0; 6, 4]);
1975%! assert (flip (cumsum (flip (x, 3), 3), 3), cumsum (x, 3, "reverse"));
1976%! assert (flip (cumsum (flip (x, 3), 3, "omitnan"), 3),
1977%! cumsum (x, 3, "reverse", "omitnan"));
1978%! y = cumsum (x, [1, 2]);
1979%! assert (y(:,:,1), [1; 3; NaN; NaN]);
1980%! y = cumsum (x, [1, 2], "omitnan");
1981%! assert (y(:,:,1), [1; 3; 3; 7]);
1982%! y = cumsum (x, [1, 2], "reverse");
1983%! assert (y(:,:,1), [NaN; NaN; NaN; 4]);
1984%! assert (y(:,:,2), [26; 21; 15; 8]);
1985%! y = cumsum (x, [1, 2], "reverse", "omitnan");
1986%! assert (y(:,:,1), [7; 6; 4; 4]);
1987%! assert (y(:,:,2), [26; 21; 15; 8]);
1988%! y = cumsum (x, [1, 3]);
1989%! assert (y(:,1), [1; 3; 8; 14]);
1990%! assert (y(:,2), nan (4, 1));
1991%! y = cumsum (x, [1, 3], "omitnan");
1992%! assert (y(:,2), [0; 4; 11; 19]);
1993%! y = cumsum (x, [1, 3], "omitnan", "reverse");
1994%! assert (y(:,1), [14; 13; 11; 6]);
1995%! assert (y(:,2), [19; 19; 15; 8]);
1996%! y = cumsum (x, [2, 3], "omitnan");
1997%! assert (y(1,:), [1, 1, 6, 13]);
1998%! y = cumsum (x, [2, 3], "omitnan", "reverse");
1999%! assert (y(1,:), [13, 12, 12, 7]);
2000%! assert (y(2,:), [20, 18, 14, 8]);
2001%! assert (cumsum (x, [1, 2, 3]), cumsum (x, "all"));
2002
2003## Test exceeding dimensions
2004%!test
2005%! x = reshape ([1:8], 2, 2, 2);
2006%! assert (cumsum (x, 4), x);
2007%! assert (cumsum (x, 2), cumsum (x, [2, 4]));
2008%! assert (cumsum (x, 2, "reverse"), cumsum (x, [2, 4], "reverse"));
2009%! x(3) = NaN;
2010%! assert (cumsum (x, 4), x);
2011%! y = x;
2012%! y(3) = 0;
2013%! assert (cumsum (x, 4, "omitnan"), y);
2014%! assert (cumsum (x, 2, "omitnan"), cumsum (x, [2, 4], "omitnan"));
2015%! assert (cumsum (x, 2, "reverse", "omitnan"),
2016%! cumsum (x, [2, 4], "reverse", "omitnan"));
2017%!assert (cumsum (sparse ([1, 2; 3, 4]), 3), sparse ([1, 2; 3, 4]))
2018%!assert (cumsum (sparse ([1, 2i; 3, 4]), 3), sparse ([1, 2i; 3, 4]))
2019
2020%!test
2021%! x = ones (3);
2022%! assert (class (cumsum (uint8 (x))), "uint8");
2023%! assert (cumsum (x), cumsum (x, "omitnan"));
2024%! assert (class (cumsum (uint16 (x))), "uint16");
2025%! assert (class (cumsum (uint32 (x))), "uint32");
2026%! assert (class (cumsum (uint64 (x))), "uint64");
2027%! assert (class (cumsum (int8 (x))), "int8");
2028%! assert (class (cumsum (int16 (x))), "int16");
2029%! assert (class (cumsum (int32 (x))), "int32");
2030%! assert (class (cumsum (int64 (x))), "int64");
2031%!assert (class (cumsum ([true, false])), "double")
2032%!assert (cumsum ([true, false]), [1, 1])
2033%!assert (cumsum ([true, false], "reverse"), [1, 0])
2034
2035%!assert (cumsum (ones (2), 4), ones (2))
2036%!assert (cumsum (ones (2), [4, 5]), ones (2))
2037%!assert (cumsum (single (ones (2)), 4),single (ones (2)))
2038%!assert (cumsum (single (ones (2)), [4, 5]),single (ones (2)))
2039
2040%!assert (cumsum ([NaN, NaN], "omitnan"), [0, 0])
2041
2042## Test sparse matrices
2043%!assert (cumsum (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN])),
2044%! sparse ([NaN, NaN, 1, 4, 2; NaN, NaN, 2, 6, NaN]))
2045%!assert (cumsum (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), "omitnan"),
2046%! sparse ([0, 0, 1, 4, 2; 1, 2, 2, 6, 2]))
2047%!assert (cumsum (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), 2),
2048%! sparse ([NaN, NaN, NaN, NaN, NaN; 1, 3, 4, 6, NaN]))
2049%!assert (cumsum (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
2050%! sparse ([0, 0, 1, 5, 7; 1, 3, 4, 6, 6]))
2051%!assert (cumsum (sparse ([NaN, NaN, 1i, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
2052%! sparse ([0, 0, 1i, 4+i, 6+i; 1, 3, 4, 6, 6]))
2053%!assert (cumsum (sparse ([NaN, 0i, 1, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
2054%! sparse ([0, 0, 1, 5, 7; 1, 3, 4, 6, 6]))
2055
2056## Test input validation
2057%!error <Invalid call> cumsum ()
2058%!error <Invalid call> cumsum (1,2,3)
2059%!error <unrecognized optional argument 'foobar'> cumsum (1, "foobar")
2060%!error <cannot set DIM or VECDIM with 'all' flag> ...
2061%! cumsum (ones (3,3), 1, "all")
2062%!error <cannot set DIM or VECDIM with 'all' flag> ...
2063%! cumsum (ones (3,3), [1, 2], "all")
2064%!error <invalid dimension DIM = 0> cumsum (ones (3,3), 0)
2065%!error <invalid dimension DIM = -1> cumsum (ones (3,3), -1)
2066%!error <invalid dimension in VECDIM = -2> cumsum (ones (3), [1 -2])
2067%!error <duplicate dimension in VECDIM = 2> cumsum (ones (3), [1 2 2])
2068%!error <duplicate dimension in VECDIM = 1> cumsum (ones (3), [1 1 2])
2069%!error <DIRECTION is not supported for sparse matrices> ...
2070%! cumsum (sparse (ones (3,3)), "reverse")
2071*/
2072
2073/* Additional cumsum boolean tests
2074%!test
2075%! x = [true, false, true; false, true, false];
2076%! assert (cumsum (x, 1), [1, 0, 1; 1, 1, 1]);
2077%! assert (cumsum (x, 2), [1, 1, 2; 0, 1, 1]);
2078
2079%!test
2080%! x = rand (100, 100) > 0.5;
2081%! assert (cumsum (x, 2), cumsum (double (x), 2));
2082*/
2083
2084DEFUN (diag, args, ,
2085 doc: /* -*- texinfo -*-
2086@deftypefn {} {@var{M} =} diag (@var{v})
2087@deftypefnx {} {@var{M} =} diag (@var{v}, @var{k})
2088@deftypefnx {} {@var{M} =} diag (@var{v}, @var{m}, @var{n})
2089@deftypefnx {} {@var{v} =} diag (@var{M})
2090@deftypefnx {} {@var{v} =} diag (@var{M}, @var{k})
2091Return a diagonal matrix with vector @var{v} on diagonal @var{k}.
2092
2093The second argument is optional. If it is positive, the vector is placed on
2094the @var{k}-th superdiagonal. If it is negative, it is placed on the
2095@var{-k}-th subdiagonal. The default value of @var{k} is 0, and the vector
2096is placed on the main diagonal. For example:
2097
2098@example
2099@group
2100diag ([1, 2, 3], 1)
2101 @xresult{} 0 1 0 0
2102 0 0 2 0
2103 0 0 0 3
2104 0 0 0 0
2105@end group
2106@end example
2107
2108@noindent
2109The 3-input form returns a diagonal matrix with vector @var{v} on the main
2110diagonal and the resulting matrix being of size @var{m} rows x @var{n}
2111columns.
2112
2113Given a matrix argument, instead of a vector, @code{diag} extracts the
2114@var{k}-th diagonal of the matrix.
2115@end deftypefn */)
2116{
2117 int nargin = args.length ();
2118
2119 if (nargin < 1 || nargin > 3)
2120 print_usage ();
2121
2122 octave_value retval;
2123
2124 if (nargin == 1)
2125 retval = args(0).diag ();
2126 else if (nargin == 2)
2127 {
2128 octave_idx_type k = args(1).strict_idx_type_value ("diag: invalid argument K");
2129
2130 retval = args(0).diag (k);
2131 }
2132 else
2133 {
2134 octave_value arg0 = args(0);
2135
2136 if (arg0.ndims () != 2 || (arg0.rows () != 1 && arg0.columns () != 1))
2137 error ("diag: V must be a vector");
2138
2139 octave_idx_type m = args(1).strict_idx_type_value ("diag: invalid dimension M");
2140 octave_idx_type n = args(2).strict_idx_type_value ("diag: invalid dimension N");
2141
2142 retval = arg0.diag (m, n);
2143 }
2144
2145 return retval;
2146}
2147
2148/*
2149
2150%!assert (full (diag ([1; 2; 3])), [1, 0, 0; 0, 2, 0; 0, 0, 3])
2151%!assert (diag ([1; 2; 3], 1),
2152%! [0, 1, 0, 0; 0, 0, 2, 0; 0, 0, 0, 3; 0, 0, 0, 0])
2153%!assert (diag ([1; 2; 3], 2),
2154%! [0 0 1 0 0; 0 0 0 2 0; 0 0 0 0 3; 0 0 0 0 0; 0 0 0 0 0])
2155%!assert (diag ([1; 2; 3],-1),
2156%! [0 0 0 0; 1 0 0 0; 0 2 0 0; 0 0 3 0])
2157%!assert (diag ([1; 2; 3],-2),
2158%! [0 0 0 0 0; 0 0 0 0 0; 1 0 0 0 0; 0 2 0 0 0; 0 0 3 0 0])
2159
2160%!assert (diag ([1, 0, 0; 0, 2, 0; 0, 0, 3]), [1; 2; 3])
2161%!assert (diag ([0, 1, 0, 0; 0, 0, 2, 0; 0, 0, 0, 3; 0, 0, 0, 0], 1),
2162%! [1; 2; 3])
2163%!assert (diag ([0, 0, 0, 0; 1, 0, 0, 0; 0, 2, 0, 0; 0, 0, 3, 0], -1),
2164%! [1; 2; 3])
2165%!assert (diag (ones (1, 0), 2), zeros (2))
2166%!assert (diag (1:3, 4, 2), [1, 0; 0, 2; 0, 0; 0, 0])
2167
2168%!assert (full (diag (single ([1; 2; 3]))),
2169%! single ([1, 0, 0; 0, 2, 0; 0, 0, 3]))
2170%!assert (diag (single ([1; 2; 3]), 1),
2171%! single ([0, 1, 0, 0; 0, 0, 2, 0; 0, 0, 0, 3; 0, 0, 0, 0]))
2172%!assert (diag (single ([1; 2; 3]), 2),
2173%! single ([0 0 1 0 0; 0 0 0 2 0; 0 0 0 0 3; 0 0 0 0 0; 0 0 0 0 0]))
2174%!assert (diag (single ([1; 2; 3]),-1),
2175%! single ([0, 0, 0, 0; 1, 0, 0, 0; 0, 2, 0, 0; 0, 0, 3, 0]))
2176%!assert (diag (single ([1; 2; 3]),-2),
2177%! single ([0 0 0 0 0; 0 0 0 0 0; 1 0 0 0 0; 0 2 0 0 0; 0 0 3 0 0]))
2178
2179%!assert (diag (single ([1, 0, 0; 0, 2, 0; 0, 0, 3])), single ([1; 2; 3]))
2180%!assert (diag (single ([0, 1, 0, 0; 0, 0, 2, 0; 0, 0, 0, 3; 0, 0, 0, 0]), 1),
2181%! single ([1; 2; 3]))
2182%!assert (diag (single ([0, 0, 0, 0; 1, 0, 0, 0; 0, 2, 0, 0; 0, 0, 3, 0]), -1),
2183%! single ([1; 2; 3]))
2184
2185%!assert (diag (int8 ([1; 2; 3])), int8 ([1, 0, 0; 0, 2, 0; 0, 0, 3]))
2186%!assert (diag (int8 ([1; 2; 3]), 1),
2187%! int8 ([0, 1, 0, 0; 0, 0, 2, 0; 0, 0, 0, 3; 0, 0, 0, 0]))
2188%!assert (diag (int8 ([1; 2; 3]), 2),
2189%! int8 ([0 0 1 0 0; 0 0 0 2 0; 0 0 0 0 3; 0 0 0 0 0; 0 0 0 0 0]))
2190%!assert (diag (int8 ([1; 2; 3]),-1),
2191%! int8 ([0 0 0 0; 1 0 0 0; 0 2 0 0; 0 0 3 0]))
2192%!assert (diag (int8 ([1; 2; 3]),-2),
2193%! int8 ([0 0 0 0 0; 0 0 0 0 0; 1 0 0 0 0; 0 2 0 0 0; 0 0 3 0 0]))
2194
2195%!assert (diag (int8 ([1, 0, 0; 0, 2, 0; 0, 0, 3])), int8 ([1; 2; 3]))
2196%!assert (diag (int8 ([0, 1, 0, 0; 0, 0, 2, 0; 0, 0, 0, 3; 0, 0, 0, 0]), 1),
2197%! int8 ([1; 2; 3]))
2198%!assert (diag (int8 ([0, 0, 0, 0; 1, 0, 0, 0; 0, 2, 0, 0; 0, 0, 3, 0]), -1),
2199%! int8 ([1; 2; 3]))
2200
2201%!assert (diag (1, 3, 3), diag ([1, 0, 0]))
2202%!assert (diag (i, 3, 3), diag ([i, 0, 0]))
2203%!assert (diag (single (1), 3, 3), diag ([single(1), 0, 0]))
2204%!assert (diag (single (i), 3, 3), diag ([single(i), 0, 0]))
2205%!assert (diag ([1, 2], 3, 3), diag ([1, 2, 0]))
2206%!assert (diag ([1, 2]*i, 3, 3), diag ([1, 2, 0]*i))
2207%!assert (diag (single ([1, 2]), 3, 3), diag (single ([1, 2, 0])))
2208%!assert (diag (single ([1, 2]*i), 3, 3), diag (single ([1, 2, 0]*i)))
2209
2210%!assert <*37411> (diag (diag ([5, 2, 3])(:,1)), diag([5 0 0 ]))
2211%!assert <*37411> (diag (diag ([5, 2, 3])(:,1), 2), [0 0 5 0 0; zeros(4, 5)])
2212%!assert <*37411> (diag (diag ([5, 2, 3])(:,1), -2),
2213%! [[0 0 5 0 0]', zeros(5, 4)])
2214
2215## Test non-square size
2216%!assert (diag ([1,2,3], 6, 3), [1 0 0; 0 2 0; 0 0 3; 0 0 0; 0 0 0; 0 0 0])
2217%!assert (diag (1, 2, 3), [1,0,0; 0,0,0])
2218%!assert (diag ({1}, 2, 3), {1,[],[]; [],[],[]})
2219%!assert (diag ({1,2}, 3, 4), {1,[],[],[]; [],2,[],[]; [],[],[],[]})
2220%!assert <*56711> (diag ({1,2,3}, 2, 1), {1; []})
2221
2222## Test out-of-range diagonals
2223%!assert (diag (ones (3,3), 4), zeros (0, 1))
2224%!assert (diag (cell (3,3), 4), cell (0, 1))
2225%!assert (diag (sparse (ones (3,3)), 4), sparse (zeros (0, 1)))
2226
2227## Test input validation
2228%!error <Invalid call to diag> diag ()
2229%!error <Invalid call to diag> diag (1,2,3,4)
2230%!error <V must be a vector> diag (ones (2), 3, 3)
2231%!error diag (1:3, -4, 3)
2232%!error diag (1:3, 4, -3)
2233
2234*/
2235
DEFUN (prod, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{y} =} prod (@var{x})
@deftypefnx {} {@var{y} =} prod (@var{x}, @var{dim})
@deftypefnx {} {@var{y} =} prod (@var{x}, @var{vecdim})
@deftypefnx {} {@var{y} =} prod (@var{x}, "all")
@deftypefnx {} {@var{y} =} prod (@dots{}, @var{outtype})
@deftypefnx {} {@var{y} =} prod (@dots{}, @var{nanflag})
Compute the product of the elements of @var{x}.

If @var{x} is a vector, then @code{prod (@var{x})} returns the product of the
elements in @var{x}.

If @var{x} is a matrix, then @code{prod (@var{x})} returns a row vector with
each element containing the product of the corresponding column in @var{x}.

If @var{x} is an array, then @code{prod(@var{x})} computes the product along
the first non-singleton dimension of @var{x}.

The optional input @var{dim} specifies the dimension to operate on and must be
a positive integer. Specifying any singleton dimension in @var{x}, including
any dimension exceeding @code{ndims (@var{x})}, will return @var{x}.

Specifying multiple dimensions with input @var{vecdim}, a vector of
non-repeating dimensions, will operate along the array slice defined by
@var{vecdim}. If @var{vecdim} indexes all dimensions of @var{x}, then it is
equivalent to the option @qcode{"all"}. Any dimension in @var{vecdim} greater
than @code{ndims (@var{x})} is ignored.

Specifying the dimension as @qcode{"all"} will cause @code{prod} to operate on
all elements of @var{x}, and is equivalent to @code{prod (@var{x}(:))}.

The optional input @var{outtype} specifies the data type that is returned as
well as the class of the variable used for calculations.
@var{outtype} can take the following values:

@table @asis
@item @qcode{"default"}
Operations on floating point inputs (double or single) are performed in their
native data type; while operations on integer, logical, and character data
types are performed using doubles. Output is of type double, unless the input
is single in which case the output is of type single.

@item @qcode{"double"}
Operations are performed in double precision even for single precision inputs.
Output is of type double.

@item @qcode{"native"}
Operations are performed in their native data types and output is of the same
type as the input as reported by (@code{class (@var{x})}). When the input is
logical, @code{prod (@var{x}, "native")} is equivalent to @code{all (@var{x})}.
@end table

The optional variable @var{nanflag} specifies whether to include or exclude
NaN values from the calculation using any of the previously specified input
argument combinations. The default value for @var{nanflag} is
@qcode{"includenan"} which keeps NaN values in the calculation. To exclude
NaN values set the value of @var{nanflag} to @qcode{"omitnan"}. The output
will be @var{1}, if @var{x} consists of all NaN values in the
operating dimension.
@seealso{cumprod, sum}
@end deftypefn */)
{
  int nargin = args.length ();

  // Option flags decoded from the trailing string arguments.
  bool isnative = false;   // "native": compute/return in the input class
  bool isdouble = false;   // "double": compute/return in double precision
  bool do_perm = false;    // set when a VECDIM permutation must be undone
  bool allflag = false;    // "all": operate on every element of X
  bool nanflag = false;    // "omitnan": skip NaN values (float types only)

  // Peel string options ("native", "double", "all", NANFLAG, ...) off
  // the end of the argument list, rightmost first, leaving only X and
  // the optional DIM/VECDIM behind.
  while (nargin > 1 && args(nargin - 1).is_string ())
    {
      std::string str = args(nargin - 1).string_value ();

      if (str == "native")
        isnative = true;
      else if (str == "double")
        isdouble = true;
      else if (str == "all")
        allflag = true;
      else if (str == "omitnan" || str == "omitmissing")
        {
          // NaN only exists in floating point data, so the flag is
          // silently ignored for integer/logical/char input.
          if (args(0).is_double_type () || args(0).is_single_type ())
            nanflag = true;
        }
      else if (str == "includenan" || str == "includemissing")
        nanflag = false;
      else if (str != "default")
        error ("prod: unrecognized optional argument '%s'", str.c_str ());

      nargin--;
    }

  if (nargin < 1 || nargin > 2)
    print_usage ();
  if (allflag && nargin > 1)
    error ("prod: cannot set DIM or VECDIM with 'all' flag");

  octave_value arg = args(0);

  // Handle DIM, VECDIM.  The shared helper validates the dimension
  // argument and, for a VECDIM vector, fills perm_vec/do_perm with a
  // permutation that is applied to the result after the reduction.
  int dim = -1;
  Array<int> perm_vec;
  if (nargin == 2)
    {
      octave_value dimarg = args(1);
      get_dim_vecdim_all (dimarg, arg, dim, perm_vec, do_perm, allflag, "prod");
    }

  // Handle allflag: flatten X to a column vector so the reduction
  // covers every element (equivalent to prod (x(:))).
  if (allflag)
    arg = arg.reshape (dim_vector (arg.numel (), 1));

  octave_value retval;

  // Dispatch on the built-in type of X so each case can call the
  // matching typed prod method.
  switch (arg.builtin_type ())
    {
    case btyp_double:
      if (arg.issparse ())
        retval = arg.sparse_matrix_value ().prod (dim, nanflag);
      else
        retval = arg.array_value ().prod (dim, nanflag);
      break;

    case btyp_complex:
      if (arg.issparse ())
        retval = arg.sparse_complex_matrix_value ().prod (dim, nanflag);
      else
        retval = arg.complex_array_value ().prod (dim, nanflag);
      break;

    case btyp_float:
      if (isdouble)
        // "double" outtype: dprod computes and returns in double
        // precision (see the texinfo description above).
        retval = arg.float_array_value ().dprod (dim, nanflag);
      else
        retval = arg.float_array_value ().prod (dim, nanflag);
      break;

    case btyp_float_complex:
      if (isdouble)
        retval = arg.float_complex_array_value ().dprod (dim, nanflag);
      else
        retval = arg.float_complex_array_value ().prod (dim, nanflag);
      break;

      // Integer types: "native" keeps the input integer class;
      // otherwise the computation is carried out in double (via
      // array_value).  Note that nanflag is irrelevant here.
#define MAKE_INT_BRANCH(X)                                      \
    case btyp_ ## X:                                            \
      if (isnative)                                             \
        retval = arg.X ## _array_value ().prod (dim);           \
      else                                                      \
        retval = arg.array_value ().prod (dim);                 \
      break;

    MAKE_INT_BRANCH (int8);
    MAKE_INT_BRANCH (int16);
    MAKE_INT_BRANCH (int32);
    MAKE_INT_BRANCH (int64);
    MAKE_INT_BRANCH (uint8);
    MAKE_INT_BRANCH (uint16);
    MAKE_INT_BRANCH (uint32);
    MAKE_INT_BRANCH (uint64);

#undef MAKE_INT_BRANCH

    // GAGME: Accursed Matlab compatibility...
    case btyp_char:
      // Characters are converted to double (array_value with the
      // force-conversion flag) before the product is taken.
      retval = arg.array_value (true).prod (dim);
      break;

    case btyp_bool:
      if (arg.issparse ())
        {
          if (isnative)
            // For logicals, the native product is equivalent to all().
            retval = arg.sparse_bool_matrix_value ().all (dim);
          else
            retval = arg.sparse_matrix_value ().prod (dim);
        }
      else if (isnative)
        retval = arg.bool_array_value ().all (dim);
      else
        // Default outtype: compute all() and return it as double.
        retval = NDArray (arg.bool_array_value ().all (dim));
      break;

    default:
      err_wrong_type_arg ("prod", arg);
    }

  // Undo the dimension permutation introduced for VECDIM reductions.
  if (do_perm)
    retval = retval.permute (perm_vec, true);

  return retval;
}
2429
2430/*
2431%!assert (prod ([1, 2, 3]), 6)
2432%!assert (prod ([-1; -2; -3]), -6)
2433%!assert (prod ([i, 2+i, -3+2i, 4]), -4 - 32i)
2434%!assert (prod ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i]), [-1+i, -8+8i, -27+27i])
2435
2436%!assert (prod (single ([1, 2, 3])), single (6))
2437%!assert (prod (single ([-1; -2; -3])), single (-6))
2438%!assert (prod (single ([i, 2+i, -3+2i, 4])), single (-4 - 32i))
2439%!assert (prod (single ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i])),
2440%! single ([-1+i, -8+8i, -27+27i]))
2441
2442## Test sparse
2443%!assert (prod (sparse ([1, 2, 3])), sparse (6))
2444%!assert (prod (sparse ([-1; -2; -3])), sparse (-6))
2445## Commented out until bug #42290 is fixed
2446#%!assert (prod (sparse ([i, 2+i, -3+2i, 4])), sparse (-4 - 32i))
2447#%!assert (prod (sparse ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i])),
2448#%! sparse ([-1+i, -8+8i, -27+27i]))
2449
2450%!assert (prod ([1, 2; 3, 4], 1), [3, 8])
2451%!assert (prod ([1, 2; 3, 4], 2), [2; 12])
2452%!assert (prod (single ([1, 2; 3, 4]), 1), single ([3, 8]))
2453%!assert (prod (single ([1, 2; 3, 4]), 2), single ([2; 12]))
2454
2455## Test empty matrices
2456%!assert (prod ([]), 1)
2457%!assert (prod ([], 1), zeros (1, 0))
2458%!assert (prod ([], 2), zeros (0, 1))
2459%!assert (prod ([], 3), zeros (0, 0))
2460%!assert (prod (zeros (1, 0)), 1)
2461%!assert (prod (zeros (1, 0), 1), zeros (1, 0))
2462%!assert (prod (zeros (1, 0), 2), 1)
2463%!assert (prod (zeros (0, 1)), 1)
2464%!assert (prod (zeros (0, 1), 1), 1)
2465%!assert (prod (zeros (0, 1), 2), zeros (0, 1))
2466%!assert (prod (zeros (2, 0)), zeros (1, 0))
2467%!assert (prod (zeros (2, 0), 1), zeros (1, 0))
2468%!assert (prod (zeros (2, 0), 2), [1; 1])
2469%!assert (prod (zeros (0, 2)), [1, 1])
2470%!assert (prod (zeros (0, 2), 1), [1, 1])
2471%!assert (prod (zeros (0, 2), 2), zeros (0, 1))
2472
2473%!assert (prod (single ([])), single (1))
2474%!assert (prod (single ([]), 1), single (zeros (1, 0)))
2475%!assert (prod (single ([]), 2), single (zeros (0, 1)))
2476%!assert (prod (single ([]), 3), single (zeros (0, 0)))
2477%!assert (prod (zeros (1, 0, "single")), single (1))
2478%!assert (prod (zeros (1, 0, "single"), 1), zeros (1, 0, "single"))
2479%!assert (prod (zeros (1, 0, "single"), 2), single (1))
2480%!assert (prod (zeros (0, 1, "single")), single (1))
2481%!assert (prod (zeros (0, 1, "single"), 1), single (1))
2482%!assert (prod (zeros (0, 1, "single"), 2), zeros (0, 1, "single"))
2483%!assert (prod (zeros (2, 0, "single")), zeros (1, 0, "single"))
2484%!assert (prod (zeros (2, 0, "single"), 1), zeros (1, 0, "single"))
2485%!assert (prod (zeros (2, 0, "single"), 2), single ([1; 1]))
2486%!assert (prod (zeros (0, 2, "single")), single ([1, 1]))
2487%!assert (prod (zeros (0, 2, "single"), 1), single ([1, 1]))
2488%!assert (prod (zeros (0, 2, "single"), 2), zeros (0, 1, "single"))
2489
2490## Test "double" type argument
2491%!assert (prod (single ([1, 2, 3]), "double"), 6)
2492%!assert (prod (single ([-1; -2; -3]), "double"), -6)
2493%!assert (prod (single ([i, 2+i, -3+2i, 4]), "double"), -4 - 32i)
2494%!assert (prod (single ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i]), "double"),
2495%! [-1+i, -8+8i, -27+27i])
2496
2497## Test "native" type argument
2498%!assert (prod (uint8 ([1, 2, 3]), "native"), uint8 (6))
2499%!assert (prod (uint8 ([-1; -2; -3]), "native"), uint8 (0))
2500%!assert (prod (int8 ([1, 2, 3]), "native"), int8 (6))
2501%!assert (prod (int8 ([-1; -2; -3]), "native"), int8 (-6))
2502%!assert (prod ([true false; true true], "native"), [true false])
2503%!assert (prod ([true false; true true], 2, "native"), [false; true])
2504
2505## Test "default"
2506%!assert (prod (single (1)), prod (single (1), "default"))
2507%!assert (prod ([true true], "default"), double (1))
2508%!assert (prod (uint8 (1), "default"), double (1))
2509
2510## Test character arrays
2511%!assert (prod (["aa";"aa"])', prod (["aa";"aa"], 2))
2512%!assert (prod ("aa"), sum ("a") * sum ("a"))
2513%!assert (prod ("a", 3), 97)
2514%!assert (prod (["a";"a"], 3), [97; 97])
2515%!assert (prod (["a";"a"], [1, 3]), sum ("a") * sum ("a"))
2516%!assert (prod (["a";"a"], [2, 3]), [97; 97])
2517
2518## Test dimension indexing with vecdim in N-dimensional arrays
2519%!test
2520%! x = repmat ([1:20;6:25], [5 2 6 3]);
2521%! assert (size (prod (x, [3 2])), [10 1 1 3]);
2522%! assert (size (prod (x, [1 2])), [1 1 6 3]);
2523%! assert (size (prod (x, [1 2 4])), [1 1 6]);
2524%! assert (size (prod (x, [1 4 3])), [1 40]);
2525%! assert (size (prod (x, [1 2 3 4])), [1 1]);
2526
2527## Test exceeding dimensions
2528%!assert (prod (ones (2,2), 3), ones (2,2))
2529%!assert (prod (ones (2,2,2), 99), ones (2,2,2))
2530%!assert (prod (magic (3), 3), magic (3))
2531%!assert (prod (magic (3), [1 3]), prod (magic (3)))
2532%!assert (prod (magic (3), [2 99]), prod (magic (3), 2))
2533%!assert (prod (ones (2), 4), ones (2))
2534%!assert (prod (ones (2), [4, 5]), ones (2))
2535%!assert (prod (single (ones (2)), 4),single (ones (2)))
2536%!assert (prod (single (ones (2)), [4, 5]),single (ones (2)))
2537%!assert (prod (sparse ([1, 2; 3, 4]), 3), sparse ([1, 2; 3, 4]))
2538%!assert (prod (sparse ([1, 2i; 3, 4]), 3), sparse ([1, 2i; 3, 4]))
2539
2540## Test nanflag
2541%!test
2542%! x = ones (3,4,5);
2543%! x(1) = NaN;
2544%! assert (prod (x)(:,:,1), [NaN, 1, 1, 1]);
2545%! assert (prod (x, "includenan")(:,:,1), [NaN, 1, 1, 1]);
2546%! assert (prod (x, "omitnan")(:,:,1), [1, 1, 1, 1]);
2547%! assert (prod (x, "omitmissing")(:,:,1), [1, 1, 1, 1]);
2548%! assert (prod (x, [2 3]), [NaN; 1; 1]);
2549%! assert (prod (x, [2 3], "omitnan"), [1; 1; 1]);
2550
2551## Test cases for "omitnan"
2552%!test
2553%! A = [2, NaN; 3, NaN; 4, NaN];
2554%! assert (prod (A, 2, "omitnan"), [2; 3; 4]);
2555%!test
2556%! A = [NaN, NaN, NaN];
2557%! assert (prod (A, "omitnan"), 1);
2558%!test
2559%! A = [2, 3, NaN; 4, 5, NaN; 1, 2, NaN];
2560%! assert (prod (A, 2, "omitnan"), [6; 20; 2]);
2561%!test
2562%! A = [2, NaN, 3; 3, NaN, 2; 2, NaN, 4];
2563%! assert (prod (A, 1, "omitnan"), [12, 1, 24]);
2564%!test
2565%! A = [1+i, NaN; 2+2i, NaN];
2566%! assert (prod (A, 2, "omitnan"), [1+i; 2+2i]);
2567%!test
2568%! A = single ([NaN, NaN, NaN]);
2569%! assert (prod (A, "omitnan"), single (1));
2570
2571## Test sparse matrices
2572%!assert (prod (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN])),
2573%! sparse ([NaN, NaN, 1, 8, NaN]))
2574%!assert (prod (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), "omitnan"),
2575%! sparse ([1, 2, 1, 8, 2]))
2576%!assert (prod (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), 2),
2577%! sparse ([NaN; NaN]))
2578%!assert (prod (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
2579%! sparse ([8; 4]))
2580%!assert (prod (sparse ([NaN, NaN, 1i, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
2581%! sparse ([8i; 4]))
2582%!assert (prod (sparse ([NaN, 0i, 1i, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
2583%! sparse ([0; 4]))
2584%!assert (prod (sparse ([NaN, 1+i, 1i, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
2585%! sparse ([-8+8i; 4]))
2586%!assert (prod (sparse ([NaN, NaN, NaN])), sparse (NaN))
2587%!assert (prod (sparse ([0, 0, 0, NaN, NaN, NaN])), sparse (NaN))
2588%!assert (prod (sparse ([NaN, NaN, NaN]), "omitnan"), sparse (1))
2589%!assert (prod (sparse ([0, 0, 0, NaN, NaN, NaN]), "omitnan"), sparse (0))
2590
2591## Test empty sparse matrices
2592%!assert (prod (sparse (ones(1, 0))), sparse (1))
2593%!assert (size (prod (sparse (ones(1, 0)), 1)), [1, 0])
2594%!assert (size (prod (sparse (ones(1, 0)), 2)), [1, 1])
2595%!assert (prod (sparse (ones(0, 1))), sparse (1))
2596%!assert (size (prod (sparse (ones(0, 1)), 1)), [1, 1])
2597%!assert (size (prod (sparse (ones(0, 1)), 1)), [1, 1])
2598%!assert (size (prod (sparse (ones(0, 1)), 2)), [0, 1])
2599%!assert (prod (sparse (ones(0, 0))), sparse (1))
2600%!assert (size (prod (sparse (ones(0, 0)), 1)), [1, 0])
2601%!assert (size (prod (sparse (ones(0, 0)), 2)), [0, 1])
2602%!assert (size (prod (sparse (ones(0, 0)), 3)), [0, 0])
2603
2604## Test input validation
2605%!error <Invalid call> prod ()
2606%!error <Invalid call> prod (1,2,3)
2607%!error <unrecognized optional argument 'foobar'> prod (1, "foobar")
2608%!error <cannot set DIM or VECDIM with 'all' flag>
2609%! prod (ones (3,3), 1, "all");
2610%!error <cannot set DIM or VECDIM with 'all' flag>
2611%! prod (ones (3,3), [1, 2], "all");
2612%!error <invalid dimension DIM = 0> prod (ones (3,3), 0)
2613%!error <invalid dimension DIM = -1> prod (ones (3,3), -1)
2614%!error <invalid dimension in VECDIM = -2> prod (ones (3,3), [1 -2])
2615%!error <duplicate dimension in VECDIM = 2> prod (ones (3,3), [1 2 2])
2616%!error <duplicate dimension in VECDIM = 1> prod (ones (3,3), [1 1 2])
2617*/
2618
2619static bool
2620all_scalar_1x1 (const octave_value_list& args)
2621{
2622 int n_args = args.length ();
2623 for (int i = 0; i < n_args; i++)
2624 if (args(i).numel () != 1)
2625 return false;
2626
2627 return true;
2628}
2629
2630template <typename TYPE, typename T>
2631static void
2632single_type_concat (Array<T>& result,
2633 const octave_value_list& args,
2634 int dim)
2635{
2636 int n_args = args.length ();
2639 && all_scalar_1x1 (args))
2640 {
2641 // Optimize all scalars case.
2642 dim_vector dv (1, 1);
2643 if (dim == -1 || dim == -2)
2644 dim = -dim - 1;
2645 else if (dim >= 2)
2646 dv.resize (dim+1, 1);
2647 dv(dim) = n_args;
2648
2649 result.clear (dv);
2650
2651 for (int j = 0; j < n_args; j++)
2652 {
2653 octave_quit ();
2654
2655 result(j) = octave_value_extract<T> (args(j));
2656 }
2657 }
2658 else
2659 {
2660 OCTAVE_LOCAL_BUFFER (Array<T>, array_list, n_args);
2661
2662 for (int j = 0; j < n_args; j++)
2663 {
2664 octave_quit ();
2665
2666 array_list[j] = octave_value_extract<TYPE> (args(j));
2667 }
2668
2669 result = Array<T>::cat (dim, n_args, array_list);
2670 }
2671}
2672
2673template <typename TYPE, typename T>
2674static void
2675single_type_concat (Sparse<T>& result,
2676 const octave_value_list& args,
2677 int dim)
2678{
2679 octave_idx_type n_args = args.length ();
2680 OCTAVE_LOCAL_BUFFER (Sparse<T>, sparse_list, n_args);
2681
2682 for (octave_idx_type j = 0; j < n_args; j++)
2683 {
2684 octave_quit ();
2685
2686 sparse_list[j] = octave_value_extract<TYPE> (args(j));
2687 }
2688
2689 result = Sparse<T>::cat (dim, n_args, sparse_list);
2690}
2691
2692// Dispatcher.
2693template <typename TYPE>
2694static TYPE
2695do_single_type_concat (const octave_value_list& args, int dim)
2696{
2697 TYPE result;
2698
2699 single_type_concat<TYPE, typename TYPE::element_type> (result, args, dim);
2700
2701 return result;
2702}
2703
2704template <typename MAP>
2705static void
2706single_type_concat_map (octave_map& result,
2707 const octave_value_list& args,
2708 int dim)
2709{
2710 int n_args = args.length ();
2711 OCTAVE_LOCAL_BUFFER (MAP, map_list, n_args);
2712
2713 for (int j = 0; j < n_args; j++)
2714 {
2715 octave_quit ();
2716
2717 map_list[j] = octave_value_extract<MAP> (args(j));
2718 }
2719
2720 result = octave_map::cat (dim, n_args, map_list);
2721}
2722
2723static octave_map
2724do_single_type_concat_map (const octave_value_list& args,
2725 int dim)
2726{
2727 octave_map result;
2728 if (all_scalar_1x1 (args)) // optimize all scalars case.
2729 single_type_concat_map<octave_scalar_map> (result, args, dim);
2730 else
2731 single_type_concat_map<octave_map> (result, args, dim);
2732
2733 return result;
2734}
2735
2736static octave_value
2737do_single_type_concat_cdef (const octave_value_list& args, int dim)
2738{
2739 // Concatenate a list of octave_classdef values of the same
2740 // class along dimension dim
2741
2743 int n_args = args.length ();
2744
2745 if (all_scalar_1x1 (args))
2746 {
2747 // Optimize all scalars case.
2748 dim_vector dv (1, 1);
2749 if (dim == -1 || dim == -2)
2750 dim = -dim - 1;
2751 else if (dim >= 2)
2752 dv.resize (dim+1, 1);
2753 dv(dim) = n_args;
2754
2755 arr.clear (dv);
2756
2757 for (int j = 0; j < n_args; j++)
2758 {
2759 octave_quit ();
2760
2761 arr(j) = args(j).classdef_object_value ()->get_object ();
2762 }
2763 }
2764 else
2765 {
2766 OCTAVE_LOCAL_BUFFER (Array<cdef_object>, array_list, n_args);
2767
2768 for (int j = 0; j < n_args; j++)
2769 {
2770 octave_quit ();
2771
2772 cdef_object obj = args(j).classdef_object_value ()->get_object ();
2773 if (obj.is_array ())
2774 array_list[j] = obj.array_value ();
2775 else
2776 array_list[j] = Array<cdef_object> (dim_vector (1,1), obj);
2777 }
2778
2779 arr = Array<cdef_object>::cat (dim, n_args, array_list);
2780 }
2781
2782 cdef_object obj_result = cdef_object (new cdef_object_array (arr));
2783 obj_result.set_class (arr(0).get_class ());
2784
2785 return to_ov (obj_result);
2786}
2787
// Try to convert OV to the dispatch type DTYPE, either through a
// conversion method defined by OV's class or through DTYPE's
// constructor.  Returns the converted value; raises an error when the
// conversion method or classdef constructor fails.
static octave_value
attempt_type_conversion (const octave_value& ov, std::string dtype)
{
  octave_value retval;

  // First try to find function in the class of OV that can convert to
  // the dispatch type dtype. It will have the name of the dispatch
  // type.

  std::string cname = ov.class_name ();

  interpreter& interp = __get_interpreter__ ();

  symbol_table& symtab = interp.get_symbol_table ();

  cdef_manager& cdef_mgr = interp.get_cdef_manager ();

  // Check classdef methods first, then fall back to the general symbol
  // table lookup.
  auto fcn = cdef_mgr.find_method_symbol (dtype, cname);
  if (! fcn.is_defined ())
    fcn = symtab.find_method (dtype, cname);

  if (fcn.is_defined ())
    {
      octave_value_list result;

      try
        {
          result = interp.feval (fcn, ovl (ov), 1);
        }
      catch (execution_exception& ee)
        {
          // NOTE(review): the message reads "from DTYPE to CNAME", but
          // the conversion attempted here is from CNAME to DTYPE --
          // confirm whether the argument order is intentional.
          error (ee, "conversion from %s to %s failed", dtype.c_str (),
                 cname.c_str ());
        }

      if (result.empty ())
        error ("conversion from %s to %s failed", dtype.c_str (),
               cname.c_str ());

      retval = result(0);
    }
  else
    {
      // No conversion function available. Try the constructor for the
      // dispatch type.
      // We can't go purely through the symbol table here, we have to
      // check the classdef manager as well for classdef methods
      auto ctor = cdef_mgr.find_method_symbol (dtype, dtype);

      if (! ctor.is_defined ())
        ctor = symtab.find_method (dtype, dtype);

      octave_value_list result;

      // The following code looks fairly ugly because we have to handle
      // old-style classes and classdef constructors separately
      if (ctor.is_defined () && ctor.is_function ())
        {
          octave_function *ctor_fcn = ctor.function_value ();

          if (ctor_fcn->is_legacy_constructor ())
            {
              // Old-style class constructor
              octave::unwind_protect frame;

              octave::interpreter_try (frame);

              try
                {
                  result = interp.feval (ctor, ovl (ov), 1);
                }
              catch (const octave::execution_exception&)
                {
                  // Best effort: a throwing legacy constructor leaves
                  // RESULT empty; recover the interpreter state and
                  // fall through.
                  interp.recover_from_exception ();
                }
            }
          else
            // Classdef constructor
            {
              auto cls = lookup_class (dtype, false, false);

              try
                {
                  result = to_ov (cls.construct_object (ovl (ov)));
                }
              catch (execution_exception& ee)
                {
                  error (ee, "%s constructor failed for %s argument",
                         dtype.c_str (), cname.c_str ());
                }

            }
        }

      // NOTE(review): if no constructor was found, or the legacy
      // constructor threw, RESULT is empty here and result(0) indexes
      // an empty list -- presumably that produces an index error
      // rather than a targeted diagnostic; confirm this is intended.
      retval = result(0);
    }

  return retval;
}
2887
2890 const std::string& cattype, int dim)
2891{
2892 octave_value retval;
2893
2894 // Get dominant type for list
2895
2896 std::string dtype = get_dispatch_type (ovl);
2897
2898 interpreter& interp = __get_interpreter__ ();
2899
2900 cdef_manager& cdef_mgr = __get_cdef_manager__ ();
2901
2902 symbol_table& symtab = interp.get_symbol_table ();
2903
2904 octave_value ov_fcn = symtab.find_method (cattype, dtype);
2905
2906 if (ov_fcn.is_defined ())
2907 {
2908 // Have method for dominant type. Call it and let it handle conversions.
2909
2910 octave_value_list tmp2;
2911
2912 try
2913 {
2914 tmp2 = interp.feval (ov_fcn, ovl, 1);
2915 }
2916 catch (execution_exception& ee)
2917 {
2918 error (ee, "cat: %s/%s method failed", dtype.c_str (), cattype.c_str ());
2919 }
2920
2921 if (tmp2.empty ())
2922 error ("cat: %s/%s method did not return a value", dtype.c_str (),
2923 cattype.c_str ());
2924
2925 retval = tmp2(0);
2926 }
2927 else
2928 {
2929 // No method for dominant type, so attempt type conversions for
2930 // all elements that are not of the dominant type, then do the
2931 // default operation for octave_class values.
2932
2933 octave_idx_type j = 0;
2936 for (octave_idx_type k = 0; k < len; k++)
2937 {
2938 octave_value elt = ovl(k);
2939
2940 std::string t1_type = elt.class_name ();
2941
2942 if (t1_type == dtype)
2943 tmp(j++) = elt;
2944 else if (elt.isobject () || ! elt.isempty ())
2945 {
2946 auto ov = attempt_type_conversion (elt, dtype);
2947 // Abort concatenation if type cannot be converted
2948 if ( ! ov.is_defined ())
2949 error ("cat: cannot convert from type \"%s\" to type \"%s\"",
2950 t1_type.c_str (), dtype.c_str ());
2951 tmp(j++) = ov;
2952 }
2953 }
2954
2955 tmp.resize (j);
2956
2957 // See if dominant type is a classdef
2958 cdef_class cdef = cdef_mgr.find_class (dtype, false);
2959
2960 if (cdef.ok ())
2961 // Default classdef concat
2962 retval = do_single_type_concat_cdef (tmp, dim);
2963 else
2964 {
2965 // Default struct-based class concat
2966 octave_map m = do_single_type_concat_map (tmp, dim);
2967
2968 std::string cname = tmp(0).class_name ();
2969 std::list<std::string> parents = tmp(0).parent_class_name_list ();
2970
2971 retval = octave_value (new octave_class (m, cname, parents));
2972 }
2973 }
2974
2975 return retval;
2976}
2977
static octave_value
do_cat (const octave_value_list& xargs, int dim, std::string fname)
{
  // Concatenate the elements of XARGS along dimension DIM (0-based).
  // The special values DIM == -1 and DIM == -2 encode vertcat and
  // horzcat semantics respectively (hvcat dimension rules; decoded
  // where needed).  FNAME is the user-visible caller name used in
  // error messages.

  octave_value retval;

  // We may need to convert elements of the list to cells, so make a copy.
  // This should be efficient, it is done mostly by incrementing reference
  // counts.
  octave_value_list args = xargs;

  int n_args = args.length ();

  if (n_args == 0)
    retval = Matrix ();
  else if (n_args == 1)
    retval = args(0);
  else if (n_args > 1)
    {
      std::string result_type;

      // Properties of the argument list, gathered in one pass below
      // and used to select the result type and concatenation strategy.
      bool all_strings_p = true;
      bool all_sq_strings_p = true;
      bool all_dq_strings_p = true;
      bool all_real_p = true;
      bool all_cmplx_p = true;
      bool any_sparse_p = false;
      bool any_cell_p = false;
      bool any_class_p = false;

      bool first_elem_is_struct = false;

      for (int i = 0; i < n_args; i++)
        {
          if (i == 0)
            {
              result_type = args(i).class_name ();

              first_elem_is_struct = args(i).isstruct ();
            }
          else
            // Fold the pairwise class-combination rule over the list.
            result_type = get_concat_class (result_type, args(i).class_name ());

          if (all_strings_p && ! args(i).is_string ())
            all_strings_p = false;
          if (all_sq_strings_p && ! args(i).is_sq_string ())
            all_sq_strings_p = false;
          if (all_dq_strings_p && ! args(i).is_dq_string ())
            all_dq_strings_p = false;
          if (all_real_p && ! args(i).isreal ())
            all_real_p = false;
          if (all_cmplx_p && ! (args(i).iscomplex ()
                                || args(i).isreal ()))
            all_cmplx_p = false;
          if (! any_sparse_p && args(i).issparse ())
            any_sparse_p = true;
          if (! any_cell_p && args(i).iscell ())
            any_cell_p = true;
          if (! any_class_p && args(i).isobject ())
            any_class_p = true;
        }

      // If any argument is a cell (and neither the classdef nor the
      // struct-first special case applies), promote the non-cell
      // arguments to cells; empty non-cell arguments are dropped
      // entirely.  The list is compacted in place via index J.
      if (any_cell_p && ! any_class_p && ! first_elem_is_struct)
        {
          int j = 0;
          for (int i = 0; i < n_args; i++)
            {
              if (args(i).iscell ())
                args(j++) = args(i);
              else
                {
                  if (args(i).isempty ())
                    continue;  // Delete empty non-cell arg
                  else
                    args(j++) = Cell (args(i));
                }
            }
          n_args = j;
          args.resize (n_args);
        }

      // Dispatch on the computed result type.  Class objects are
      // handled first since they may provide their own cat methods.
      if (any_class_p)
        retval = do_class_concat (args, fname, dim);
      else if (result_type == "double")
        {
          if (any_sparse_p)
            {
              if (all_real_p)
                retval = do_single_type_concat<SparseMatrix> (args, dim);
              else
                retval = do_single_type_concat<SparseComplexMatrix> (args, dim);
            }
          else
            {
              if (all_real_p)
                retval = do_single_type_concat<NDArray> (args, dim);
              else
                retval = do_single_type_concat<ComplexNDArray> (args, dim);
            }
        }
      else if (result_type == "single")
        {
          if (all_real_p)
            retval = do_single_type_concat<FloatNDArray> (args, dim);
          else
            retval = do_single_type_concat<FloatComplexNDArray> (args, dim);
        }
      else if (result_type == "char")
        {
          // Result keeps double-quote flavor only if every argument
          // was a double-quoted string.
          char type = (all_dq_strings_p ? '"' : '\'');

          if (! all_strings_p)
            warn_implicit_conversion ("Octave:num-to-str",
                                      "numeric", result_type);
          else
            maybe_warn_string_concat (all_dq_strings_p, all_sq_strings_p);

          charNDArray result = do_single_type_concat<charNDArray> (args, dim);

          retval = octave_value (result, type);
        }
      else if (result_type == "logical")
        {
          if (any_sparse_p)
            retval = do_single_type_concat<SparseBoolMatrix> (args, dim);
          else
            retval = do_single_type_concat<boolNDArray> (args, dim);
        }
      else if (result_type == "int8")
        retval = do_single_type_concat<int8NDArray> (args, dim);
      else if (result_type == "int16")
        retval = do_single_type_concat<int16NDArray> (args, dim);
      else if (result_type == "int32")
        retval = do_single_type_concat<int32NDArray> (args, dim);
      else if (result_type == "int64")
        retval = do_single_type_concat<int64NDArray> (args, dim);
      else if (result_type == "uint8")
        retval = do_single_type_concat<uint8NDArray> (args, dim);
      else if (result_type == "uint16")
        retval = do_single_type_concat<uint16NDArray> (args, dim);
      else if (result_type == "uint32")
        retval = do_single_type_concat<uint32NDArray> (args, dim);
      else if (result_type == "uint64")
        retval = do_single_type_concat<uint64NDArray> (args, dim);
      else if (result_type == "cell")
        retval = do_single_type_concat<Cell> (args, dim);
      else if (result_type == "struct")
        retval = do_single_type_concat_map (args, dim);
      else
        {
          // Fallback path for types without a specialized branch
          // above: concatenate element by element with cat_op.
          dim_vector dv = args(0).dims ();

          // Default concatenation.
          bool (dim_vector::*concat_rule) (const dim_vector&, int)
            = &dim_vector::concat;

          if (dim == -1 || dim == -2)
            {
              // Decode the vertcat/horzcat encoding: -1 -> dim 0
              // (vertical), -2 -> dim 1 (horizontal), using the
              // looser hvcat dimension-compatibility rule.
              concat_rule = &dim_vector::hvcat;
              dim = -dim - 1;
            }

          // Validate dimensions and accumulate the result shape DV.
          for (int i = 1; i < args.length (); i++)
            {
              if (! (dv.*concat_rule) (args(i).dims (), dim))
                error ("cat: dimension mismatch");
            }

          // The lines below might seem crazy, since we take a copy
          // of the first argument, resize it to be empty and then resize
          // it to be full.  This is done since it means that there is no
          // recopying of data, as would happen if we used a single resize.
          // It should be noted that resize operation is also significantly
          // slower than the do_cat_op function, so it makes sense to have
          // an empty matrix and copy all data.
          //
          // We might also start with a empty octave_value using
          //
          //   tmp = type_info::lookup_type (args(1).type_name());
          //
          // and then directly resize.  However, for some types there might
          // be some additional setup needed, and so this should be avoided.

          octave_value tmp = args(0);
          tmp = tmp.resize (dim_vector (0, 0)).resize (dv);

          int dv_len = dv.ndims ();
          // RA_IDX tracks the insertion offset along DIM as each
          // argument is copied into the result.
          Array<octave_idx_type> ra_idx (dim_vector (dv_len, 1), 0);

          for (int j = 0; j < n_args; j++)
            {
              // Can't fast return here to skip empty matrices as something
              // like cat (1,[],single ([])) must return an empty matrix of
              // the right type.
              tmp = cat_op (tmp, args(j), ra_idx);

              const dim_vector& dv_tmp = args(j).dims ();

              if (dim >= dv_len)
                {
                  // DIM beyond the result's dimensions can only hold
                  // a single slot; more than two args here is an
                  // internal inconsistency.
                  if (j > 1)
                    error ("%s: indexing error", fname.c_str ());

                  break;
                }
              else
                // Advance the offset by this argument's extent along
                // DIM (1 for arguments with fewer dimensions).
                ra_idx(dim) += (dim < dv_tmp.ndims () ? dv_tmp(dim) : 1);
            }
          retval = tmp;
        }
    }
  else
    print_usage ();  // defensive; n_args < 0 should not occur

  return retval;
}
3193
DEFUN (horzcat, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{A} =} horzcat (@var{array1}, @var{array2}, @dots{}, @var{arrayN})
Return the horizontal concatenation of N-D array objects, @var{array1},
@var{array2}, @dots{}, @var{arrayN} along dimension 2.

Arrays may also be concatenated horizontally using the syntax for creating
new matrices.  For example:

@example
@var{A} = [ @var{array1}, @var{array2}, @dots{} ]
@end example

This syntax is slightly more efficient because the Octave parser can
concatenate the arrays without the overhead of a function call.
@seealso{cat, vertcat}
@end deftypefn */)
{
  // dim == -2 is do_cat's encoding for horizontal (hvcat, dim 1)
  // concatenation.
  return do_cat (args, -2, "horzcat");
}
3214
3215/*
3216## Test concatenation with all zero matrices
3217%!test
3218%! warning ("off", "Octave:num-to-str", "local");
3219%! assert (horzcat ("", 65* ones (1,10)), "AAAAAAAAAA");
3220%! assert (horzcat (65* ones (1,10), ""), "AAAAAAAAAA");
3221
3222%!assert (class (horzcat (int64 (1), int64 (1))), "int64")
3223%!assert (class (horzcat (int64 (1), int32 (1))), "int64")
3224%!assert (class (horzcat (int64 (1), int16 (1))), "int64")
3225%!assert (class (horzcat (int64 (1), int8 (1))), "int64")
3226%!assert (class (horzcat (int64 (1), uint64 (1))), "int64")
3227%!assert (class (horzcat (int64 (1), uint32 (1))), "int64")
3228%!assert (class (horzcat (int64 (1), uint16 (1))), "int64")
3229%!assert (class (horzcat (int64 (1), uint8 (1))), "int64")
3230%!assert (class (horzcat (int64 (1), single (1))), "int64")
3231%!assert (class (horzcat (int64 (1), double (1))), "int64")
3232%!assert (class (horzcat (int64 (1), cell (1))), "cell")
3233%!assert (class (horzcat (int64 (1), true)), "int64")
3234%!test
3235%! warning ("off", "Octave:num-to-str", "local");
3236%! assert (class (horzcat (int64 (1), "a")), "char");
3237
3238%!assert (class (horzcat (int32 (1), int64 (1))), "int32")
3239%!assert (class (horzcat (int32 (1), int32 (1))), "int32")
3240%!assert (class (horzcat (int32 (1), int16 (1))), "int32")
3241%!assert (class (horzcat (int32 (1), int8 (1))), "int32")
3242%!assert (class (horzcat (int32 (1), uint64 (1))), "int32")
3243%!assert (class (horzcat (int32 (1), uint32 (1))), "int32")
3244%!assert (class (horzcat (int32 (1), uint16 (1))), "int32")
3245%!assert (class (horzcat (int32 (1), uint8 (1))), "int32")
3246%!assert (class (horzcat (int32 (1), single (1))), "int32")
3247%!assert (class (horzcat (int32 (1), double (1))), "int32")
3248%!assert (class (horzcat (int32 (1), cell (1))), "cell")
3249%!assert (class (horzcat (int32 (1), true)), "int32")
3250%!test
3251%! warning ("off", "Octave:num-to-str", "local");
3252%! assert (class (horzcat (int32 (1), "a")), "char");
3253
3254%!assert (class (horzcat (int16 (1), int64 (1))), "int16")
3255%!assert (class (horzcat (int16 (1), int32 (1))), "int16")
3256%!assert (class (horzcat (int16 (1), int16 (1))), "int16")
3257%!assert (class (horzcat (int16 (1), int8 (1))), "int16")
3258%!assert (class (horzcat (int16 (1), uint64 (1))), "int16")
3259%!assert (class (horzcat (int16 (1), uint32 (1))), "int16")
3260%!assert (class (horzcat (int16 (1), uint16 (1))), "int16")
3261%!assert (class (horzcat (int16 (1), uint8 (1))), "int16")
3262%!assert (class (horzcat (int16 (1), single (1))), "int16")
3263%!assert (class (horzcat (int16 (1), double (1))), "int16")
3264%!assert (class (horzcat (int16 (1), cell (1))), "cell")
3265%!assert (class (horzcat (int16 (1), true)), "int16")
3266%!test
3267%! warning ("off", "Octave:num-to-str", "local");
3268%! assert (class (horzcat (int16 (1), "a")), "char");
3269
3270%!assert (class (horzcat (int8 (1), int64 (1))), "int8")
3271%!assert (class (horzcat (int8 (1), int32 (1))), "int8")
3272%!assert (class (horzcat (int8 (1), int16 (1))), "int8")
3273%!assert (class (horzcat (int8 (1), int8 (1))), "int8")
3274%!assert (class (horzcat (int8 (1), uint64 (1))), "int8")
3275%!assert (class (horzcat (int8 (1), uint32 (1))), "int8")
3276%!assert (class (horzcat (int8 (1), uint16 (1))), "int8")
3277%!assert (class (horzcat (int8 (1), uint8 (1))), "int8")
3278%!assert (class (horzcat (int8 (1), single (1))), "int8")
3279%!assert (class (horzcat (int8 (1), double (1))), "int8")
3280%!assert (class (horzcat (int8 (1), cell (1))), "cell")
3281%!assert (class (horzcat (int8 (1), true)), "int8")
3282%!test
3283%! warning ("off", "Octave:num-to-str", "local");
3284%! assert (class (horzcat (int8 (1), "a")), "char");
3285
3286%!assert (class (horzcat (uint64 (1), int64 (1))), "uint64")
3287%!assert (class (horzcat (uint64 (1), int32 (1))), "uint64")
3288%!assert (class (horzcat (uint64 (1), int16 (1))), "uint64")
3289%!assert (class (horzcat (uint64 (1), int8 (1))), "uint64")
3290%!assert (class (horzcat (uint64 (1), uint64 (1))), "uint64")
3291%!assert (class (horzcat (uint64 (1), uint32 (1))), "uint64")
3292%!assert (class (horzcat (uint64 (1), uint16 (1))), "uint64")
3293%!assert (class (horzcat (uint64 (1), uint8 (1))), "uint64")
3294%!assert (class (horzcat (uint64 (1), single (1))), "uint64")
3295%!assert (class (horzcat (uint64 (1), double (1))), "uint64")
3296%!assert (class (horzcat (uint64 (1), cell (1))), "cell")
3297%!assert (class (horzcat (uint64 (1), true)), "uint64")
3298%!test
3299%! warning ("off", "Octave:num-to-str", "local");
3300%! assert (class (horzcat (uint64 (1), "a")), "char");
3301
3302%!assert (class (horzcat (uint32 (1), int64 (1))), "uint32")
3303%!assert (class (horzcat (uint32 (1), int32 (1))), "uint32")
3304%!assert (class (horzcat (uint32 (1), int16 (1))), "uint32")
3305%!assert (class (horzcat (uint32 (1), int8 (1))), "uint32")
3306%!assert (class (horzcat (uint32 (1), uint64 (1))), "uint32")
3307%!assert (class (horzcat (uint32 (1), uint32 (1))), "uint32")
3308%!assert (class (horzcat (uint32 (1), uint16 (1))), "uint32")
3309%!assert (class (horzcat (uint32 (1), uint8 (1))), "uint32")
3310%!assert (class (horzcat (uint32 (1), single (1))), "uint32")
3311%!assert (class (horzcat (uint32 (1), double (1))), "uint32")
3312%!assert (class (horzcat (uint32 (1), cell (1))), "cell")
3313%!assert (class (horzcat (uint32 (1), true)), "uint32")
3314%!test
3315%! warning ("off", "Octave:num-to-str", "local");
3316%! assert (class (horzcat (uint32 (1), "a")), "char");
3317
3318%!assert (class (horzcat (uint16 (1), int64 (1))), "uint16")
3319%!assert (class (horzcat (uint16 (1), int32 (1))), "uint16")
3320%!assert (class (horzcat (uint16 (1), int16 (1))), "uint16")
3321%!assert (class (horzcat (uint16 (1), int8 (1))), "uint16")
3322%!assert (class (horzcat (uint16 (1), uint64 (1))), "uint16")
3323%!assert (class (horzcat (uint16 (1), uint32 (1))), "uint16")
3324%!assert (class (horzcat (uint16 (1), uint16 (1))), "uint16")
3325%!assert (class (horzcat (uint16 (1), uint8 (1))), "uint16")
3326%!assert (class (horzcat (uint16 (1), single (1))), "uint16")
3327%!assert (class (horzcat (uint16 (1), double (1))), "uint16")
3328%!assert (class (horzcat (uint16 (1), cell (1))), "cell")
3329%!assert (class (horzcat (uint16 (1), true)), "uint16")
3330%!test
3331%! warning ("off", "Octave:num-to-str", "local");
3332%! assert (class (horzcat (uint16 (1), "a")), "char");
3333
3334%!assert (class (horzcat (uint8 (1), int64 (1))), "uint8")
3335%!assert (class (horzcat (uint8 (1), int32 (1))), "uint8")
3336%!assert (class (horzcat (uint8 (1), int16 (1))), "uint8")
3337%!assert (class (horzcat (uint8 (1), int8 (1))), "uint8")
3338%!assert (class (horzcat (uint8 (1), uint64 (1))), "uint8")
3339%!assert (class (horzcat (uint8 (1), uint32 (1))), "uint8")
3340%!assert (class (horzcat (uint8 (1), uint16 (1))), "uint8")
3341%!assert (class (horzcat (uint8 (1), uint8 (1))), "uint8")
3342%!assert (class (horzcat (uint8 (1), single (1))), "uint8")
3343%!assert (class (horzcat (uint8 (1), double (1))), "uint8")
3344%!assert (class (horzcat (uint8 (1), cell (1))), "cell")
3345%!assert (class (horzcat (uint8 (1), true)), "uint8")
3346%!test
3347%! warning ("off", "Octave:num-to-str", "local");
3348%! assert (class (horzcat (uint8 (1), "a")), "char");
3349
3350%!assert (class (horzcat (single (1), int64 (1))), "int64")
3351%!assert (class (horzcat (single (1), int32 (1))), "int32")
3352%!assert (class (horzcat (single (1), int16 (1))), "int16")
3353%!assert (class (horzcat (single (1), int8 (1))), "int8")
3354%!assert (class (horzcat (single (1), uint64 (1))), "uint64")
3355%!assert (class (horzcat (single (1), uint32 (1))), "uint32")
3356%!assert (class (horzcat (single (1), uint16 (1))), "uint16")
3357%!assert (class (horzcat (single (1), uint8 (1))), "uint8")
3358%!assert (class (horzcat (single (1), single (1))), "single")
3359%!assert (class (horzcat (single (1), double (1))), "single")
3360%!assert (class (horzcat (single (1), cell (1))), "cell")
3361%!assert (class (horzcat (single (1), true)), "single")
3362%!test
3363%! warning ("off", "Octave:num-to-str", "local");
3364%! assert (class (horzcat (single (1), "a")), "char");
3365
3366%!assert (class (horzcat (double (1), int64 (1))), "int64")
3367%!assert (class (horzcat (double (1), int32 (1))), "int32")
3368%!assert (class (horzcat (double (1), int16 (1))), "int16")
3369%!assert (class (horzcat (double (1), int8 (1))), "int8")
3370%!assert (class (horzcat (double (1), uint64 (1))), "uint64")
3371%!assert (class (horzcat (double (1), uint32 (1))), "uint32")
3372%!assert (class (horzcat (double (1), uint16 (1))), "uint16")
3373%!assert (class (horzcat (double (1), uint8 (1))), "uint8")
3374%!assert (class (horzcat (double (1), single (1))), "single")
3375%!assert (class (horzcat (double (1), double (1))), "double")
3376%!assert (class (horzcat (double (1), cell (1))), "cell")
3377%!assert (class (horzcat (double (1), true)), "double")
3378%!test
3379%! warning ("off", "Octave:num-to-str", "local");
3380%! assert (class (horzcat (double (1), "a")), "char");
3381
3382%!assert (class (horzcat (cell (1), int64 (1))), "cell")
3383%!assert (class (horzcat (cell (1), int32 (1))), "cell")
3384%!assert (class (horzcat (cell (1), int16 (1))), "cell")
3385%!assert (class (horzcat (cell (1), int8 (1))), "cell")
3386%!assert (class (horzcat (cell (1), uint64 (1))), "cell")
3387%!assert (class (horzcat (cell (1), uint32 (1))), "cell")
3388%!assert (class (horzcat (cell (1), uint16 (1))), "cell")
3389%!assert (class (horzcat (cell (1), uint8 (1))), "cell")
3390%!assert (class (horzcat (cell (1), single (1))), "cell")
3391%!assert (class (horzcat (cell (1), double (1))), "cell")
3392%!assert (class (horzcat (cell (1), cell (1))), "cell")
3393%!assert (class (horzcat (cell (1), true)), "cell")
3394%!assert (class (horzcat (cell (1), "a")), "cell")
3395
3396%!assert (class (horzcat (true, int64 (1))), "int64")
3397%!assert (class (horzcat (true, int32 (1))), "int32")
3398%!assert (class (horzcat (true, int16 (1))), "int16")
3399%!assert (class (horzcat (true, int8 (1))), "int8")
3400%!assert (class (horzcat (true, uint64 (1))), "uint64")
3401%!assert (class (horzcat (true, uint32 (1))), "uint32")
3402%!assert (class (horzcat (true, uint16 (1))), "uint16")
3403%!assert (class (horzcat (true, uint8 (1))), "uint8")
3404%!assert (class (horzcat (true, single (1))), "single")
3405%!assert (class (horzcat (true, double (1))), "double")
3406%!assert (class (horzcat (true, cell (1))), "cell")
3407%!assert (class (horzcat (true, true)), "logical")
3408%!test
3409%! warning ("off", "Octave:num-to-str", "local");
3410%! assert (class (horzcat (true, "a")), "char");
3411
3412%!test
3413%! warning ("off", "Octave:num-to-str", "local");
3414%! assert (class (horzcat ("a", int64 (1))), "char");
3415%! assert (class (horzcat ("a", int32 (1))), "char");
3416%! assert (class (horzcat ("a", int16 (1))), "char");
3417%! assert (class (horzcat ("a", int8 (1))), "char");
3418%! assert (class (horzcat ("a", int64 (1))), "char");
3419%! assert (class (horzcat ("a", int32 (1))), "char");
3420%! assert (class (horzcat ("a", int16 (1))), "char");
3421%! assert (class (horzcat ("a", int8 (1))), "char");
3422%! assert (class (horzcat ("a", single (1))), "char");
3423%! assert (class (horzcat ("a", double (1))), "char");
3424%! assert (class (horzcat ("a", cell (1))), "cell");
3425%! assert (class (horzcat ("a", true)), "char");
3426%! assert (class (horzcat ("a", "a")), "char");
3427
3428%!assert (class (horzcat (cell (1), struct ("foo", "bar"))), "cell")
3429
3430%!error horzcat (struct ("foo", "bar"), cell (1))
3431
3432%!test <*39041> assert (class (horzcat (cell (0), struct ())), "cell")
3433%!test <51086> assert (class (horzcat (struct (), cell (0))), "struct")
3434*/
3435
DEFUN (vertcat, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{A} =} vertcat (@var{array1}, @var{array2}, @dots{}, @var{arrayN})
Return the vertical concatenation of N-D array objects, @var{array1},
@var{array2}, @dots{}, @var{arrayN} along dimension 1.

Arrays may also be concatenated vertically using the syntax for creating
new matrices.  For example:

@example
@var{A} = [ @var{array1}; @var{array2}; @dots{} ]
@end example

This syntax is slightly more efficient because the Octave parser can
concatenate the arrays without the overhead of a function call.
@seealso{cat, horzcat}
@end deftypefn */)
{
  // dim == -1 is do_cat's encoding for vertical (hvcat, dim 0)
  // concatenation.
  return do_cat (args, -1, "vertcat");
}
3456
3457/*
3458%!test
3459%! c = {"foo"; "bar"; "bazoloa"};
3460%! assert (vertcat (c, "a", "bc", "def"),
3461%! {"foo"; "bar"; "bazoloa"; "a"; "bc"; "def"});
3462*/
3463
DEFUN (cat, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{A} =} cat (@var{dim}, @var{array1}, @var{array2}, @dots{}, @var{arrayN})
Return the concatenation of N-D array objects, @var{array1}, @var{array2},
@dots{}, @var{arrayN} along dimension @var{dim}.

@example
@group
A = ones (2, 2);
B = zeros (2, 2);
cat (2, A, B)
  @xresult{} 1 1 0 0
     1 1 0 0
@end group
@end example

Alternatively, we can concatenate @var{A} and @var{B} along the second
dimension in the following way:

@example
@group
[A, B]
@end group
@end example

@var{dim} can be larger than the dimensions of the N-D array objects and the
result will thus have @var{dim} dimensions as the following example shows:

@example
@group
cat (4, ones (2, 2), zeros (2, 2))
  @xresult{} ans(:,:,1,1) =

        1 1
        1 1

      ans(:,:,1,2) =

        0 0
        0 0
@end group
@end example
@seealso{horzcat, vertcat}
@end deftypefn */)
{
  if (args.length () == 0)
    print_usage ();

  // DIM is 1-based on the user side; convert to the 0-based index
  // used internally by do_cat.
  int dim = args(0).strict_int_value ("cat: DIM must be an integer") - 1;

  if (dim < 0)
    error ("cat: DIM must be a valid dimension");

  // The remaining arguments are the arrays to concatenate.
  return ovl (do_cat (args.slice (1, args.length () - 1), dim, "cat"));
}
3519
3520/*
3521%!function ret = __testcat (t1, t2, tr, cmplx)
3522%! assert (cat (1, cast ([], t1), cast ([], t2)), cast ([], tr));
3523%!
3524%! assert (cat (1, cast (1, t1), cast (2, t2)), cast ([1; 2], tr));
3525%! assert (cat (1, cast (1, t1), cast ([2; 3], t2)), cast ([1; 2; 3], tr));
3526%! assert (cat (1, cast ([1; 2], t1), cast (3, t2)), cast ([1; 2; 3], tr));
3527%! assert (cat (1, cast ([1; 2], t1), cast ([3; 4], t2)),
3528%! cast ([1; 2; 3; 4], tr));
3529%! assert (cat (2, cast (1, t1), cast (2, t2)), cast ([1, 2], tr));
3530%! assert (cat (2, cast (1, t1), cast ([2, 3], t2)), cast ([1, 2, 3], tr));
3531%! assert (cat (2, cast ([1, 2], t1), cast (3, t2)), cast ([1, 2, 3], tr));
3532%! assert (cat (2, cast ([1, 2], t1), cast ([3, 4], t2)),
3533%! cast ([1, 2, 3, 4], tr));
3534%!
3535%! assert ([cast(1, t1); cast(2, t2)], cast ([1; 2], tr));
3536%! assert ([cast(1, t1); cast([2; 3], t2)], cast ([1; 2; 3], tr));
3537%! assert ([cast([1; 2], t1); cast(3, t2)], cast ([1; 2; 3], tr));
3538%! assert ([cast([1; 2], t1); cast([3; 4], t2)], cast ([1; 2; 3; 4], tr));
3539%! assert ([cast(1, t1), cast(2, t2)], cast ([1, 2], tr));
3540%! assert ([cast(1, t1), cast([2, 3], t2)], cast ([1, 2, 3], tr));
3541%! assert ([cast([1, 2], t1), cast(3, t2)], cast ([1, 2, 3], tr));
3542%! assert ([cast([1, 2], t1), cast([3, 4], t2)], cast ([1, 2, 3, 4], tr));
3543%!
3544%! if (nargin == 3 || cmplx)
3545%! assert (cat (1, cast (1i, t1), cast (2, t2)), cast ([1i; 2], tr));
3546%! assert (cat (1, cast (1i, t1), cast ([2; 3], t2)), cast ([1i; 2; 3], tr));
3547%! assert (cat (1, cast ([1i; 2], t1), cast (3, t2)), cast ([1i; 2; 3], tr));
3548%! assert (cat (1, cast ([1i; 2], t1), cast ([3; 4], t2)),
3549%! cast ([1i; 2; 3; 4], tr));
3550%! assert (cat (2, cast (1i, t1), cast (2, t2)), cast ([1i, 2], tr));
3551%! assert (cat (2, cast (1i, t1), cast ([2, 3], t2)), cast ([1i, 2, 3], tr));
3552%! assert (cat (2, cast ([1i, 2], t1), cast (3, t2)), cast ([1i, 2, 3], tr));
3553%! assert (cat (2, cast ([1i, 2], t1), cast ([3, 4], t2)),
3554%! cast ([1i, 2, 3, 4], tr));
3555%! assert ([cast(1i, t1); cast(2, t2)], cast ([1i; 2], tr));
3556%! assert ([cast(1i, t1); cast([2; 3], t2)], cast ([1i; 2; 3], tr));
3557%! assert ([cast([1i; 2], t1); cast(3, t2)], cast ([1i; 2; 3], tr));
3558%! assert ([cast([1i; 2], t1); cast([3; 4], t2)], cast ([1i; 2; 3; 4], tr));
3559%! assert ([cast(1i, t1), cast(2, t2)], cast ([1i, 2], tr));
3560%! assert ([cast(1i, t1), cast([2, 3], t2)], cast ([1i, 2, 3], tr));
3561%! assert ([cast([1i, 2], t1), cast(3, t2)], cast ([1i, 2, 3], tr));
3562%! assert ([cast([1i, 2], t1), cast([3, 4], t2)], cast ([1i, 2, 3, 4], tr));
3563%!
3564%! assert (cat (1, cast (1, t1), cast (2i, t2)), cast ([1; 2i], tr));
3565%! assert (cat (1, cast (1, t1), cast ([2i; 3], t2)), cast ([1; 2i; 3], tr));
3566%! assert (cat (1, cast ([1; 2], t1), cast (3i, t2)), cast ([1; 2; 3i], tr));
3567%! assert (cat (1, cast ([1; 2], t1), cast ([3i; 4], t2)),
3568%! cast ([1; 2; 3i; 4], tr));
3569%! assert (cat (2, cast (1, t1), cast (2i, t2)), cast ([1, 2i], tr));
3570%! assert (cat (2, cast (1, t1), cast ([2i, 3], t2)), cast ([1, 2i, 3], tr));
3571%! assert (cat (2, cast ([1, 2], t1), cast (3i, t2)), cast ([1, 2, 3i], tr));
3572%! assert (cat (2, cast ([1, 2], t1), cast ([3i, 4], t2)),
3573%! cast ([1, 2, 3i, 4], tr));
3574%! assert ([cast(1, t1); cast(2i, t2)], cast ([1; 2i], tr));
3575%! assert ([cast(1, t1); cast([2i; 3], t2)], cast ([1; 2i; 3], tr));
3576%! assert ([cast([1; 2], t1); cast(3i, t2)], cast ([1; 2; 3i], tr));
3577%! assert ([cast([1; 2], t1); cast([3i; 4], t2)], cast ([1; 2; 3i; 4], tr));
3578%! assert ([cast(1, t1), cast(2i, t2)], cast ([1, 2i], tr));
3579%! assert ([cast(1, t1), cast([2i, 3], t2)], cast ([1, 2i, 3], tr));
3580%! assert ([cast([1, 2], t1), cast(3i, t2)], cast ([1, 2, 3i], tr));
3581%! assert ([cast([1, 2], t1), cast([3i, 4], t2)], cast ([1, 2, 3i, 4], tr));
3582%!
3583%! assert (cat (1, cast (1i, t1), cast (2i, t2)), cast ([1i; 2i], tr));
3584%! assert (cat (1, cast (1i, t1), cast ([2i; 3], t2)),
3585%! cast ([1i; 2i; 3], tr));
3586%! assert (cat (1, cast ([1i; 2], t1), cast (3i, t2)),
3587%! cast ([1i; 2; 3i], tr));
3588%! assert (cat (1, cast ([1i; 2], t1), cast ([3i; 4], t2)),
3589%! cast ([1i; 2; 3i; 4], tr));
3590%! assert (cat (2, cast (1i, t1), cast (2i, t2)), cast ([1i, 2i], tr));
3591%! assert (cat (2, cast (1i, t1), cast ([2i, 3], t2)),
3592%! cast ([1i, 2i, 3], tr));
3593%! assert (cat (2, cast ([1i, 2], t1), cast (3i, t2)),
3594%! cast ([1i, 2, 3i], tr));
3595%! assert (cat (2, cast ([1i, 2], t1), cast ([3i, 4], t2)),
3596%! cast ([1i, 2, 3i, 4], tr));
3597%!
3598%! assert ([cast(1i, t1); cast(2i, t2)], cast ([1i; 2i], tr));
3599%! assert ([cast(1i, t1); cast([2i; 3], t2)], cast ([1i; 2i; 3], tr));
3600%! assert ([cast([1i; 2], t1); cast(3i, t2)], cast ([1i; 2; 3i], tr));
3601%! assert ([cast([1i; 2], t1); cast([3i; 4], t2)],
3602%! cast ([1i; 2; 3i; 4], tr));
3603%! assert ([cast(1i, t1), cast(2i, t2)], cast ([1i, 2i], tr));
3604%! assert ([cast(1i, t1), cast([2i, 3], t2)], cast ([1i, 2i, 3], tr));
3605%! assert ([cast([1i, 2], t1), cast(3i, t2)], cast ([1i, 2, 3i], tr));
3606%! assert ([cast([1i, 2], t1), cast([3i, 4], t2)],
3607%! cast ([1i, 2, 3i, 4], tr));
3608%! endif
3609%! ret = true;
3610%!endfunction
3611
3612%!assert (__testcat ("double", "double", "double"))
3613%!assert (__testcat ("single", "double", "single"))
3614%!assert (__testcat ("double", "single", "single"))
3615%!assert (__testcat ("single", "single", "single"))
3616
3617%!assert (__testcat ("double", "int8", "int8", false))
3618%!assert (__testcat ("int8", "double", "int8", false))
3619%!assert (__testcat ("single", "int8", "int8", false))
3620%!assert (__testcat ("int8", "single", "int8", false))
3621%!assert (__testcat ("int8", "int8", "int8", false))
3622%!assert (__testcat ("double", "int16", "int16", false))
3623%!assert (__testcat ("int16", "double", "int16", false))
3624%!assert (__testcat ("single", "int16", "int16", false))
3625%!assert (__testcat ("int16", "single", "int16", false))
3626%!assert (__testcat ("int16", "int16", "int16", false))
3627%!assert (__testcat ("double", "int32", "int32", false))
3628%!assert (__testcat ("int32", "double", "int32", false))
3629%!assert (__testcat ("single", "int32", "int32", false))
3630%!assert (__testcat ("int32", "single", "int32", false))
3631%!assert (__testcat ("int32", "int32", "int32", false))
3632%!assert (__testcat ("double", "int64", "int64", false))
3633%!assert (__testcat ("int64", "double", "int64", false))
3634%!assert (__testcat ("single", "int64", "int64", false))
3635%!assert (__testcat ("int64", "single", "int64", false))
3636%!assert (__testcat ("int64", "int64", "int64", false))
3637
3638%!assert (__testcat ("double", "uint8", "uint8", false))
3639%!assert (__testcat ("uint8", "double", "uint8", false))
3640%!assert (__testcat ("single", "uint8", "uint8", false))
3641%!assert (__testcat ("uint8", "single", "uint8", false))
3642%!assert (__testcat ("uint8", "uint8", "uint8", false))
3643%!assert (__testcat ("double", "uint16", "uint16", false))
3644%!assert (__testcat ("uint16", "double", "uint16", false))
3645%!assert (__testcat ("single", "uint16", "uint16", false))
3646%!assert (__testcat ("uint16", "single", "uint16", false))
3647%!assert (__testcat ("uint16", "uint16", "uint16", false))
3648%!assert (__testcat ("double", "uint32", "uint32", false))
3649%!assert (__testcat ("uint32", "double", "uint32", false))
3650%!assert (__testcat ("single", "uint32", "uint32", false))
3651%!assert (__testcat ("uint32", "single", "uint32", false))
3652%!assert (__testcat ("uint32", "uint32", "uint32", false))
3653%!assert (__testcat ("double", "uint64", "uint64", false))
3654%!assert (__testcat ("uint64", "double", "uint64", false))
3655%!assert (__testcat ("single", "uint64", "uint64", false))
3656%!assert (__testcat ("uint64", "single", "uint64", false))
3657%!assert (__testcat ("uint64", "uint64", "uint64", false))
3658
3659%!assert (cat (3, [], [1,2;3,4]), [1,2;3,4])
3660%!assert (cat (3, [1,2;3,4], []), [1,2;3,4])
3661%!assert (cat (3, [], [1,2;3,4], []), [1,2;3,4])
3662%!assert (cat (3, [], [], []), zeros (0, 0, 3))
3663
3664%!assert (cat (3, [], [], 1, 2), cat (3, 1, 2))
3665%!assert (cat (3, [], [], [1,2;3,4]), [1,2;3,4])
3666%!assert (cat (4, [], [], [1,2;3,4]), [1,2;3,4])
3667
3668%!assert ([zeros(3,2,2); ones(1,2,2)], repmat ([0;0;0;1],[1,2,2]))
3669%!assert ([zeros(3,2,2); ones(1,2,2)], vertcat (zeros (3,2,2), ones (1,2,2)))
3670
3671%!test <*49759>
3672%! A = [];
3673%! B = {1; 2};
3674%! assert (cat (1, A, B), {1; 2});
3675%! assert (cat (2, A, B), {1; 2});
3676
3677%!error <dimension mismatch> cat (3, cat (3, [], []), [1,2;3,4])
3678%!error <dimension mismatch> cat (3, zeros (0, 0, 2), [1,2;3,4])
3679*/
3680
3681static octave_value
3682do_permute (const octave_value_list& args, bool inv)
3683{
3684 if (args.length () != 2 || args(1).length () < args(1).ndims ())
3685 print_usage ();
3686
3687 Array<int> vec = args(1).int_vector_value ();
3688
3689 // FIXME: maybe we should create an idx_vector object here
3690 // and pass that to permute?
3691 int n = vec.numel ();
3692 for (int i = 0; i < n; i++)
3693 vec(i)--;
3694
3695 return octave_value (args(0).permute (vec, inv));
3696}
3697
DEFUN (permute, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{B} =} permute (@var{A}, @var{perm})
Return the generalized transpose for an N-D array object @var{A}.

The permutation vector @var{perm} must contain the elements
@w{@code{1:ndims (A)}}@ (in any order, but each element must appear only
once). The @var{N}th dimension of @var{A} gets remapped to dimension
@code{@var{PERM}(@var{N})}. For example:

@example
@group
@var{x} = zeros ([2, 3, 5, 7]);
size (@var{x})
   @xresult{} 2 3 5 7

size (permute (@var{x}, [2, 1, 3, 4]))
   @xresult{} 3 2 5 7

size (permute (@var{x}, [1, 3, 4, 2]))
   @xresult{} 2 5 7 3

## The identity permutation
size (permute (@var{x}, [1, 2, 3, 4]))
   @xresult{} 2 3 5 7
@end group
@end example
@seealso{ipermute}
@end deftypefn */)
{
  // Forward permutation (inv == false); shares implementation with ipermute.
  return do_permute (args, false);
}
3730
DEFUN (ipermute, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{A} =} ipermute (@var{B}, @var{iperm})
The inverse of the @code{permute} function.

The expression

@example
ipermute (permute (A, perm), perm)
@end example

@noindent
returns the original array @var{A}.
@seealso{permute}
@end deftypefn */)
{
  // Inverse permutation (inv == true); shares implementation with permute.
  return do_permute (args, true);
}
3749
DEFUN (length, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{n} =} length (@var{A})
Return the length of the object @var{A}.

The length is 0 for empty objects, 1 for scalars, and the number of elements
for vectors. For matrix or N-dimensional objects, the length is the number
of elements along the largest dimension
(equivalent to @w{@code{max (size (@var{A}))}}).
@seealso{numel, size}
@end deftypefn */)
{
  if (args.length () != 1)
    print_usage ();

  // Dispatch to the value's own length () method.
  return ovl (args(0).length ());
}
3767
3768DEFUN (ndims, args, ,
3769 doc: /* -*- texinfo -*-
3770@deftypefn {} {@var{n} =} ndims (@var{A})
3771Return the number of dimensions of @var{A}.
3772
3773For any array, the result will always be greater than or equal to 2.
3774Trailing singleton dimensions are not counted, i.e., trailing dimensions
3775@var{d} greater than 2 for which @code{size (@var{A}, @var{d}) = 1}.
3776
3777@example
3778@group
3779ndims (ones (4, 1, 2, 1))
3780 @xresult{} 3
3781@end group
3782@end example
3783@seealso{size}
3784@end deftypefn */)
3785{
3786 if (args.length () != 1)
3787 print_usage ();
3788
3789 // This function *must* use size() to determine the desired values to be
3790 // compatible with Matlab and to allow user-defined class overloading.
3791 Matrix sz = octave_value (args(0)).size ();
3792
3793 octave_idx_type ndims = sz.numel ();
3794
3795 // Don't count trailing ones. Trailing zeros are *not* singleton dimension.
3796 while ((ndims > 2) && (sz(ndims - 1) == 1))
3797 ndims--;
3798
3799 return ovl (ndims);
3800}
3801
3802/*
3803%!assert (ndims (1:5), 2)
3804%!assert (ndims (ones (4, 1, 2, 1)), 3)
3805%!assert (ndims (ones (4, 1, 2, 0)), 4)
3806*/
3807
3808DEFUN (numel, args, ,
3809 doc: /* -*- texinfo -*-
3810@deftypefn {} {@var{n} =} numel (@var{A})
3811@deftypefnx {} {@var{n} =} numel (@var{A}, @var{idx1}, @var{idx2}, @dots{})
3812Return the number of elements in the object @var{A}.
3813
3814Optionally, if indices @var{idx1}, @var{idx2}, @dots{} are supplied,
3815return the number of elements that would result from the indexing
3816
3817@example
3818@var{A}(@var{idx1}, @var{idx2}, @dots{})
3819@end example
3820
3821Note that the indices do not have to be scalar numbers. For example,
3822
3823@example
3824@group
3825@var{a} = 1;
3826@var{b} = ones (2, 3);
3827numel (@var{a}, @var{b})
3828@end group
3829@end example
3830
3831@noindent
3832will return 6, as this is the number of ways to index with @var{b}.
3833Or the index could be the string @qcode{":"} which represents the colon
3834operator. For example,
3835
3836@example
3837@group
3838@var{A} = ones (5, 3);
3839numel (@var{A}, 2, ":")
3840@end group
3841@end example
3842
3843@noindent
3844will return 3 as the second row has three column entries.
3845
3846This method is also called when an object appears as lvalue with cs-list
3847indexing, i.e., @code{object@{@dots{}@}} or @code{object(@dots{}).field}.
3848@seealso{size, length, ndims}
3849@end deftypefn */)
3850{
3851 int nargin = args.length ();
3852
3853 if (nargin == 0)
3854 print_usage ();
3855
3856 octave_value retval;
3857
3858 if (nargin == 1)
3859 retval = args(0).numel ();
3860 else if (nargin > 1)
3861 {
3862 // Don't use numel (const octave_value_list&) here as that corresponds to
3863 // an overloaded call, not to builtin!
3864 retval = dims_to_numel (args(0).dims (), args.slice (1, nargin-1));
3865 }
3866
3867 return retval;
3868}
3869
3870DEFUN (size, args, nargout,
3871 doc: /* -*- texinfo -*-
3872@deftypefn {} {@var{sz} =} size (@var{A})
3873@deftypefnx {} {@var{dim_sz} =} size (@var{A}, @var{dim})
3874@deftypefnx {} {@var{dim_sz} =} size (@var{A}, @var{d1}, @var{d2}, @dots{})
3875@deftypefnx {} {[@var{rows}, @var{cols}, @dots{}, @var{dim_N_sz}] =} size (@dots{})
3876Return a row vector with the size (number of elements) of each dimension for
3877the object @var{A}.
3878
3879When given a second argument, @var{dim}, return the size of the corresponding
3880dimension. If @var{dim} is a vector, return each of the corresponding
3881dimensions. Multiple dimensions may also be specified as separate arguments.
3882
3883With a single output argument, @code{size} returns a row vector. When called
3884with multiple output arguments, @code{size} returns the size of dimension N
3885in the Nth argument. The number of rows, dimension 1, is returned in the
3886first argument, the number of columns, dimension 2, is returned in the
3887second argument, etc. If there are more dimensions in @var{A} than there are
3888output arguments, @code{size} returns the total number of elements in the
3889remaining dimensions in the final output argument. If the requested dimension
3890@var{dim} is greater than the number of dimensions in @var{A}, @code{size}
3891returns 1 (not 0).
3892
3893Example 1: single row vector output
3894
3895@example
3896@group
3897size ([1, 2; 3, 4; 5, 6])
3898 @xresult{} [ 3, 2 ]
3899@end group
3900@end example
3901
3902Example 2: number of elements in 2nd dimension (columns)
3903
3904@example
3905@group
3906size ([1, 2; 3, 4; 5, 6], 2)
3907 @xresult{} 2
3908@end group
3909@end example
3910
3911Example 3: number of output arguments == number of dimensions
3912
3913@example
3914@group
3915[nr, nc] = size ([1, 2; 3, 4; 5, 6])
3916 @xresult{} nr = 3
3917 @xresult{} nc = 2
3918@end group
3919@end example
3920
3921Example 4: number of output arguments < number of dimensions
3922
3923@example
3924@group
3925[nr, remainder] = size (ones (2, 3, 4, 5))
3926 @xresult{} nr = 2
3927 @xresult{} remainder = 60
3928@end group
3929@end example
3930
3931Example 5: number of elements in dimension > number of actual dimensions
3932
3933@example
3934@group
3935sz4 = size (ones (2, 3), 4)
3936 @xresult{} sz4 = 1
3937@end group
3938@end example
3939
3940@seealso{numel, ndims, length, rows, columns, size_equal, common_size}
3941@end deftypefn */)
3942{
3943 int nargin = args.length ();
3944
3945 if (nargin == 0)
3946 print_usage ();
3947
3948 // For compatibility with Matlab, size returns dimensions as doubles.
3949
3950 Matrix m;
3951
3952 dim_vector dimensions = args(0).dims ();
3953 int ndims = dimensions.ndims ();
3954
3955 if (nargin == 1)
3956 {
3957 if (nargout > 1)
3958 {
3959 dimensions = dimensions.redim (nargout);
3960 ndims = dimensions.ndims ();
3961 }
3962
3963 m.resize (1, ndims);
3964
3965 for (octave_idx_type i = 0; i < ndims; i++)
3966 m(i) = dimensions(i);
3967 }
3968 else
3969 {
3970 Array<octave_idx_type> query_dims;
3971
3972 if (nargin > 2)
3973 {
3974 query_dims.resize (dim_vector (1, nargin-1));
3975
3976 for (octave_idx_type i = 0; i < nargin-1; i++)
3977 query_dims(i) = args(i+1).idx_type_value (true);
3978 }
3979 else
3980 query_dims = args(1).octave_idx_type_vector_value (true);
3981
3982 if (nargout > 1 && nargout != query_dims.numel ())
3983 error ("size: nargout > 1 but does not match number of requested dimensions");
3984
3985 octave_idx_type nidx = query_dims.numel ();
3986
3987 m.resize (1, nidx);
3988
3989 for (octave_idx_type i = 0; i < nidx; i++)
3990 {
3991 octave_idx_type nd = query_dims.xelem (i);
3992
3993 if (nd < 1)
3994 error ("size: requested dimension DIM (= %"
3995 OCTAVE_IDX_TYPE_FORMAT ") out of range", nd);
3996
3997 m(i) = nd <= ndims ? dimensions (nd-1) : 1;
3998 }
3999 }
4000
4001 if (nargout > 1)
4002 {
4003 octave_value_list retval (nargout);
4004
4005 for (octave_idx_type i = 0; i < nargout; i++)
4006 retval(i) = m(i);
4007
4008 return retval;
4009 }
4010
4011 return ovl (m);
4012}
4013
4014/*
4015## Plain call
4016
4017%!assert (size ([1, 2; 3, 4; 5, 6]), [3, 2])
4018
4019%!test
4020%! [nr, nc] = size ([1, 2; 3, 4; 5, 6]);
4021%! assert (nr, 3);
4022%! assert (nc, 2);
4023
4024%!test
4025%! [nr, remainder] = size (ones (2, 3, 4, 5));
4026%! assert (nr, 2);
4027%! assert (remainder, 60);
4028
4029## Call for single existing dimension
4030
4031%!assert (size ([1, 2; 3, 4; 5, 6], 1), 3)
4032%!assert (size ([1, 2; 3, 4; 5, 6], 2), 2)
4033
4034## Call for single non-existing dimension
4035
4036%!assert (size ([1, 2; 3, 4; 5, 6], 3), 1)
4037%!assert (size ([1, 2; 3, 4; 5, 6], 4), 1)
4038
4039## Call for more than existing dimensions
4040
4041%!test
4042%! [nr, nc, e1, e2] = size ([1, 2; 3, 4; 5, 6]);
4043%! assert (nr, 3);
4044%! assert (nc, 2);
4045%! assert (e1, 1);
4046%! assert (e2, 1);
4047
4048## Call for two arbitrary dimensions
4049
4050%!test
4051%! dim = [3, 2, 1, 1, 1];
4052%! for i = 1:5
4053%! for j = 1:5
4054%! assert (size ([1, 2; 3, 4; 5, 6], i, j), [dim(i), dim(j)]);
4055%! assert (size ([1, 2; 3, 4; 5, 6], [i, j]), [dim(i), dim(j)]);
4056%! [a, b] = size ([1, 2; 3, 4; 5, 6], i, j);
4057%! assert (a, dim(i));
4058%! assert (b, dim(j));
4059%! [a, b] = size ([1, 2; 3, 4; 5, 6], [i, j]);
4060%! assert (a, dim(i));
4061%! assert (b, dim(j));
4062%! endfor
4063%! endfor
4064
4065## Call for three arbitrary dimensions
4066
4067%!test
4068%! dim = [3, 2, 1, 1, 1];
4069%! for i = 1:5
4070%! for j = 1:5
4071%! for k = 1:5
4072%! assert (size ([1, 2; 3, 4; 5, 6], i, j, k), [dim(i), dim(j), dim(k)]);
4073%! assert (size ([1, 2; 3, 4; 5, 6], [i, j, k]),
4074%! [dim(i), dim(j), dim(k)]);
4075%! [a, b, c] = size ([1, 2; 3, 4; 5, 6], i, j, k);
4076%! assert (a, dim(i));
4077%! assert (b, dim(j));
4078%! assert (c, dim(k));
4079%! [a, b, c] = size ([1, 2; 3, 4; 5, 6], [i, j, k]);
4080%! assert (a, dim(i));
4081%! assert (b, dim(j));
4082%! assert (c, dim(k));
4083%! endfor
4084%! endfor
4085%! endfor
4086
4087%!error <does not match number of requested dimensions>
4088%! [a, b, c] = size ([1, 2; 3, 4; 5, 6], 1:4)
4089*/
4090
4091DEFUN (size_equal, args, ,
4092 doc: /* -*- texinfo -*-
4093@deftypefn {} {@var{TF} =} size_equal (@var{A}, @var{B})
4094@deftypefnx {} {@var{TF} =} size_equal (@var{A}, @var{B}, @dots{})
4095Return true if the dimensions of all arguments agree.
4096
4097Trailing singleton dimensions are ignored. When called with a single argument,
4098or no argument, @code{size_equal} returns true.
4099@seealso{size, numel, ndims, common_size}
4100@end deftypefn */)
4101{
4102 int nargin = args.length ();
4103
4104 if (nargin >= 1)
4105 {
4106 const dim_vector& a_dims = args(0).dims ();
4107
4108 for (int i = 1; i < nargin; ++i)
4109 {
4110 const dim_vector& b_dims = args(i).dims ();
4111
4112 if (a_dims != b_dims)
4113 return ovl (false);
4114 }
4115 }
4116
4117 return ovl (true);
4118}
4119
DEFUN (nnz, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{n} =} nnz (@var{A})
Return the number of nonzero elements in @var{A}.
@seealso{nzmax, nonzeros, find}
@end deftypefn */)
{
  if (args.length () != 1)
    print_usage ();

  // Dispatch to the value's own nnz () implementation.
  return ovl (args(0).nnz ());
}
4132
4133/*
4134%!assert (nnz (1:5), 5)
4135%!assert (nnz (-5:-1), 5)
4136%!assert (nnz (0:5), 5)
4137%!assert (nnz (-5:0), 5)
4138%!assert (nnz (-5:5), 10)
4139%!assert (nnz (-2:1:2), 4)
4140%!assert (nnz (-2+eps (2):1:2), 5)
4141%!assert (nnz (-2-eps (2):1:2), 5)
4142%!assert (nnz (-2:1+eps (1):2), 5)
4143%!assert (nnz (-2:1-eps (1):2), 5)
4144%!assert (nnz ([1:5] * 0), 0)
4145%!assert (nnz ([-5:-1] * 0), 0)
4146%!assert (nnz ([-1:1] * 0), 0)
4147*/
4148
DEFUN (nzmax, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{n} =} nzmax (@var{SM})
Return the amount of storage allocated to the sparse matrix @var{SM}.

Programming Note: Octave tends to crop unused memory at the first opportunity
for sparse objects. Thus, in general the value of @code{nzmax} will be the
same as @code{nnz}, except for some cases of user-created sparse objects.

Also, note that Octave always reserves storage for at least one value. Thus,
for empty matrices @code{nnz} will report 0, but @code{nzmax} will report 1.
@seealso{nnz, spalloc, sparse}
@end deftypefn */)
{
  if (args.length () != 1)
    print_usage ();

  // Dispatch to the value's own nzmax () implementation.
  return ovl (args(0).nzmax ());
}
4168
DEFUN (rows, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{nr} =} rows (@var{A})
@deftypefnx {} {@var{nr} =} height (@var{A})
Return the number of rows of @var{A}.

This is equivalent to @code{size (@var{A}, 1)}.

Programming Note: @code{height} is an alias for @code{rows} and can be
used interchangeably.

@seealso{columns, size, length, numel, isscalar, isvector, ismatrix}
@end deftypefn */)
{
  if (args.length () != 1)
    print_usage ();

  // This function *must* use size() to determine the desired values to
  // allow user-defined class overloading.

  // Element 0 of the size vector is the first (row) dimension.
  return ovl ((octave_value (args(0)).size ())(0));
}

DEFALIAS (height, rows);
4193
4194/*
4195%!assert (rows (ones (2,5)), 2)
4196%!assert (rows (ones (5,2)), 5)
4197%!assert (rows (ones (5,4,3,2)), 5)
4198%!assert (rows (ones (3,4,5,2)), 3)
4199
4200%!assert (rows (cell (2,5)), 2)
4201%!assert (rows (cell (5,2)), 5)
4202%!assert (rows (cell (5,4,3,2)), 5)
4203%!assert (rows (cell (3,4,5,2)), 3)
4204
4205%!test
4206%! x(2,5,3).a = 1;
4207%! assert (rows (x), 2);
4208%! y(5,4,3).b = 2;
4209%! assert (rows (y), 5);
4210
4211%!assert (rows ("Hello World"), 1)
4212
4213%!assert (rows ([]), 0)
4214%!assert (rows (zeros (2,0)), 2)
4215
4216## Test input validation
4217%!error rows ()
4218%!error rows (1,2)
4219*/
4220
DEFUN (columns, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{nc} =} columns (@var{A})
@deftypefnx {} {@var{nc} =} width (@var{A})
Return the number of columns of @var{A}.

This is equivalent to @code{size (@var{A}, 2)}.

Programming Note: @code{width} is an alias for @code{columns} and can be
used interchangeably.

@seealso{rows, size, length, numel, isscalar, isvector, ismatrix}
@end deftypefn */)
{
  if (args.length () != 1)
    print_usage ();

  // This function *must* use size() to determine the desired values to
  // allow user-defined class overloading.

  // Element 1 of the size vector is the second (column) dimension.
  return ovl ((octave_value (args(0)).size ())(1));
}

DEFALIAS (width, columns);
4245
4246DEFUN (sum, args, ,
4247 doc: /* -*- texinfo -*-
4248@deftypefn {} {@var{y} =} sum (@var{x})
4249@deftypefnx {} {@var{y} =} sum (@var{x}, @var{dim})
4250@deftypefnx {} {@var{y} =} sum (@var{x}, @var{vecdim})
4251@deftypefnx {} {@var{y} =} sum (@var{x}, "all")
4252@deftypefnx {} {@var{y} =} sum (@dots{}, @var{outtype})
4253@deftypefnx {} {@var{y} =} sum (@dots{}, @var{nanflag})
4254Compute the sum of the elements of @var{x}.
4255
4256If @var{x} is a vector, then @code{sum (@var{x})} returns the sum of the
4257elements in @var{x}.
4258
4259If @var{x} is a matrix, then @code{sum (@var{x})} returns a row vector with
4260each element containing the sum of the corresponding column in @var{x}.
4261
4262If @var{x} is an array, then @code{sum(@var{x})} computes the sum along the
4263first non-singleton dimension of @var{x}.
4264
4265The optional input @var{dim} specifies the dimension to operate on and must be
4266a positive integer. Specifying any singleton dimension in @var{x}, including
4267any dimension exceeding @code{ndims (@var{x})}, will return @var{x}.
4268
4269Specifying multiple dimensions with input @var{vecdim}, a vector of
4270non-repeating dimensions, will operate along the array slice defined by
4271@var{vecdim}. If @var{vecdim} indexes all dimensions of @var{x}, then it is
4272equivalent to the option @qcode{"all"}. Any dimension in @var{vecdim} greater
4273than @code{ndims (@var{x})} is ignored.
4274
4275Specifying the dimension as @qcode{"all"} will cause @code{sum} to operate
4276on all elements of @var{x}, and is equivalent to @code{cumsum (@var{x}(:))}.
4277
4278The optional input @var{outtype} specifies the data type that is returned as
4279well as the class of the variable used for calculations.
4280@var{outtype} can take the following values:
4281
4282@table @asis
4283@item @qcode{"default"}
4284Operations on floating point inputs (double or single) are performed in their
4285native data type, while operations on integer, logical, and character data
4286types are performed using doubles. Output is of type double, unless the input
4287is single in which case the output is of type single.
4288
4289@item @qcode{"double"}
4290Operations are performed in double precision even for single precision inputs.
4291Output is of type double.
4292
4293@item @qcode{"extra"}
4294For double precision inputs, @code{sum} will use a more accurate algorithm than
4295straightforward summation. For single precision inputs, @qcode{"extra"} is the
4296same as @qcode{"double"}. For all other data types, @qcode{"extra"} has no
4297effect.
4298
4299@item @qcode{"native"}
4300Operations are performed in their native data types and output is of the same
4301type as the input as reported by (@code{class (@var{x})}). When the input is
4302logical, @code{sum (@var{x}, "native")} is equivalent to @code{any (@var{x})}.
4303@end table
4304
4305The optional variable @var{nanflag} specifies whether to include or exclude
4306NaN values from the calculation using any of the previously specified input
4307argument combinations. The default value for @var{nanflag} is
4308@qcode{"includenan"} which keeps NaN values in the calculation. To exclude
4309NaN values set the value of @var{nanflag} to @qcode{"omitnan"}. The output
4310will be @var{0}, if @var{x} consists of all NaN values in the
4311operating dimension.
4312@seealso{cumsum, sumsq, prod}
4313@end deftypefn */)
4314{
4315 int nargin = args.length ();
4316
4317 bool isnative = false;
4318 bool isdouble = false;
4319 bool isextra = false;
4320 bool do_perm = false;
4321 bool allflag = false;
4322 bool nanflag = false;
4323
4324 while (nargin > 1 && args(nargin - 1).is_string ())
4325 {
4326 std::string str = args(nargin - 1).string_value ();
4327
4328 if (str == "native")
4329 isnative = true;
4330 else if (str == "double")
4331 isdouble = true;
4332 else if (str == "extra")
4333 isextra = true;
4334 else if (str == "all")
4335 allflag = true;
4336 else if (str == "omitnan" || str == "omitmissing")
4337 {
4338 if (args(0).is_double_type () || args(0).is_single_type ())
4339 nanflag = true;
4340 }
4341 else if (str == "includenan" || str == "includemissing")
4342 nanflag = false;
4343 else if (str != "default")
4344 error ("sum: unrecognized optional argument '%s'", str.c_str ());
4345
4346 nargin--;
4347 }
4348
4349 if (nargin < 1 || nargin > 2)
4350 print_usage ();
4351 if (allflag && nargin > 1)
4352 error ("sum: cannot set DIM or VECDIM with 'all' flag");
4353
4354 octave_value arg = args(0);
4355
4356 // Handle DIM, VECDIM
4357 int dim = -1;
4358 Array<int> perm_vec;
4359 if (nargin == 2)
4360 {
4361 octave_value dimarg = args(1);
4362 get_dim_vecdim_all (dimarg, arg, dim, perm_vec, do_perm, allflag, "sum");
4363 }
4364
4365 // Handle allflag
4366 if (allflag)
4367 arg = arg.reshape (dim_vector (arg.numel (), 1));
4368
4369 octave_value retval;
4370
4371 switch (arg.builtin_type ())
4372 {
4373 case btyp_double:
4374 if (arg.issparse ())
4375 {
4376 if (isextra)
4377 retval = arg.sparse_matrix_value ().xsum (dim, nanflag);
4378 else
4379 retval = arg.sparse_matrix_value ().sum (dim, nanflag);
4380 }
4381 else
4382 {
4383 if (isextra)
4384 retval = arg.array_value ().xsum (dim, nanflag);
4385 else
4386 retval = arg.array_value ().sum (dim, nanflag);
4387 }
4388 break;
4389
4390 case btyp_complex:
4391 if (arg.issparse ())
4392 {
4393 if (isextra)
4394 retval = arg.sparse_complex_matrix_value ().xsum (dim, nanflag);
4395 else
4396 retval = arg.sparse_complex_matrix_value ().sum (dim, nanflag);
4397 }
4398 else
4399 {
4400 if (isextra)
4401 retval = arg.complex_array_value ().xsum (dim, nanflag);
4402 else
4403 retval = arg.complex_array_value ().sum (dim, nanflag);
4404 }
4405 break;
4406
4407 case btyp_float:
4408 if (isdouble || isextra)
4409 retval = arg.float_array_value ().dsum (dim, nanflag);
4410 else
4411 retval = arg.float_array_value ().sum (dim, nanflag);
4412 break;
4413
4414 case btyp_float_complex:
4415 if (isdouble || isextra)
4416 retval = arg.float_complex_array_value ().dsum (dim, nanflag);
4417 else
4418 retval = arg.float_complex_array_value ().sum (dim, nanflag);
4419 break;
4420
4421#define MAKE_INT_BRANCH(X) \
4422 case btyp_ ## X: \
4423 if (isnative) \
4424 retval = arg.X ## _array_value ().sum (dim); \
4425 else \
4426 retval = arg.X ## _array_value ().dsum (dim); \
4427 break;
4428
4429 MAKE_INT_BRANCH (int8);
4430 MAKE_INT_BRANCH (int16);
4431 MAKE_INT_BRANCH (int32);
4432 MAKE_INT_BRANCH (int64);
4433 MAKE_INT_BRANCH (uint8);
4434 MAKE_INT_BRANCH (uint16);
4435 MAKE_INT_BRANCH (uint32);
4436 MAKE_INT_BRANCH (uint64);
4437
4438#undef MAKE_INT_BRANCH
4439
4440 // GAGME: Accursed Matlab compatibility...
4441 case btyp_char:
4442 if (isextra)
4443 retval = arg.array_value (true).xsum (dim);
4444 else
4445 retval = arg.array_value (true).sum (dim);
4446 break;
4447
4448 case btyp_bool:
4449 if (arg.issparse ())
4450 {
4451 if (isnative)
4452 retval = arg.sparse_bool_matrix_value ().any (dim);
4453 else
4454 retval = arg.sparse_bool_matrix_value ().sum (dim);
4455 }
4456 else if (isnative)
4457 retval = arg.bool_array_value ().any (dim);
4458 else
4459 {
4460 boolNDArray m = arg.bool_array_value ();
4461 retval = do_mx_red_op<double, bool> (m, dim, mx_inline_count);
4462 }
4463 break;
4464
4465 default:
4466 err_wrong_type_arg ("sum", arg);
4467 }
4468
4469 if (do_perm)
4470 retval = retval.permute (perm_vec, true);
4471
4472 return retval;
4473}
4474
4475/*
4476%!assert (sum ([1, 2, 3]), 6)
4477%!assert (sum ([-1; -2; -3]), -6)
4478%!assert (sum ([i, 2+i, -3+2i, 4]), 3+4i)
4479%!assert (sum ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i]), [2+2i, 4+4i, 6+6i])
4480
4481%!assert (sum (single ([1, 2, 3])), single (6))
4482%!assert (sum (single ([-1; -2; -3])), single (-6))
4483%!assert (sum (single ([i, 2+i, -3+2i, 4])), single (3+4i))
4484%!assert (sum (single ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i])),
4485%! single ([2+2i, 4+4i, 6+6i]))
4486
4487%!assert (sum ([1, 2; 3, 4], 1), [4, 6])
4488%!assert (sum ([1, 2; 3, 4], 2), [3; 7])
4489%!assert (sum (single ([1, 2; 3, 4]), 1), single ([4, 6]))
4490%!assert (sum (single ([1, 2; 3, 4]), 2), single ([3; 7]))
4491
4492## Test empty matrices
4493%!assert (sum ([]), 0)
4494%!assert (sum ([], 1), zeros (1, 0))
4495%!assert (sum ([], 2), zeros (0, 1))
4496%!assert (sum ([], 3), zeros (0, 0))
4497%!assert (sum (zeros (1, 0)), 0)
4498%!assert (sum (zeros (1, 0), 1), zeros (1, 0))
4499%!assert (sum (zeros (1, 0), 2), 0)
4500%!assert (sum (zeros (0, 1)), 0)
4501%!assert (sum (zeros (0, 1), 1), 0)
4502%!assert (sum (zeros (0, 1), 2), zeros (0, 1))
4503%!assert (sum (zeros (2, 0)), zeros (1, 0))
4504%!assert (sum (zeros (2, 0), 1), zeros (1, 0))
4505%!assert (sum (zeros (2, 0), 2), [0; 0])
4506%!assert (sum (zeros (0, 2)), [0, 0])
4507%!assert (sum (zeros (0, 2), 1), [0, 0])
4508%!assert (sum (zeros (0, 2), 2), zeros (0, 1))
4509%!assert (sum (zeros (2, 2, 0, 3)), zeros (1, 2, 0, 3))
4510%!assert (sum (zeros (2, 2, 0, 3), 2), zeros (2, 1, 0, 3))
4511%!assert (sum (zeros (2, 2, 0, 3), 3), zeros (2, 2, 1, 3))
4512%!assert (sum (zeros (2, 2, 0, 3), 4), zeros (2, 2, 0))
4513%!assert (sum (zeros (2, 2, 0, 3), 7), zeros (2, 2, 0, 3))
4514
4515%!assert (sum (single ([])), single (0))
4516%!assert (sum (single ([]), 1), single (zeros (1, 0)))
4517%!assert (sum (single ([]), 2), single (zeros (0, 1)))
4518%!assert (sum (single ([]), 3), single (zeros (0, 0)))
4519%!assert (sum (zeros (1, 0, "single")), single (0))
4520%!assert (sum (zeros (1, 0, "single"), 1), zeros (1, 0, "single"))
4521%!assert (sum (zeros (1, 0, "single"), 2), single (0))
4522%!assert (sum (zeros (0, 1, "single")), single (0))
4523%!assert (sum (zeros (0, 1, "single"), 1), single (0))
4524%!assert (sum (zeros (0, 1, "single"), 2), zeros (0, 1, "single"))
4525%!assert (sum (zeros (2, 0, "single")), zeros (1, 0, "single"))
4526%!assert (sum (zeros (2, 0, "single"), 1), zeros (1, 0, "single"))
4527%!assert (sum (zeros (2, 0, "single"), 2), single ([0; 0]))
4528%!assert (sum (zeros (0, 2, "single")), single ([0, 0]))
4529%!assert (sum (zeros (0, 2, "single"), 1), single ([0, 0]))
4530%!assert (sum (zeros (0, 2, "single"), 2), zeros (0, 1, "single"))
4531%!assert (sum (zeros (2, 2, 0, 3, "single")), zeros (1, 2, 0, 3, "single"))
4532%!assert (sum (zeros (2, 2, 0, 3, "single"), 2), zeros (2, 1, 0, 3, "single"))
4533%!assert (sum (zeros (2, 2, 0, 3, "single"), 3), zeros (2, 2, 1, 3, "single"))
4534%!assert (sum (zeros (2, 2, 0, 3, "single"), 4), zeros (2, 2, 0, "single"))
4535%!assert (sum (zeros (2, 2, 0, 3, "single"), 7), zeros (2, 2, 0, 3, "single"))
4536
4537## Test "default"
4538%!assert (sum (single (1)), sum (single (1), "default"))
4539%!assert (sum ([true true], "default"), double (2))
4540%!assert (sum (uint8 (1), "default"), double (1))
4541
4542## Test "double" and "extra"
4543%!assert (sum (single ([1 2 3]), "double"), double (6))
4544%!assert (sum (single ([1 2 3]), "extra"), double (6))
4545%!assert (sum ([true,true], "double"), double (2))
4546
4547## Test "native"
4548%!assert (sum ([true,true]), 2)
4549%!assert (sum ([true,true], "native"), true)
4550%!assert (sum (int8 ([127,10,-20])), 117)
4551%!assert (sum (int8 ([127,10,-20]), "native"), int8 (107))
4552
4553## Test character arrays
4554%!assert (sum ("Octave") + "8", sumsq (primes (17)))
4555%!assert (sum (repmat ("Octave", [2,1,3]), [1, 3]), [474 594 696 582 708 606])
4556%!assert (sum (repmat ("Octave", [2,1,3]), [1, 2, 3]), 3660)
4557%!assert (sum (repmat ("Octave", [2,1,3]), 'all'), 3660)
4558
4559## Test dimension indexing with vecdim in N-dimensional arrays
4560%!test
4561%! x = repmat ([1:20;6:25], [5 2 6 3]);
4562%! assert (size (sum (x, [3 2])), [10 1 1 3]);
4563%! assert (size (sum (x, [1 2])), [1 1 6 3]);
4564%! assert (size (sum (x, [1 2 4])), [1 1 6]);
4565%! assert (size (sum (x, [1 4 3])), [1 40]);
4566%! assert (size (sum (x, [1 2 3 4])), [1 1]);
4567
4568## Test exceeding dimensions
4569%!assert (sum (ones (2,2), 3), ones (2,2))
4570%!assert (sum (ones (2,2,2), 99), ones (2,2,2))
4571%!assert (sum (magic (3), 3), magic (3))
4572%!assert (sum (magic (3), [1 3]), [15, 15, 15])
4573%!assert (sum (magic (3), [1 99]), [15, 15, 15])
4574%!assert (sum (ones (2), 4), ones (2))
4575%!assert (sum (ones (2), [4, 5]), ones (2))
4576%!assert (sum (single (ones (2)), 4),single (ones (2)))
4577%!assert (sum (single (ones (2)), [4, 5]),single (ones (2)))
4578%!assert (sum (sparse ([1, 2; 3, 4]), 3), sparse ([1, 2; 3, 4]))
4579%!assert (sum (sparse ([1, 2; 3, 4]), 3, 'extra'), sparse ([1, 2; 3, 4]))
4580%!assert (sum (sparse ([1, 2i; 3, 4]), 3), sparse ([1, 2i; 3, 4]))
4581%!assert (sum (sparse ([1, 2i; 3, 4]), 3, 'extra'), sparse ([1, 2i; 3, 4]))
4582
4583## Test nanflag
4584%!test
4585%! x = ones (3,4,5);
4586%! x(1) = NaN;
4587%! assert (sum (x)(:,:,1), [NaN, 3, 3, 3]);
4588%! assert (sum (x, "includenan")(:,:,1), [NaN, 3, 3, 3]);
4589%! assert (sum (x, "omitnan")(:,:,1), [2, 3, 3, 3]);
4590%! assert (sum (x, "omitmissing")(:,:,1), [2, 3, 3, 3]);
4591%! assert (sum (x, [2 3]), [NaN; 20; 20]);
4592%! assert (sum (x, [2 3], "omitnan"), [19; 20; 20]);
4593
4594## Test sparse matrices
4595%!assert (sum (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN])),
4596%! sparse ([NaN, NaN, 2, 6, NaN]))
4597%!assert (sum (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), "omitnan"),
4598%! sparse ([1, 2, 2, 6, 2]))
4599%!assert (sum (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), 2),
4600%! sparse ([NaN; NaN]))
4601%!assert (sum (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
4602%! sparse ([7; 6]))
4603%!assert (sum (sparse ([NaN, NaN, 1i, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
4604%! sparse ([6+i; 6]))
4605%!assert (sum (sparse ([NaN, 0i, 1i, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
4606%! sparse ([6+i; 6]))
4607%!assert (sum (sparse ([NaN, 1+i, 1i, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
4608%! sparse ([7+2i; 6]))
4609%!assert (sum (sparse ([NaN, NaN, NaN]), "omitnan"), sparse (0))
4610%!assert (sum (sparse ([0, 0, 0, NaN, NaN, NaN]), "omitnan"), sparse (0))
4611
4612## Test empty sparse matrices
4613%!assert (sum (sparse (ones(1, 0))), sparse (0))
4614%!assert (size (sum (sparse (ones(1, 0)), 1)), [1, 0])
4615%!assert (size (sum (sparse (ones(1, 0)), 2)), [1, 1])
4616%!assert (sum (sparse (ones(0, 1))), sparse (0))
4617%!assert (size (sum (sparse (ones(0, 1)), 1)), [1, 1])
4618%!assert (size (sum (sparse (ones(0, 1)), 1)), [1, 1])
4619%!assert (size (sum (sparse (ones(0, 1)), 2)), [0, 1])
4620%!assert (sum (sparse (ones(0, 0))), sparse (0))
4621%!assert (size (sum (sparse (ones(0, 0)), 1)), [1, 0])
4622%!assert (size (sum (sparse (ones(0, 0)), 2)), [0, 1])
4623%!assert (size (sum (sparse (ones(0, 0)), 3)), [0, 0])
4624
4625## Test empty sparse matrices with 'extra' option
4626%!assert (sum (sparse (ones(1, 0)), 'extra'), sparse (0))
4627%!assert (size (sum (sparse (ones(1, 0)), 1, 'extra')), [1, 0])
4628%!assert (size (sum (sparse (ones(1, 0)), 2, 'extra')), [1, 1])
4629%!assert (sum (sparse (ones(0, 1)), 'extra'), sparse (0))
4630%!assert (size (sum (sparse (ones(0, 1)), 1, 'extra')), [1, 1])
4631%!assert (size (sum (sparse (ones(0, 1)), 1, 'extra')), [1, 1])
4632%!assert (size (sum (sparse (ones(0, 1)), 2, 'extra')), [0, 1])
4633%!assert (sum (sparse (ones(0, 0)), 'extra'), sparse (0))
4634%!assert (size (sum (sparse (ones(0, 0)), 1, 'extra')), [1, 0])
4635%!assert (size (sum (sparse (ones(0, 0)), 2, 'extra')), [0, 1])
4636%!assert (size (sum (sparse (ones(0, 0)), 3, 'extra')), [0, 0])
4637
4638## Test 'extra' option
4639%!assert (sum ([1, Inf], "extra"), Inf)
4640%!assert (sum ([1, -Inf], "extra"), -Inf)
4641%!assert (sum ([Inf, -Inf], "extra"), NaN)
4642%!assert (sum ([realmax, 1e300], "extra"), Inf)
4643%!assert (sum ([realmax/2, realmax/2, 1e300], "extra"), Inf)
4644%!assert (sum ([realmax/2, (2 * (realmax/3))], "extra"), Inf)
4645%!assert (sum (sparse ([1, Inf]), "extra"), sparse (Inf))
4646%!assert (sum (sparse ([1, -Inf]), "extra"), sparse (-Inf))
4647%!assert (sum (sparse ([Inf, -Inf]), "extra"), sparse (NaN))
4648%!test
4649%! x = [flintmax("double"), 1, -1];
4650%! assert (sum (x, "extra") - flintmax ("double"), 0);
4651%! assert (sum (x) - flintmax ("double"), -1);
4652%!test
4653%! x = sparse ([flintmax("double"), 1, -1]);
4654%! assert (sum (x, "extra") - sparse (flintmax ("double")), sparse (0));
4655%! assert (sum (x) - sparse (flintmax ("double")), sparse (-1));
4656%!test
4657%! F(:,:,1) = [3, 5; -1, 2];
4658%! F(:,:,2) = [4, -2; Inf, -4];
4659%! s = sum (F, 1, "extra");
4660%! assert (s(:,:,1), [2, 7]);
4661%! assert (s(:,:,2), [Inf, -6]);
4662%! s = sum (F, 2, "extra");
4663%! assert (s(:,:,1), [8; 1]);
4664%! assert (s(:,:,2), [2; Inf]);
4665%! s = sum (F, 3, "extra");
4666%! assert (s, [7, 3; Inf, -2]);
4667%!test
4668%! F(:,:,1) = [NaN, 5; -1, 2];
4669%! F(:,:,2) = [4, -2; Inf, -4];
4670%! s = sum (F, 1, "extra");
4671%! assert (s(:,:,1), [NaN, 7]);
4672%! assert (s(:,:,2), [Inf, -6]);
4673%! s = sum (F, 2, "extra");
4674%! assert (s(:,:,1), [NaN; 1]);
4675%! assert (s(:,:,2), [2; Inf]);
4676%! s = sum (F, 3, "extra");
4677%! assert (s, [NaN, 3; Inf, -2]);
4678%!test
4679%! F(:,:,1) = [NaN, 5; -1, 2];
4680%! F(:,:,2) = [4, -2; -Inf, -4];
4681%! s = sum (F, 1, "extra");
4682%! assert (s(:,:,1), [NaN, 7]);
4683%! assert (s(:,:,2), [-Inf, -6]);
4684%! s = sum (F, 2, "extra");
4685%! assert (s(:,:,1), [NaN; 1]);
4686%! assert (s(:,:,2), [2; -Inf]);
4687%! s = sum (F, 3, "extra");
4688%! assert (s, [NaN, 3; -Inf, -2]);
4689%!test
4690%! F(:,:,1) = [NaN, 5; -1, 2];
4691%! F(:,:,2) = [Inf, -2; -Inf, -4];
4692%! s = sum (F, 1, "extra");
4693%! assert (s(:,:,1), [NaN, 7]);
4694%! assert (s(:,:,2), [NaN, -6]);
4695%! s = sum (F, 2, "extra");
4696%! assert (s(:,:,1), [NaN; 1]);
4697%! assert (s(:,:,2), [Inf; -Inf]);
4698%! s = sum (F, 3, "extra");
4699%! assert (s, [NaN, 3; -Inf, -2]);
4700%!test
4701%! F(:,:,1) = [NaN, 5; -1, 2];
4702%! F(:,:,2) = [4, -2; Inf, -4];
4703%! assert (sum (F, 1, "omitnan", "extra"), sum (F, 1, "omitnan"));
4704%! assert (sum (F, 2, "omitnan", "extra"), sum (F, 2, "omitnan"));
4705%! assert (sum (F, 3, "omitnan", "extra"), sum (F, 3, "omitnan"));
4706%! assert (sum (F, [1, 2], "omitnan", "extra"), sum (F, [1, 2], "omitnan"));
4707%! assert (sum (F, [1, 3], "omitnan", "extra"), sum (F, [1, 3], "omitnan"));
4708%! assert (sum (F, [2, 3], "omitnan", "extra"), sum (F, [2, 3], "omitnan"));
4709%! assert (sum (F, "all", "omitnan", "extra"), sum (F, [1, 2, 3], "omitnan"));
4710
4711## Test 'extra' option with sparse matrices
4712%!assert (sum (sparse ([2; 3; 4; 5; 6]), "extra"), sparse (20))
4713%!assert (sum (sparse ([2; 3; 0; 5; 6]), "extra"), sparse (16))
4714%!assert (sum (sparse ([2; 3; 4; 5; 6]'), "extra"), sparse (20))
4715%!assert (sum (sparse ([2; 3; 0; 5; 6]'), "extra"), sparse (16))
4716%!assert (sum (speye (3), "extra"), sparse ([1, 1, 1]))
4717%!assert (sum (speye (3), 1, "extra"), sparse ([1, 1, 1]))
4718%!assert (sum (speye (3), 2, "extra"), sparse ([1; 1; 1]))
4719%!assert (sum (speye (3), 3, "extra"), speye (3))
4720%!assert (sum (sparse ([2; 3; 4; 5; 6] * i), "extra"), sparse (20i))
4721%!assert (sum (sparse ([2; 3; 0; 5; 6] * i), "extra"), sparse (16i))
4722%!assert (sum (sparse ([2; 3; 4; 5; 6]' * i), "extra"), sparse (20i))
4723%!assert (sum (sparse ([2; 3; 0; 5; 6]' * i), "extra"), sparse (16i))
4724%!assert (sum (speye (3) * i, "extra"), sparse ([1, 1, 1]*i))
4725%!assert (sum (speye (3) * i, 1, "extra"), sparse ([1, 1, 1]*i))
4726%!assert (sum (speye (3) * i, 2, "extra"), sparse ([1; 1; 1]*i))
4727%!assert (sum (speye (3) * i, 3, "extra"), speye (3) * i)
4728
## Test 'extra' option with sparse matrices
## FIXME: this block duplicates the preceding "'extra' option with sparse
## matrices" block verbatim; consider removing one copy.
4730%!assert (sum (sparse ([2; 3; 4; 5; 6]), "extra"), sparse (20))
4731%!assert (sum (sparse ([2; 3; 0; 5; 6]), "extra"), sparse (16))
4732%!assert (sum (sparse ([2; 3; 4; 5; 6]'), "extra"), sparse (20))
4733%!assert (sum (sparse ([2; 3; 0; 5; 6]'), "extra"), sparse (16))
4734%!assert (sum (speye (3), "extra"), sparse ([1, 1, 1]))
4735%!assert (sum (speye (3), 1, "extra"), sparse ([1, 1, 1]))
4736%!assert (sum (speye (3), 2, "extra"), sparse ([1; 1; 1]))
4737%!assert (sum (speye (3), 3, "extra"), speye (3))
4738%!assert (sum (sparse ([2; 3; 4; 5; 6] * i), "extra"), sparse (20i))
4739%!assert (sum (sparse ([2; 3; 0; 5; 6] * i), "extra"), sparse (16i))
4740%!assert (sum (sparse ([2; 3; 4; 5; 6]' * i), "extra"), sparse (20i))
4741%!assert (sum (sparse ([2; 3; 0; 5; 6]' * i), "extra"), sparse (16i))
4742%!assert (sum (speye (3) * i, "extra"), sparse ([1, 1, 1]*i))
4743%!assert (sum (speye (3) * i, 1, "extra"), sparse ([1, 1, 1]*i))
4744%!assert (sum (speye (3) * i, 2, "extra"), sparse ([1; 1; 1]*i))
4745%!assert (sum (speye (3) * i, 3, "extra"), speye (3) * i)
4746
4747## Test cases for "omitnan"
4748%!test
4749%! A = [1, NaN; 2, NaN; 3, NaN];
4750%! assert (sum (A, 2, "omitnan"), [1; 2; 3]);
4751%!test
4752%! A = [1, 2, 3; NaN, NaN, NaN];
4753%! assert (sum (A, 1, "omitnan"), [1, 2, 3]);
4754%!test
4755%! A = [NaN, NaN, NaN];
4756%! assert (sum (A, "omitnan"), 0);
4757%!test
4758%! A = [1, NaN, 3; 2, NaN, 4; 5, NaN, 6];
4759%! assert (sum (A, 1, "omitnan"), [8, 0, 13]);
4760%!test
4761%! A = [1, 2, NaN; 3, 4, NaN; 5, 6, NaN];
4762%! assert (sum (A, 2, "omitnan"), [3; 7; 11]);
4763%!test
4764%! A = [2, NaN, NaN, 3; 4, NaN, NaN, 5; 1, NaN, NaN, 6];
4765%! assert (sum (A, 1, "omitnan"), [7, 0, 0, 14]);
4766%!test
4767%! A = [1, NaN, 3, NaN, 5; 2, NaN, 4, NaN, 6; 3, NaN, 5, NaN, 7];
4768%! assert (sum (A, 1, "omitnan"), [6, 0, 12, 0, 18]);
4769%!test
4770%! A = [1+2i, NaN; 3+4i, NaN; 5+6i, NaN];
4771%! assert (sum (A, 2, "omitnan"), [1+2i; 3+4i; 5+6i]);
4772%!test
4773%! A = single ([1, NaN; 2, NaN; 3, NaN]);
4774%! assert (sum (A, 2, "omitnan"), single ([1; 2; 3]));
4775%!test
4776%! A = single ([NaN, NaN, NaN]);
4777%! assert (sum (A, "omitnan"), single (0));
4778
4779## Test boolean sum optimization
4780%!test
4781%! x = [true, false, true; false, true, false];
4782%! assert (sum (x, 1), [1, 1, 1]);
4783%! assert (sum (x, 2), [2; 1]);
4784
4785%!test
4786%! x = rand (100, 100) > 0.5;
4787%! assert (sum (x), sum (double (x)));
4788%! assert (sum (x, 2), sum (double (x), 2));
4789
4790%!assert (sum (true), 1)
4791%!assert (sum (false), 0)
4792%!assert (class (sum ([true, false])), "double")
4793
4794## Test input validation
4795%!error <Invalid call> sum ()
4796%!error <Invalid call> sum (1,2,3)
4797%!error <unrecognized optional argument 'foobar'> sum (1, "foobar")
4798%!error <cannot set DIM or VECDIM with 'all' flag>
4799%! sum (ones (3,3), 1, "all");
4800%!error <cannot set DIM or VECDIM with 'all' flag>
4801%! sum (ones (3,3), [1, 2], "all");
4802%!error <invalid dimension DIM = 0> sum (ones (3,3), 0)
4803%!error <invalid dimension DIM = -1> sum (ones (3,3), -1)
4804%!error <invalid dimension in VECDIM = -2> sum (ones (3,3), [1 -2])
4805%!error <duplicate dimension in VECDIM = 2> sum (ones (3,3), [1 2 2])
4806%!error <duplicate dimension in VECDIM = 1> sum (ones (3,3), [1 1 2])
4807*/
4808
DEFUN (sumsq, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{y} =} sumsq (@var{x})
@deftypefnx {} {@var{y} =} sumsq (@var{x}, @var{dim})
@deftypefnx {} {@var{y} =} sumsq (@var{x}, @var{vecdim})
@deftypefnx {} {@var{y} =} sumsq (@var{x}, "all")
@deftypefnx {} {@var{y} =} sumsq (@dots{}, @var{outtype})
@deftypefnx {} {@var{y} =} sumsq (@dots{}, @var{nanflag})
Compute the sum of squares of the elements of @var{x}.

If @var{x} is a vector, then @code{sumsq (@var{x})} returns the sum of the
squares of the elements in @var{x}.

If @var{x} is a matrix, then @code{sumsq (@var{x})} returns a row vector with
each element containing the sum of squares of the corresponding column in
@var{x}.

If @var{x} is an array, then @code{sumsq(@var{x})} computes the sum of squares
along the first non-singleton dimension of @var{x}.

This function is conceptually equivalent to computing

@example
sum (x .* conj (x))
@end example

@noindent
but it uses less memory and avoids calling @code{conj} if @var{x} is real.

The optional input @var{dim} specifies the dimension to operate on and must be
a positive integer.  Specifying any singleton dimension in @var{x}, including
any dimension exceeding @code{ndims (@var{x})}, will return a sum of squares
equal to @code{@var{x}.^2}.

Specifying multiple dimensions with input @var{vecdim}, a vector of
non-repeating dimensions, will operate along the array slice defined by
@var{vecdim}.  If @var{vecdim} indexes all dimensions of @var{x}, then it is
equivalent to the option @qcode{"all"}.  Any dimension in @var{vecdim} greater
than @code{ndims (@var{x})} is ignored.

Specifying the dimension as @qcode{"all"} will cause @code{sumsq} to operate
on all elements of @var{x}, and is equivalent to @code{sumsq (@var{x}(:))}.

The optional input @var{outtype} specifies the data type that is returned as
well as the class of the variable used for calculations.
@var{outtype} can take the following values:

@table @asis
@item @qcode{"default"}
Operations on floating point inputs (double or single) are performed in their
native data type; while operations on integer, logical, and character data
types are performed using doubles.  Output is of type double, unless the input
is single in which case the output is of type single.

@item @qcode{"double"}
Operations are performed in double precision even for single precision inputs.
Output is of type double.

@item @qcode{"native"}
Operations are performed in their native data types and output is of the same
type as the input as reported by (@code{class (@var{x})}).  When the input is
logical, @code{sumsq (@var{x}, "native")} is equivalent to
@code{all (@var{x})}.
@end table

The optional variable @var{nanflag} specifies whether to include or exclude
NaN values from the calculation using any of the previously specified input
argument combinations.  The default value for @var{nanflag} is
@qcode{"includenan"} which keeps NaN values in the calculation.  To exclude
NaN values set the value of @var{nanflag} to @qcode{"omitnan"}.  The output
will be 0, if @var{x} consists of all NaN values in the
operating dimension.
@seealso{sum, prod}
@end deftypefn */)
{
  int nargin = args.length ();

  bool isnative = false;
  bool isdouble = false;
  bool do_perm = false;
  bool allflag = false;
  bool nanflag = false;

  // Peel trailing string options off the argument list, right to left:
  // OUTTYPE ("native", "double", "default"), "all", and NANFLAG.
  while (nargin > 1 && args(nargin - 1).is_string ())
    {
      std::string str = args(nargin - 1).string_value ();

      if (str == "native")
        isnative = true;
      else if (str == "double")
        isdouble = true;
      else if (str == "all")
        allflag = true;
      else if (str == "omitnan" || str == "omitmissing")
        {
          // NaN only occurs in floating-point data, so the flag is a no-op
          // for integer, logical, and char inputs.
          if (args(0).is_double_type () || args(0).is_single_type ())
            nanflag = true;
        }
      else if (str == "includenan" || str == "includemissing")
        nanflag = false;
      else if (str != "default")
        error ("sumsq: unrecognized optional argument '%s'", str.c_str ());

      nargin--;
    }

  // After stripping string options, only X and an optional DIM/VECDIM remain.
  if (nargin < 1 || nargin > 2)
    print_usage ();
  if (allflag && nargin > 1)
    error ("sumsq: cannot set DIM or VECDIM with 'all' flag");

  octave_value arg = args(0);

  // Handle DIM, VECDIM.  For VECDIM, get_dim_vecdim_all arranges a
  // permutation (perm_vec/do_perm) so the reduction can run over a single
  // dimension; the permutation is undone after the switch below.
  int dim = -1;
  Array<int> perm_vec;
  if (nargin == 2)
    {
      octave_value dimarg = args(1);
      get_dim_vecdim_all (dimarg, arg, dim, perm_vec, do_perm, allflag, "sumsq");
    }

  // Handle allflag: flatten X to a column so the reduction covers everything.
  if (allflag)
    arg = arg.reshape (dim_vector (arg.numel (), 1));

  octave_value retval;

  // Dispatch on the builtin type of X; sparse and dense use different
  // value extractors, and "double"/"native" select the computation class.
  switch (arg.builtin_type ())
    {
    case btyp_double:
      if (arg.issparse ())
        retval = arg.sparse_matrix_value ().sumsq (dim, nanflag);
      else
        retval = arg.array_value ().sumsq (dim, nanflag);
      break;

    case btyp_complex:
      if (arg.issparse ())
        retval = arg.sparse_complex_matrix_value ().sumsq (dim, nanflag);
      else
        retval = arg.complex_array_value ().sumsq (dim, nanflag);
      break;

    case btyp_float:
      // dsumsq accumulates in double precision (the "double" outtype).
      if (isdouble)
        retval = arg.float_array_value ().dsumsq (dim, nanflag);
      else
        retval = arg.float_array_value ().sumsq (dim, nanflag);
      break;

    case btyp_float_complex:
      if (isdouble)
        retval = arg.float_complex_array_value ().dsumsq (dim, nanflag);
      else
        retval = arg.float_complex_array_value ().sumsq (dim, nanflag);
      break;

#define MAKE_INT_BRANCH(X) \
    case btyp_ ## X: \
      if (isnative) \
        retval = arg.X ## _array_value ().sumsq (dim); \
      else \
        retval = arg.array_value ().sumsq (dim); \
      break;

    MAKE_INT_BRANCH (int8);
    MAKE_INT_BRANCH (int16);
    MAKE_INT_BRANCH (int32);
    MAKE_INT_BRANCH (int64);
    MAKE_INT_BRANCH (uint8);
    MAKE_INT_BRANCH (uint16);
    MAKE_INT_BRANCH (uint32);
    MAKE_INT_BRANCH (uint64);

#undef MAKE_INT_BRANCH

    // GAGME: Accursed Matlab compatibility...
    case btyp_char:
      retval = arg.array_value (true).sumsq (dim);
      break;

    case btyp_bool:
      if (arg.issparse ())
        {
          if (isnative)
            retval = arg.sparse_bool_matrix_value ().all (dim);
          else
            retval = arg.sparse_matrix_value ().sumsq (dim);
        }
      else if (isnative)
        retval = arg.bool_array_value ().all (dim);
      else
        {
          // For booleans: sumsq (x) = sum (x), since 0^2=0, 1^2=1
          boolNDArray m = arg.bool_array_value ();
          retval = do_mx_red_op<double, bool> (m, dim, mx_inline_count);
        }
      break;

    default:
      err_wrong_type_arg ("sumsq", arg);
    }

  // Undo the VECDIM permutation so the result has the expected shape.
  if (do_perm)
    retval = retval.permute (perm_vec, true);

  return retval;
}
5018
5019/*
5020%!assert (sumsq ([1, 2, 3]), 14)
5021%!assert (sumsq ([-1; -2; 4i]), 21)
5022%!assert (sumsq ([1, 2, 3; 2, 3, 4; 4i, 6i, 2]), [21, 49, 29])
5023
5024%!assert (sumsq (single ([1, 2, 3])), single (14))
5025%!assert (sumsq (single ([-1; -2; 4i])), single (21))
5026%!assert (sumsq (single ([1, 2, 3; 2, 3, 4; 4i, 6i, 2])),
5027%! single ([21, 49, 29]))
5028
5029%!assert (sumsq ([1, 2; 3, 4], 1), [10, 20])
5030%!assert (sumsq ([1, 2; 3, 4], 2), [5; 25])
5031
5032%!assert (sumsq (single ([1, 2; 3, 4]), 1), single ([10, 20]))
5033%!assert (sumsq (single ([1, 2; 3, 4]), 2), single ([5; 25]))
5034
5035## Test empty matrices
5036%!assert (sumsq ([]), 0)
5037%!assert (sumsq ([], 1), zeros (1, 0))
5038%!assert (sumsq ([], 2), zeros (0, 1))
5039%!assert (sumsq ([], 3), zeros (0, 0))
5040%!assert (sumsq (zeros (1, 0)), 0)
5041%!assert (sumsq (zeros (1, 0), 1), zeros (1, 0))
5042%!assert (sumsq (zeros (1, 0), 2), 0)
5043%!assert (sumsq (zeros (0, 1)), 0)
5044%!assert (sumsq (zeros (0, 1), 1), 0)
5045%!assert (sumsq (zeros (0, 1), 2), zeros (0, 1))
5046%!assert (sumsq (zeros (2, 0)), zeros (1, 0))
5047%!assert (sumsq (zeros (2, 0), 1), zeros (1, 0))
5048%!assert (sumsq (zeros (2, 0), 2), [0; 0])
5049%!assert (sumsq (zeros (0, 2)), [0, 0])
5050%!assert (sumsq (zeros (0, 2), 1), [0, 0])
5051%!assert (sumsq (zeros (0, 2), 2), zeros (0, 1))
5052%!assert (sumsq (zeros (2, 2, 0, 3)), zeros (1, 2, 0, 3))
5053%!assert (sumsq (zeros (2, 2, 0, 3), 2), zeros (2, 1, 0, 3))
5054%!assert (sumsq (zeros (2, 2, 0, 3), 3), zeros (2, 2, 1, 3))
5055%!assert (sumsq (zeros (2, 2, 0, 3), 4), zeros (2, 2, 0))
5056%!assert (sumsq (zeros (2, 2, 0, 3), 7), zeros (2, 2, 0, 3))
5057
5058%!assert (sumsq (single ([])), single (0))
5059%!assert (sumsq (single ([]), 1), single (zeros (1, 0)))
5060%!assert (sumsq (single ([]), 2), single (zeros (0, 1)))
5061%!assert (sumsq (single ([]), 3), single (zeros (0, 0)))
5062%!assert (sumsq (zeros (1, 0, "single")), single (0))
5063%!assert (sumsq (zeros (1, 0, "single"), 1), zeros (1, 0, "single"))
5064%!assert (sumsq (zeros (1, 0, "single"), 2), single (0))
5065%!assert (sumsq (zeros (0, 1, "single")), single (0))
5066%!assert (sumsq (zeros (0, 1, "single"), 1), single (0))
5067%!assert (sumsq (zeros (0, 1, "single"), 2), zeros (0, 1, "single"))
5068%!assert (sumsq (zeros (2, 0, "single")), zeros (1, 0, "single"))
5069%!assert (sumsq (zeros (2, 0, "single"), 1), zeros (1, 0, "single"))
5070%!assert (sumsq (zeros (2, 0, "single"), 2), single ([0; 0]))
5071%!assert (sumsq (zeros (0, 2, "single")), single ([0, 0]))
5072%!assert (sumsq (zeros (0, 2, "single"), 1), single ([0, 0]))
5073%!assert (sumsq (zeros (0, 2, "single"), 2), zeros (0, 1, "single"))
5074%!assert (sumsq (zeros (2, 2, 0, 3, "single")), zeros (1, 2, 0, 3, "single"))
5075%!assert (sumsq (zeros (2, 2, 0, 3, "single"), 2), zeros (2, 1, 0, 3, "single"))
5076%!assert (sumsq (zeros (2, 2, 0, 3, "single"), 3), zeros (2, 2, 1, 3, "single"))
5077%!assert (sumsq (zeros (2, 2, 0, 3, "single"), 4), zeros (2, 2, 0, "single"))
5078%!assert (sumsq (zeros (2, 2, 0, 3, "single"), 7), zeros (2, 2, 0, 3, "single"))
5079
5080## Test dimension indexing with vecdim in N-dimensional arrays
5081%!test
5082%! x = repmat ([1:20;6:25], [5 2 6 3]);
5083%! assert (size (sumsq (x, [3 2])), [10 1 1 3]);
5084%! assert (size (sumsq (x, [1 2])), [1 1 6 3]);
5085%! assert (size (sumsq (x, [1 2 4])), [1 1 6]);
5086%! assert (size (sumsq (x, [1 4 3])), [1 40]);
5087%! assert (size (sumsq (x, [1 2 3 4])), [1 1]);
5088
5089## Test exceeding dimensions
5090%!assert (sumsq (ones (2), 3), ones (2))
5091%!assert (sumsq (ones (2, 2, 2), 99), ones (2, 2, 2))
5092%!assert (sumsq (magic (3), 3), magic (3) .^ 2)
5093%!assert (sumsq (magic (3), [3, 5]), magic (3) .^ 2)
5094%!assert (sumsq (magic (3), [1 3]), sum (magic (3) .^ 2))
5095%!assert (sumsq (magic (3), [2 99]), sum (magic (3) .^ 2, 2))
5096%!assert (sumsq (single (ones (2)), 4),single (ones (2)))
5097%!assert (sumsq (single (ones (2)), [4, 5]),single (ones (2)))
5098%!assert (sumsq (sparse ([1, 2; 3, 4]), 3), sparse ([1, 2; 3, 4].^2))
5099%!assert (sumsq (sparse ([1, 2i; 3, 4]), 3), sparse ([1, 4; 9, 16]))
5100
5101## Test nanflag
5102%!test
5103%! x = ones (3,4,5);
5104%! x(1) = NaN;
5105%! assert (sumsq (x)(:,:,1), [NaN, 3, 3, 3]);
5106%! assert (sumsq (x, "includenan")(:,:,1), [NaN, 3, 3, 3]);
5107%! assert (sumsq (x, "omitnan")(:,:,1), [2, 3, 3, 3]);
5108%! assert (sumsq (x, "omitmissing")(:,:,1), [2, 3, 3, 3]);
5109%! assert (sumsq (x, [2 3]), [NaN; 20; 20]);
5110%! assert (sumsq (x, [2 3], "omitnan"), [19; 20; 20]);
5111
5112## Test cases for "omitnan"
5113%!test
5114%! A = [1, NaN; 2, NaN; 3, NaN];
5115%! assert (sumsq (A, 2, "omitnan"), [1; 4; 9]);
5116%!test
5117%! A = [NaN, NaN, NaN];
5118%! assert (sumsq (A, "omitnan"), 0);
5119%!test
5120%! A = [2, 3, NaN; 4, 5, NaN; 1, 2, NaN];
5121%! assert (sumsq (A, 2, "omitnan"), [13; 41; 5]);
5122%!test
5123%! A = [2, NaN, 3; 3, NaN, 4; 1, NaN, 2];
5124%! assert (sumsq (A, 1, "omitnan"), [14, 0, 29]);
5125%!test
5126%! A = [1+2i, NaN; 3+4i, NaN];
5127%! assert (sumsq (A, 2, "omitnan"), [5; 25]);
5128%!test
5129%! A = single ([NaN, NaN, NaN]);
5130%! assert (sumsq (A, "omitnan"), single (0));
5131
5132## Test sparse matrices
5133%!assert (sumsq (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN])),
5134%! sparse ([NaN, NaN, 2, 20, NaN]))
5135%!assert (sumsq (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), "omitnan"),
5136%! sparse ([1, 4, 2, 20, 4]))
5137%!assert (sumsq (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), 2),
5138%! sparse ([NaN; NaN]))
5139%!assert (sumsq (sparse ([NaN, NaN, 1, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
5140%! sparse ([21; 10]))
5141%!assert (sumsq (sparse ([NaN, NaN, 1i, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
5142%! sparse ([21; 10]))
5143%!assert (sumsq (sparse ([NaN, 0i, 1i, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
5144%! sparse ([21; 10]))
5145%!assert (sumsq (sparse ([NaN, 1+i, 1i, 4, 2; 1, 2, 1, 2, NaN]), 2, "omitnan"),
5146%! sparse ([23; 10]))
5147%!assert (sumsq (sparse ([NaN, NaN, NaN]), "omitnan"), sparse (0))
5148%!assert (sumsq (sparse ([0, 0, 0, NaN, NaN, NaN]), "omitnan"), sparse (0))
5149
5150## Test empty sparse matrices
5151%!assert (sumsq (sparse (ones(1, 0))), sparse (0))
5152%!assert (size (sumsq (sparse (ones(1, 0)), 1)), [1, 0])
5153%!assert (size (sumsq (sparse (ones(1, 0)), 2)), [1, 1])
5154%!assert (sumsq (sparse (ones(0, 1))), sparse (0))
5155%!assert (size (sumsq (sparse (ones(0, 1)), 1)), [1, 1])
5156%!assert (size (sumsq (sparse (ones(0, 1)), 1)), [1, 1])
5157%!assert (size (sumsq (sparse (ones(0, 1)), 2)), [0, 1])
5158%!assert (sumsq (sparse (ones(0, 0))), sparse (0))
5159%!assert (size (sumsq (sparse (ones(0, 0)), 1)), [1, 0])
5160%!assert (size (sumsq (sparse (ones(0, 0)), 2)), [0, 1])
5161%!assert (size (sumsq (sparse (ones(0, 0)), 3)), [0, 0])
5162
5163## Test boolean sumsq (must be equal to sum)
5164%!test
5165%! x = [true, false, true; false, true, false];
5166%! assert (sumsq (x), sum (x));
5167%! assert (sumsq (x, 1), [1, 1, 1]);
5168%! assert (sumsq (x, 2), [2; 1]);
5169
5170%!test
5171%! x = rand (100, 100) > 0.5;
5172%! assert (sumsq (x), sum (x));
5173
5174%!assert (sumsq (true), 1)
5175%!assert (sumsq (false), 0)
5176%!assert (class (sumsq ([true, false])), "double")
5177
5178## Test input validation
5179%!error <Invalid call> sumsq ()
5180%!error <Invalid call> sumsq (1,2,3)
5181%!error <unrecognized optional argument 'foobar'> sumsq (1, "foobar")
5182%!error <cannot set DIM or VECDIM with 'all' flag>
5183%! sumsq (ones (3,3), 1, "all");
5184%!error <cannot set DIM or VECDIM with 'all' flag>
5185%! sumsq (ones (3,3), [1, 2], "all");
5186%!error <invalid dimension DIM = 0> sumsq (ones (3,3), 0)
5187%!error <invalid dimension DIM = -1> sumsq (ones (3,3), -1)
5188%!error <invalid dimension in VECDIM = -2> sumsq (ones (3,3), [1 -2])
5189%!error <duplicate dimension in VECDIM = 2> sumsq (ones (3,3), [1 2 2])
5190%!error <duplicate dimension in VECDIM = 1> sumsq (ones (3,3), [1 1 2])
5191*/
5192
5193DEFUN (islogical, args, ,
5194 doc: /* -*- texinfo -*-
5195@deftypefn {} {@var{tf} =} islogical (@var{x})
5196@deftypefnx {} {@var{tf} =} isbool (@var{x})
5197Return true if @var{x} is a logical object.
5198
5199Programming Note: @code{isbool} is an alias for @code{islogical} and can be
5200used interchangeably.
5201@seealso{ischar, isfloat, isinteger, isstring, isnumeric, isa}
5202@end deftypefn */)
5203{
5204 if (args.length () != 1)
5205 print_usage ();
5206
5207 return ovl (args(0).islogical ());
5208}
5209
// isbool is kept as a true alias of islogical for compatibility.
DEFALIAS (isbool, islogical);
5211
5212/*
5213%!assert (islogical (true), true)
5214%!assert (islogical (false), true)
5215%!assert (islogical ([true, false]), true)
5216%!assert (islogical (1), false)
5217%!assert (islogical (1i), false)
5218%!assert (islogical ([1,1]), false)
5219%!assert (islogical (single (1)), false)
5220%!assert (islogical (single (1i)), false)
5221%!assert (islogical (single ([1,1])), false)
5222%!assert (islogical (sparse ([true, false])), true)
5223%!assert (islogical (sparse ([1, 0])), false)
5224*/
5225
5226DEFUN (isinteger, args, ,
5227 doc: /* -*- texinfo -*-
5228@deftypefn {} {@var{tf} =} isinteger (@var{x})
5229Return true if @var{x} is an integer object (int8, uint8, int16, etc.).
5230
5231Note that @w{@code{isinteger (14)}}@ is false because numeric constants in
5232Octave are double precision floating point values.
5233@seealso{isfloat, ischar, islogical, isstring, isnumeric, isa}
5234@end deftypefn */)
5235{
5236 if (args.length () != 1)
5237 print_usage ();
5238
5239 return ovl (args(0).isinteger ());
5240}
5241
5242/*
5243%!assert (isinteger (int8 (16)))
5244%!assert (isinteger (int16 (16)))
5245%!assert (isinteger (int32 (16)))
5246%!assert (isinteger (int64 (16)))
5247
5248%!assert (isinteger (uint8 (16)))
5249%!assert (isinteger (uint16 (16)))
5250%!assert (isinteger (uint32 (16)))
5251%!assert (isinteger (uint64 (16)))
5252
5253%!assert (isinteger (intmax ("int8")))
5254%!assert (isinteger (intmax ("int16")))
5255%!assert (isinteger (intmax ("int32")))
5256%!assert (isinteger (intmax ("int64")))
5257
5258%!assert (isinteger (intmax ("uint8")))
5259%!assert (isinteger (intmax ("uint16")))
5260%!assert (isinteger (intmax ("uint32")))
5261%!assert (isinteger (intmax ("uint64")))
5262
5263%!assert (isinteger (intmin ("int8")))
5264%!assert (isinteger (intmin ("int16")))
5265%!assert (isinteger (intmin ("int32")))
5266%!assert (isinteger (intmin ("int64")))
5267
5268%!assert (isinteger (intmin ("uint8")))
5269%!assert (isinteger (intmin ("uint16")))
5270%!assert (isinteger (intmin ("uint32")))
5271%!assert (isinteger (intmin ("uint64")))
5272
5273%!assert (isinteger (uint8 ([1:10])))
5274%!assert (isinteger (uint8 ([1:10; 1:10])))
5275
5276%!assert (! isinteger (16))
5277%!assert (! isinteger ("parrot"))
5278%!assert (! isinteger ([1, 2, 3]))
5279
5280%!error isinteger ()
5281%!error isinteger ("multiple", "parameters")
5282*/
5283
5284DEFUN (iscomplex, args, ,
5285 doc: /* -*- texinfo -*-
5286@deftypefn {} {@var{tf} =} iscomplex (@var{x})
5287Return true if @var{x} is a complex-valued numeric object.
5288@seealso{isreal, isnumeric, ischar, isfloat, islogical, isstring, isa}
5289@end deftypefn */)
5290{
5291 if (args.length () != 1)
5292 print_usage ();
5293
5294 return ovl (args(0).iscomplex ());
5295}
5296
5297/*
5298%!assert (iscomplex (4), false)
5299%!assert (iscomplex (i), true)
5300%!assert (iscomplex (4+3i), true)
5301%!assert (iscomplex ([1, 2, 3]), false)
5302%!assert (iscomplex ([1, 2i, 3]), true)
5303
5304%!assert (iscomplex (0j), false)
5305%!assert (iscomplex (complex (0,0)), true)
5306%!assert (iscomplex ("4"), false)
5307%!assert (iscomplex ({i}), false)
5308
5309## Test input validation
5310%!error iscomplex ()
5311%!error iscomplex (1, 2)
5312*/
5313
5314DEFUN (isfloat, args, ,
5315 doc: /* -*- texinfo -*-
5316@deftypefn {} {@var{tf} =} isfloat (@var{x})
5317Return true if @var{x} is a floating-point numeric object.
5318
5319Objects of class double or single are floating-point objects.
5320@seealso{isinteger, ischar, islogical, isnumeric, isstring, isa}
5321@end deftypefn */)
5322{
5323 if (args.length () != 1)
5324 print_usage ();
5325
5326 return ovl (args(0).isfloat ());
5327}
5328
5329// FIXME: perhaps this should be implemented with an
5330// octave_value member function?
5331
5332DEFUN (complex, args, ,
5333 doc: /* -*- texinfo -*-
5334@deftypefn {} {@var{z} =} complex (@var{x})
5335@deftypefnx {} {@var{z} =} complex (@var{re}, @var{im})
5336Return a complex value from real arguments.
5337
5338With 1 real argument @var{x}, return the complex result
5339@w{@code{@var{x} + 0i}}.
5340
5341With 2 real arguments, return the complex result
5342@w{@code{@var{re} + @var{im}i}}.
5343@code{complex} can often be more convenient than expressions such as
5344@w{@code{a + b*i}}.
5345For example:
5346
5347@example
5348@group
5349complex ([1, 2], [3, 4])
5350 @xresult{} [ 1 + 3i 2 + 4i ]
5351@end group
5352@end example
5353@seealso{real, imag, iscomplex, abs, arg}
5354@end deftypefn */)
5355// Programming Note: Throughout this function the coding pattern
5356// octave_value (new XXX)) is used. This is done specifically because the
5357// default octave_value constructor would otherwise perform automatic narrowing
5358// (i.e., complex values with 0 for the imaginary part would be converted
5359// to real values). The complex() function *must* return a complex value
5360// even when the imaginary part is 0.
5361{
5362 int nargin = args.length ();
5363
5364 if (nargin < 1 || nargin > 2)
5365 print_usage ();
5366
5367 octave_value retval;
5368
5369 if (nargin == 1)
5370 {
5371 octave_value arg = args(0);
5372
5373 if (arg.iscomplex ())
5374 retval = arg;
5375 else
5376 {
5377 if (arg.issparse ())
5378 {
5379 SparseComplexMatrix val = arg.xsparse_complex_matrix_value ("complex: invalid conversion");
5380
5381 retval = octave_value (new octave_sparse_complex_matrix (val));
5382 }
5383 else if (arg.is_single_type ())
5384 {
5385 if (arg.numel () == 1)
5386 {
5387 FloatComplex val = arg.xfloat_complex_value ("complex: invalid conversion");
5388
5389 retval = octave_value (new octave_float_complex (val));
5390 }
5391 else
5392 {
5393 FloatComplexNDArray val = arg.xfloat_complex_array_value ("complex: invalid conversion");
5394
5395 retval = octave_value (new octave_float_complex_matrix (val));
5396 }
5397 }
5398 else
5399 {
5400 if (arg.numel () == 1)
5401 {
5402 Complex val = arg.xcomplex_value ("complex: invalid conversion");
5403
5404 retval = octave_value (new octave_complex (val));
5405 }
5406 else
5407 {
5408 ComplexNDArray val = arg.xcomplex_array_value ("complex: invalid conversion");
5409
5410 retval = octave_value (new octave_complex_matrix (val));
5411 }
5412 }
5413 }
5414 }
5415 else
5416 {
5417 octave_value re = args(0);
5418 octave_value im = args(1);
5419
5420 if (re.issparse () && im.issparse ())
5421 {
5422 const SparseMatrix re_val = re.sparse_matrix_value ();
5423 const SparseMatrix im_val = im.sparse_matrix_value ();
5424
5425 if (re.numel () == 1)
5426 {
5427 SparseComplexMatrix result;
5428 if (re_val.nnz () == 0)
5429 result = Complex (0, 1) * SparseComplexMatrix (im_val);
5430 else
5431 {
5432 octave_idx_type nr = im_val.rows ();
5433 octave_idx_type nc = im_val.cols ();
5434 result = SparseComplexMatrix (nr, nc, re_val(0));
5435
5436 for (octave_idx_type j = 0; j < nc; j++)
5437 {
5438 octave_idx_type off = j * nr;
5439 for (octave_idx_type i = im_val.cidx (j);
5440 i < im_val.cidx (j + 1); i++)
5441 result.data (im_val.ridx (i) + off)
5442 += Complex (0, im_val.data (i));
5443 }
5444 }
5445 retval = octave_value (new octave_sparse_complex_matrix (result));
5446 }
5447 else if (im.numel () == 1)
5448 {
5449 SparseComplexMatrix result;
5450 if (im_val.nnz () == 0)
5451 result = SparseComplexMatrix (re_val);
5452 else
5453 {
5454 octave_idx_type nr = re_val.rows ();
5455 octave_idx_type nc = re_val.cols ();
5456 result = SparseComplexMatrix (nr, nc,
5457 Complex (0, im_val(0)));
5458
5459 for (octave_idx_type j = 0; j < nc; j++)
5460 {
5461 octave_idx_type off = j * nr;
5462 for (octave_idx_type i = re_val.cidx (j);
5463 i < re_val.cidx (j + 1); i++)
5464 result.data (re_val.ridx (i) + off)
5465 += re_val.data (i);
5466 }
5467 }
5468 retval = octave_value (new octave_sparse_complex_matrix (result));
5469 }
5470 else
5471 {
5472 if (re_val.dims () != im_val.dims ())
5473 error ("complex: dimension mismatch");
5474
5475 SparseComplexMatrix result;
5476 result = SparseComplexMatrix (re_val)
5477 + Complex (0, 1) * SparseComplexMatrix (im_val);
5478 retval = octave_value (new octave_sparse_complex_matrix (result));
5479 }
5480 }
5481 else if (re.is_single_type () || im.is_single_type ())
5482 {
5483 if (re.numel () == 1)
5484 {
5485 float re_val = re.float_value ();
5486
5487 if (im.numel () == 1)
5488 {
5489 float im_val = im.double_value ();
5490
5492 (FloatComplex (re_val, im_val)));
5493 }
5494 else
5495 {
5496 const FloatNDArray im_val = im.float_array_value ();
5497
5498 FloatComplexNDArray result (im_val.dims ());
5499
5500 for (octave_idx_type i = 0; i < im_val.numel (); i++)
5501 result.xelem (i) = FloatComplex (re_val, im_val.xelem (i));
5502
5504 (result));
5505 }
5506 }
5507 else
5508 {
5509 const FloatNDArray re_val = re.float_array_value ();
5510
5511 if (im.numel () == 1)
5512 {
5513 float im_val = im.float_value ();
5514
5515 FloatComplexNDArray result (re_val.dims ());
5516
5517 for (octave_idx_type i = 0; i < re_val.numel (); i++)
5518 result.xelem (i) = FloatComplex (re_val.xelem (i), im_val);
5519
5521 (result));
5522 }
5523 else
5524 {
5525 const FloatNDArray im_val = im.float_array_value ();
5526
5527 if (re_val.dims () != im_val.dims ())
5528 error ("complex: dimension mismatch");
5529
5530 FloatComplexNDArray result (re_val.dims ());
5531
5532 for (octave_idx_type i = 0; i < re_val.numel (); i++)
5533 result.xelem (i) = FloatComplex (re_val.xelem (i),
5534 im_val.xelem (i));
5535
5537 (result));
5538 }
5539 }
5540 }
5541 else if (re.numel () == 1)
5542 {
5543 double re_val = re.double_value ();
5544
5545 if (im.numel () == 1)
5546 {
5547 double im_val = im.double_value ();
5548
5549 retval = octave_value (new octave_complex
5550 (Complex (re_val, im_val)));
5551 }
5552 else
5553 {
5554 const NDArray im_val = im.array_value ();
5555
5556 ComplexNDArray result (im_val.dims ());
5557
5558 for (octave_idx_type i = 0; i < im_val.numel (); i++)
5559 result.xelem (i) = Complex (re_val, im_val.xelem (i));
5560
5561 retval = octave_value (new octave_complex_matrix (result));
5562 }
5563 }
5564 else
5565 {
5566 const NDArray re_val = re.array_value ();
5567
5568 if (im.numel () == 1)
5569 {
5570 double im_val = im.double_value ();
5571
5572 ComplexNDArray result (re_val.dims ());
5573
5574 for (octave_idx_type i = 0; i < re_val.numel (); i++)
5575 result.xelem (i) = Complex (re_val.xelem (i), im_val);
5576
5577 retval = octave_value (new octave_complex_matrix (result));
5578 }
5579 else
5580 {
5581 const NDArray im_val = im.array_value ();
5582
5583 if (re_val.dims () != im_val.dims ())
5584 error ("complex: dimension mismatch");
5585
5586 ComplexNDArray result (re_val.dims (), Complex ());
5587
5588 for (octave_idx_type i = 0; i < re_val.numel (); i++)
5589 result.xelem (i) = Complex (re_val.xelem (i),
5590 im_val.xelem (i));
5591
5592 retval = octave_value (new octave_complex_matrix (result));
5593 }
5594 }
5595 }
5596
5597 return retval;
5598}
5599
5600/*
5601%!error <undefined> 1+Infj
5602%!error <undefined> 1+Infi
5603
5604%!test <31974>
5605%! assert (Inf + Inf*i, complex (Inf, Inf));
5606%!
5607%! assert (1 + Inf*i, complex (1, Inf));
5608%! assert (1 + Inf*j, complex (1, Inf));
5609%!
5610%! ## whitespace should not affect parsing
5611%! assert (1+Inf*i, complex (1, Inf));
5612%! assert (1+Inf*j, complex (1, Inf));
5613%!
5614%! assert (NaN*j, complex (0, NaN));
5615%!
5616%! assert (Inf * 4j, complex (0, Inf));
5617
5618%!test <31974>
5619%! x = Inf;
5620%! assert (x * j, complex (0, Inf));
5621%! j = complex (0, 1);
5622%! assert (Inf * j, complex (0, Inf));
5623
5624%!test <31974>
5625%! exp = complex (zeros (2, 2), Inf (2, 2));
5626%! assert (Inf (2, 2) * j, exp);
5627%! assert (Inf (2, 2) .* j, exp);
5628%! assert (Inf * (ones (2, 2) * j), exp);
5629%! assert (Inf (2, 2) .* (ones (2, 2) * j), exp);
5630
5631%!test <31974>
5632%! assert ([Inf; 0] * [i, 0], complex ([NaN NaN; 0 0], [Inf NaN; 0 0]));
5633%! assert ([Inf, 0] * [i; 0], complex (NaN, Inf));
5634%! assert ([Inf, 0] .* [i, 0], complex ([0 0], [Inf 0]));
5635
5636%!test <31974>
5637%! m = @(x, y) x * y;
5638%! d = @(x, y) x / y;
5639%! assert (m (Inf, i), complex (0, +Inf));
5640%! assert (d (Inf, i), complex (0, -Inf));
5641*/
5642
5643DEFUN (isreal, args, ,
5644 doc: /* -*- texinfo -*-
5645@deftypefn {} {@var{tf} =} isreal (@var{x})
5646Return true if @var{x} is a non-complex matrix or scalar.
5647
5648For compatibility with @sc{matlab}, this includes logical and character
5649matrices.
5650@seealso{iscomplex, isnumeric, isa}
5651@end deftypefn */)
5652{
5653 if (args.length () != 1)
5654 print_usage ();
5655
5656 return ovl (args(0).isreal ());
5657}
5658
5659DEFUN (isempty, args, ,
5660 doc: /* -*- texinfo -*-
5661@deftypefn {} {@var{tf} =} isempty (@var{A})
5662Return true if @var{A} is an empty object (any one of its dimensions is
5663zero).
5664@seealso{isnull, isa}
5665@end deftypefn */)
5666{
5667 if (args.length () != 1)
5668 print_usage ();
5669
5670 return ovl (args(0).isempty ());
5671}
5672
5673/*
5674## Debian bug #706376
5675%!assert (isempty (speye (2^16)), false)
5676*/
5677
5678DEFUN (isnumeric, args, ,
5679 doc: /* -*- texinfo -*-
5680@deftypefn {} {@var{tf} =} isnumeric (@var{x})
5681Return true if @var{x} is a numeric object, i.e., an integer, real, or
5682complex array.
5683
5684Logical and character arrays are not considered to be numeric.
5685@seealso{isinteger, isfloat, isreal, iscomplex, ischar, islogical, isstring,
5686iscell, isstruct, isa}
5687@end deftypefn */)
5688{
5689 if (args.length () != 1)
5690 print_usage ();
5691
5692 return ovl (args(0).isnumeric ());
5693}
5694
5695/*
5696%!assert (isnumeric (1), true)
5697%!assert (isnumeric (1i), true)
5698%!assert (isnumeric ([1,1]), true)
5699%!assert (isnumeric (single (1)), true)
5700%!assert (isnumeric (single (1i)), true)
5701%!assert (isnumeric (single ([1,1])), true)
5702%!assert (isnumeric (int8 (1)), true)
5703%!assert (isnumeric (uint8 ([1,1])), true)
5704%!assert (isnumeric ("Hello World"), false)
5705%!assert (isnumeric (true), false)
5706%!assert (isnumeric (false), false)
5707%!assert (isnumeric ([true, false]), false)
5708%!assert (isnumeric (sparse ([true, false])), false)
5709*/
5710
5711DEFUN (isscalar, args, ,
5712 doc: /* -*- texinfo -*-
5713@deftypefn {} {@var{tf} =} isscalar (@var{x})
5714Return true if @var{x} is a scalar.
5715
5716A scalar is a single-element object of any type for which @code{size (@var{x})}
5717returns @w{@code{[1, 1]}}.
5718@seealso{isvector, ismatrix, size}
5719@end deftypefn */)
5720{
5721 if (args.length () != 1)
5722 print_usage ();
5723
5724 // This function *must* use size() to determine the desired values to be
5725 // compatible with Matlab and to allow user-defined class overloading.
5726 Matrix sz = octave_value (args(0)).size ();
5727
5728 return ovl (sz.numel () == 2 && sz(0) == 1 && sz(1) == 1);
5729}
5730
5731/*
5732%!assert (isscalar (1))
5733%!assert (isscalar ([1, 2]), false)
5734%!assert (isscalar ([]), false)
5735%!assert (isscalar ([1, 2; 3, 4]), false)
5736
5737%!assert (isscalar ("t"))
5738%!assert (isscalar ("test"), false)
5739%!assert (isscalar (["test"; "ing"]), false)
5740
5741%!test
5742%! s.a = 1;
5743%! assert (isscalar (s));
5744
5745## Test input validation
5746%!error isscalar ()
5747%!error isscalar (1, 2)
5748*/
5749
5750DEFUN (isvector, args, ,
5751 doc: /* -*- texinfo -*-
5752@deftypefn {} {@var{tf} =} isvector (@var{x})
5753Return true if @var{x} is a vector.
5754
5755A vector is a 2-D array of any type where one of the dimensions is equal to 1
5756(either @nospell{1xN} or @nospell{Nx1}). As a consequence of this definition,
5757a 1x1 object (a scalar) is also a vector.
5758@seealso{isscalar, ismatrix, iscolumn, isrow, size}
5759@end deftypefn */)
5760{
5761 if (args.length () != 1)
5762 print_usage ();
5763
5764 // This function *must* use size() to determine the desired values to be
5765 // compatible with Matlab and to allow user-defined class overloading.
5766 Matrix sz = octave_value (args(0)).size ();
5767
5768 return ovl (sz.numel () == 2 && (sz(0) == 1 || sz(1) == 1));
5769}
5770
5771/*
5772%!assert (isvector (1), true)
5773%!assert (isvector ([1; 2; 3]), true)
5774%!assert (isvector ([1, 2, 3]), true)
5775%!assert (isvector ([]), false)
5776%!assert (isvector ([1, 2; 3, 4]), false)
5777
5778%!assert (isvector ("t"), true)
5779%!assert (isvector ("test"), true)
5780%!assert (isvector (["test"; "ing"]), false)
5781
5782%!test
5783%! s.a = 1;
5784%! assert (isvector (s), true);
5785
5786## Test input validation
5787%!error isvector ()
5788%!error isvector ([1, 2], 2)
5789*/
5790
5791DEFUN (isrow, args, ,
5792 doc: /* -*- texinfo -*-
5793@deftypefn {} {@var{tf} =} isrow (@var{x})
5794Return true if @var{x} is a row vector.
5795
5796A row vector is a 2-D array of any type for which @code{size (@var{x})} returns
5797@w{@code{[1, N]}}@ with non-negative N.
5798@seealso{iscolumn, isscalar, isvector, ismatrix, size}
5799@end deftypefn */)
5800{
5801 if (args.length () != 1)
5802 print_usage ();
5803
5804 // This function *must* use size() to determine the desired values to be
5805 // compatible with Matlab and to allow user-defined class overloading.
5806 Matrix sz = octave_value (args(0)).size ();
5807
5808 return ovl (sz.numel () == 2 && sz(0) == 1);
5809}
5810
5811/*
5812%!assert (isrow ([1, 2, 3]))
5813%!assert (isrow ([1; 2; 3]), false)
5814%!assert (isrow (1))
5815%!assert (isrow ([]), false)
5816%!assert (isrow ([1, 2; 3, 4]), false)
5817
5818%!assert (isrow (ones (1, 0)), true)
5819%!assert (isrow (ones (1, 1)), true)
5820%!assert (isrow (ones (1, 2)), true)
5821%!assert (isrow (ones (1, 1, 1)), true)
5822%!assert (isrow (ones (1, 1, 1, 1)), true)
5823
5824%!assert (isrow (ones (0, 0)), false)
5825%!assert (isrow (ones (1, 1, 0)), false)
5826
5827%!assert (isrow ("t"), true)
5828%!assert (isrow ("test"), true)
5829%!assert (isrow (["test"; "ing"]), false)
5830
5831%!test
5832%! s.a = 1;
5833%! assert (isrow (s), true);
5834
5835## Test input validation
5836%!error isrow ()
5837%!error isrow ([1, 2], 2)
5838*/
5839
5840DEFUN (iscolumn, args, ,
5841 doc: /* -*- texinfo -*-
5842@deftypefn {} {@var{tf} =} iscolumn (@var{x})
5843Return true if @var{x} is a column vector.
5844
5845A column vector is a 2-D array of any type for which @code{size (@var{x})}
5846returns @w{@code{[N, 1]}}@ with non-negative N.
5847@seealso{isrow, isscalar, isvector, ismatrix, size}
5848@end deftypefn */)
5849{
5850 if (args.length () != 1)
5851 print_usage ();
5852
5853 // This function *must* use size() to determine the desired values to be
5854 // compatible with Matlab and to allow user-defined class overloading.
5855 Matrix sz = octave_value (args(0)).size ();
5856
5857 return ovl (sz.numel () == 2 && sz(1) == 1);
5858}
5859
5860/*
5861%!assert (iscolumn ([1, 2, 3]), false)
5862%!assert (iscolumn ([1; 2; 3]), true)
5863%!assert (iscolumn (1), true)
5864%!assert (iscolumn ([]), false)
5865%!assert (iscolumn ([1, 2; 3, 4]), false)
5866
5867%!assert (iscolumn ("t"), true)
5868%!assert (iscolumn ("test"), false)
5869%!assert (iscolumn (["test"; "ing"]), false)
5870
5871%!assert (iscolumn (ones (0, 1)), true)
5872%!assert (iscolumn (ones (1, 1)), true)
5873%!assert (iscolumn (ones (2, 1)), true)
5874%!assert (iscolumn (ones (1, 1, 1)), true)
5875%!assert (iscolumn (ones (1, 1, 1, 1)), true)
5876
5877%!assert (iscolumn (ones (0, 0)), false)
5878%!assert (iscolumn (ones (0, 1, 0)), false)
5879
5880%!test
5881%! s.a = 1;
5882%! assert (iscolumn (s));
5883
5884## Test input validation
5885%!error iscolumn ()
5886%!error iscolumn ([1, 2], 2)
5887*/
5888
5889DEFUN (ismatrix, args, ,
5890 doc: /* -*- texinfo -*-
5891@deftypefn {} {@var{tf} =} ismatrix (@var{x})
5892Return true if @var{x} is a 2-D array.
5893
5894A matrix is an array of any type where @code{ndims (@var{x}) == 2} and for
5895which @code{size (@var{x})} returns @w{@code{[M, N]}}@ with non-negative M and
5896N.
5897@seealso{isscalar, isvector, iscell, isstruct, issparse, isa}
5898@end deftypefn */)
5899{
5900 if (args.length () != 1)
5901 print_usage ();
5902
5903 // This function *must* use size() to determine the desired values to be
5904 // compatible with Matlab and to allow user-defined class overloading.
5905 Matrix sz = octave_value (args(0)).size ();
5906
5907 return ovl (sz.numel () == 2 && sz(0) >= 0 && sz(1) >= 0);
5908}
5909
5910/*
5911%!assert (ismatrix ([]), true)
5912%!assert (ismatrix (1), true)
5913%!assert (ismatrix ([1, 2, 3]), true)
5914%!assert (ismatrix ([1, 2; 3, 4]), true)
5915
5916%!assert (ismatrix (zeros (0)), true)
5917%!assert (ismatrix (zeros (0, 0)), true)
5918%!assert (ismatrix (zeros (0, 0, 0)), false)
5919%!assert (ismatrix (zeros (3, 2, 4)), false)
5920
5921%!assert (ismatrix (single ([])), true)
5922%!assert (ismatrix (single (1)), true)
5923%!assert (ismatrix (single ([1, 2, 3])), true)
5924%!assert (ismatrix (single ([1, 2; 3, 4])), true)
5925
5926%!assert (ismatrix ("t"), true)
5927%!assert (ismatrix ("test"), true)
5928%!assert (ismatrix (["test"; "ing"]), true)
5929
5930%!test
5931%! s.a = 1;
5932%! assert (ismatrix (s), true);
5933
5934%!error ismatrix ()
5935%!error ismatrix ([1, 2; 3, 4], 2)
5936*/
5937
5938DEFUN (issquare, args, ,
5939 doc: /* -*- texinfo -*-
5940@deftypefn {} {@var{tf} =} issquare (@var{x})
5941Return true if @var{x} is a 2-D square array.
5942
5943A square array is a 2-D array of any type for which @code{size (@var{x})}
5944returns @w{@code{[N, N]}}@ where N is a non-negative integer.
5945@seealso{isscalar, isvector, ismatrix, size}
5946@end deftypefn */)
5947{
5948 if (args.length () != 1)
5949 print_usage ();
5950
5951 // This function *must* use size() to determine the desired values to
5952 // allow user-defined class overloading.
5953 Matrix sz = octave_value (args(0)).size ();
5954
5955 return ovl (sz.numel () == 2 && sz(0) == sz(1));
5956}
5957
5958/*
5959%!assert (issquare ([]))
5960%!assert (issquare (1))
5961%!assert (! issquare ([1, 2]))
5962%!assert (issquare ([1, 2; 3, 4]))
5963%!assert (! issquare ([1, 2; 3, 4; 5, 6]))
5964%!assert (! issquare (ones (3,3,3)))
5965%!assert (issquare ("t"))
5966%!assert (! issquare ("test"))
5967%!assert (issquare (["test"; "ing"; "1"; "2"]))
5968%!test
5969%! s.a = 1;
5970%! assert (issquare (s));
5971%!assert (issquare ({1, 2; 3, 4}))
%!assert (issquare (sparse ([1, 2; 3, 4])))
5973
5974## Test input validation
5975%!error issquare ()
5976%!error issquare ([1, 2; 3, 4], 2)
5977*/
5978
5979static octave_value
5980fill_matrix (const octave_value_list& args, int val, const char *fcn)
5981{
5982 octave_value retval;
5983
5984 int nargin = args.length ();
5985
5987
5988 dim_vector dims (1, 1);
5989 bool issparse = false;
5990 bool iscomplex = false;
5991
5992 if (nargin > 0 && args(nargin-1).is_string ())
5993 {
5994 std::string nm = args(nargin-1).string_value ();
5995 nargin--;
5996
5998 }
5999
6000 if (nargin > 1 && args(nargin-2).is_string ()
6001 && args(nargin-2).string_value () == "like")
6002 {
6003 std::string nm = args(nargin-1).class_name ();
6004 issparse = args(nargin-1).issparse ();
6005 iscomplex = args(nargin-1).iscomplex ();
6006 nargin -= 2;
6008 }
6009
6010 switch (nargin)
6011 {
6012 case 0:
6013 break;
6014
6015 case 1:
6016 get_dimensions (args(0), fcn, dims);
6017 break;
6018
6019 default:
6020 {
6021 dims.resize (nargin);
6022
6023 for (int i = 0; i < nargin; i++)
6024 {
6025 if (args(i).numel () > 1)
6026 error ("%s: dimensions must be scalars.", fcn);
6027
6028 dims(i) = (args(i).isempty () ? 0 : args(i).idx_type_value (true));
6029 }
6030 }
6031 break;
6032 }
6033
6034 dims.chop_trailing_singletons ();
6035
6036 check_dimensions (dims, fcn);
6037
6038 // FIXME: Perhaps this should be made extensible by using the class name
6039 // to lookup a function to call to create the new value.
6040
6041 // Note that automatic narrowing will handle conversion from
6042 // NDArray to scalar.
6043
6044 if (issparse)
6045 {
6046 if (dims.ndims () > 2)
6047 error ("%s: sparse ND arrays not supported.", fcn);
6048
6049 switch (dt)
6050 {
6052 if (iscomplex)
6053 retval = SparseComplexMatrix (dims(0), dims(1), Complex (val, 0));
6054 else
6055 retval = SparseMatrix (dims(0), dims(1), static_cast<double> (val));
6056 break;
6057
6059 retval = SparseBoolMatrix (dims(0), dims(1), static_cast<bool> (val));
6060 break;
6061
6062 default:
6063 // FIXME: It shouldn't be possible to ever reach this.
6064 error ("%s: invalid class name for sparse", fcn);
6065 }
6066
6067 return retval;
6068 }
6069
6070 switch (dt)
6071 {
6073 retval = int8NDArray (dims, val);
6074 break;
6075
6077 retval = uint8NDArray (dims, val);
6078 break;
6079
6081 retval = int16NDArray (dims, val);
6082 break;
6083
6085 retval = uint16NDArray (dims, val);
6086 break;
6087
6089 retval = int32NDArray (dims, val);
6090 break;
6091
6093 retval = uint32NDArray (dims, val);
6094 break;
6095
6097 retval = int64NDArray (dims, val);
6098 break;
6099
6101 retval = uint64NDArray (dims, val);
6102 break;
6103
6105 if (iscomplex)
6106 retval = FloatComplexNDArray (dims, val);
6107 else
6108 retval = FloatNDArray (dims, val);
6109 break;
6110
6112 if (iscomplex)
6113 retval = ComplexNDArray (dims, Complex (val, 0));
6114 else
6115 retval = NDArray (dims, val);
6116 break;
6117
6119 retval = boolNDArray (dims, val);
6120 break;
6121
6122 default:
6123 error ("%s: invalid class name", fcn);
6124 break;
6125 }
6126
6127 return retval;
6128}
6129
6130static octave_value
6131fill_matrix (const octave_value_list& args, double val, float fval,
6132 const char *fcn)
6133{
6134 octave_value retval;
6135
6136 int nargin = args.length ();
6137
6139
6140 dim_vector dims (1, 1);
6141 bool issparse = false;
6142 bool iscomplex = false;
6143
6144 if (nargin > 0 && args(nargin-1).is_string ())
6145 {
6146 std::string nm = args(nargin-1).string_value ();
6147 nargin--;
6148
6150 }
6151
6152 if (nargin > 1 && args(nargin-2).is_string ()
6153 && args(nargin-2).string_value () == "like"
6154 && (std::string(fcn) == "Inf"
6155 || std::string(fcn) == "NaN" || std::string(fcn) == "NA"))
6156 {
6157 if (! args(nargin-1).isfloat ())
6158 error ("%s: input followed by 'like' must be floating point", fcn);
6159 std::string nm = args(nargin-1).class_name ();
6160 issparse = args(nargin-1).issparse ();
6161 iscomplex = args(nargin-1).iscomplex ();
6162 nargin -= 2;
6164 }
6165
6166 switch (nargin)
6167 {
6168 case 0:
6169 break;
6170
6171 case 1:
6172 get_dimensions (args(0), fcn, dims);
6173 break;
6174
6175 default:
6176 {
6177 dims.resize (nargin);
6178
6179 for (int i = 0; i < nargin; i++)
6180 {
6181 if (args(i).numel () > 1)
6182 error ("%s: dimensions must be scalars.", fcn);
6183
6184 dims(i) = (args(i).isempty () ? 0 : args(i).idx_type_value (true));
6185 }
6186 }
6187 break;
6188 }
6189
6190 dims.chop_trailing_singletons ();
6191
6192 check_dimensions (dims, fcn);
6193
6194 // Note that automatic narrowing will handle conversion from
6195 // NDArray to scalar.
6196
6197 if (issparse)
6198 {
6199 if (dims.ndims () > 2)
6200 error ("%s: sparse ND arrays not supported", fcn);
6201
6202 if (iscomplex)
6203 retval = SparseComplexMatrix (dims(0), dims(1), Complex (val, 0));
6204 else
6205 retval = SparseMatrix (dims(0), dims(1), static_cast<double> (val));
6206
6207 return retval;
6208 }
6209
6210 switch (dt)
6211 {
6213 if (iscomplex)
6214 retval = FloatComplexNDArray (dims, fval);
6215 else
6216 retval = FloatNDArray (dims, fval);
6217 break;
6218
6220 if (iscomplex)
6221 retval = ComplexNDArray (dims, Complex (val, 0));
6222 else
6223 retval = NDArray (dims, val);
6224 break;
6225
6226 default:
6227 error ("%s: invalid class name", fcn);
6228 break;
6229 }
6230
6231 return retval;
6232}
6233
6234static octave_value
6235fill_matrix (const octave_value_list& args, double val, const char *fcn)
6236{
6237 octave_value retval;
6238
6239 int nargin = args.length ();
6240
6242
6243 dim_vector dims (1, 1);
6244
6245 if (nargin > 0 && args(nargin-1).is_string ())
6246 {
6247 std::string nm = args(nargin-1).string_value ();
6248 nargin--;
6249
6251 }
6252
6253 switch (nargin)
6254 {
6255 case 0:
6256 break;
6257
6258 case 1:
6259 get_dimensions (args(0), fcn, dims);
6260 break;
6261
6262 default:
6263 {
6264 dims.resize (nargin);
6265
6266 for (int i = 0; i < nargin; i++)
6267 {
6268 if (args(i).numel () > 1)
6269 error ("%s: dimensions must be scalars.", fcn);
6270
6271 dims(i) = (args(i).isempty () ? 0 : args(i).idx_type_value (true));
6272 }
6273 }
6274 break;
6275 }
6276
6277 dims.chop_trailing_singletons ();
6278
6279 check_dimensions (dims, fcn);
6280
6281 // Note that automatic narrowing will handle conversion from
6282 // NDArray to scalar.
6283
6284 switch (dt)
6285 {
6287 retval = FloatNDArray (dims, static_cast<float> (val));
6288 break;
6289
6291 retval = NDArray (dims, val);
6292 break;
6293
6294 default:
6295 error ("%s: invalid class name", fcn);
6296 break;
6297 }
6298
6299 return retval;
6300}
6301
6302static octave_value
6303fill_matrix (const octave_value_list& args, const Complex& val,
6304 const char *fcn)
6305{
6306 octave_value retval;
6307
6308 int nargin = args.length ();
6309
6311
6312 dim_vector dims (1, 1);
6313
6314 if (nargin > 0 && args(nargin-1).is_string ())
6315 {
6316 std::string nm = args(nargin-1).string_value ();
6317 nargin--;
6318
6320 }
6321
6322 switch (nargin)
6323 {
6324 case 0:
6325 break;
6326
6327 case 1:
6328 get_dimensions (args(0), fcn, dims);
6329 break;
6330
6331 default:
6332 {
6333 dims.resize (nargin);
6334
6335 for (int i = 0; i < nargin; i++)
6336 {
6337 if (args(i).numel () > 1)
6338 error ("%s: dimensions must be scalars.", fcn);
6339
6340 dims(i) = (args(i).isempty () ? 0 : args(i).idx_type_value (true));
6341 }
6342 }
6343 break;
6344 }
6345
6346 dims.chop_trailing_singletons ();
6347
6348 check_dimensions (dims, fcn);
6349
6350 // Note that automatic narrowing will handle conversion from
6351 // NDArray to scalar.
6352
6353 switch (dt)
6354 {
6356 retval = FloatComplexNDArray (dims,
6357 static_cast<FloatComplex> (val));
6358 break;
6359
6361 retval = ComplexNDArray (dims, val);
6362 break;
6363
6364 default:
6365 error ("%s: invalid class name", fcn);
6366 break;
6367 }
6368
6369 return retval;
6370}
6371
6372static octave_value
6373fill_matrix (const octave_value_list& args, bool val, const char *fcn)
6374{
6375 octave_value retval;
6376
6377 int nargin = args.length ();
6378
6379 dim_vector dims (1, 1);
6380
6381 // The TYPE argument is required to be "logical" if present. This
6382 // feature appears to be undocumented in Matlab.
6383
6384 if (nargin > 0 && args(nargin-1).is_string ())
6385 {
6386 std::string nm = args(nargin-1).string_value ();
6387 nargin--;
6388
6390 error ("%s: invalid data type '%s'", fcn, nm.c_str ());
6391 }
6392
6393 bool issparse = false;
6394
6395 if (nargin > 1 && args(nargin-2).is_string ()
6396 && args(nargin-2).string_value () == "like")
6397 {
6398 if (! args(nargin-1).islogical ())
6399 error (R"(%s: input followed by "like" must be logical)", fcn);
6400
6401 issparse = args(nargin-1).issparse ();
6402 nargin -= 2;
6403 }
6404
6405 switch (nargin)
6406 {
6407 case 0:
6408 break;
6409
6410 case 1:
6411 get_dimensions (args(0), fcn, dims);
6412 break;
6413
6414 default:
6415 {
6416 dims.resize (nargin);
6417
6418 for (int i = 0; i < nargin; i++)
6419 {
6420 if (args(i).numel () > 1)
6421 error ("%s: dimensions must be scalars.", fcn);
6422
6423 dims(i) = (args(i).isempty () ? 0 : args(i).idx_type_value (true));
6424 }
6425 }
6426 break;
6427 }
6428
6429 dims.chop_trailing_singletons ();
6430
6431 check_dimensions (dims, fcn);
6432
6433 // Note that automatic narrowing will handle conversion from
6434 // NDArray to scalar.
6435
6436 if (issparse)
6437 {
6438 if (dims.ndims () > 2)
6439 error ("%s: sparse ND arrays not supported", fcn);
6440
6441 retval = SparseBoolMatrix (dims(0), dims(1), val);
6442 }
6443 else
6444 retval = boolNDArray (dims, val);
6445
6446 return retval;
6447}
6448
DEFUN (ones, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{x} =} ones ()
@deftypefnx {} {@var{x} =} ones (@var{n})
@deftypefnx {} {@var{x} =} ones (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{x} =} ones ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{x} =} ones (@dots{}, @var{class})
@deftypefnx {} {@var{x} =} ones (@dots{}, "like", @var{var})
Return a scalar, matrix, or N-dimensional array whose elements are all
@code{1}.

If called with no arguments, return the scalar value @code{1}.

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

The optional argument @var{class} specifies the class of the return array
and defaults to @qcode{"double"}.

If a variable @var{var} is specified after @qcode{"like"}, the output @var{val}
will have the same data type, complexity, and sparsity as @var{var}.

Example 1 : @nospell{MxN} matrix of constant value @var{val}

@example
@var{C} = @var{val} * ones (@var{m}, @var{n})
@end example

Example 2 : @nospell{MxN} matrix of uint8

@example
@var{C} = ones (@var{m}, @var{n}, "uint8")
@end example

Programming Note: Any negative dimensions are treated as zero, and any zero
dimensions will result in an empty matrix.  This odd behavior is for
@sc{matlab} compatibility.
@seealso{zeros, true, false}
@end deftypefn */)
{
  // Argument handling (dimensions, class name, "like") is done by the
  // shared fill_matrix worker; the fill value is 1.
  return fill_matrix (args, 1, "ones");
}
6494
6495/*
6496%!assert (ones (), 1)
6497%!assert (ones (3), [1, 1, 1; 1, 1, 1; 1, 1, 1])
6498%!assert (ones (2, 3), [1, 1, 1; 1, 1, 1])
6499%!assert (ones (3, 2), [1, 1; 1, 1; 1, 1])
6500%!assert (size (ones (3, 4, 5)), [3, 4, 5])
6501
6502%!assert (ones ("single"), single (1))
6503%!assert (ones (3, "single"), single ([1, 1, 1; 1, 1, 1; 1, 1, 1]))
6504%!assert (ones (2, 3, "single"), single ([1, 1, 1; 1, 1, 1]))
6505%!assert (ones (3, 2, "single"), single ([1, 1; 1, 1; 1, 1]))
6506%!assert (size (ones (3, 4, 5, "single")), [3, 4, 5])
6507
6508%!assert (ones ("int8"), int8 (1))
6509%!assert (ones (3, "int8"), int8 ([1, 1, 1; 1, 1, 1; 1, 1, 1]))
6510%!assert (ones (2, 3, "int8"), int8 ([1, 1, 1; 1, 1, 1]))
6511%!assert (ones (3, 2, "int8"), int8 ([1, 1; 1, 1; 1, 1]))
6512%!assert (size (ones (3, 4, 5, "int8")), [3, 4, 5])
6513
6514%!assert (ones (2, 2, "like", double (1)), double ([1, 1; 1, 1]))
6515%!assert (ones (2, 2, "like", complex (ones (2, 2))), [1, 1; 1, 1])
6516%!assert (ones (1, 2, "like", single (1)), single ([1, 1]))
6517%!assert (ones (1, "like", single (1i)), single (1))
6518%!assert (ones (2, 2, "like", uint8 (8)), uint8 ([1, 1; 1, 1]))
6519%!assert (ones (2, "like", speye (2)), sparse ([1, 1; 1, 1]))
6520%!assert (ones (2, "like", sparse (1i)), sparse (complex ([1, 1; 1, 1])))
6521
6522## Note: Matlab compatibility requires using 0 for negative dimensions.
6523%!assert (size (ones (1, -2, 2)), [1, 0, 2])
6524
6525## Test input validation
6526%!error <invalid data type specified> ones (1, 1, "foobar")
6527%!error <conversion of 1.1 .*failed> ones (1.1)
6528%!error <conversion of 1.1 .*failed> ones (1, 1.1)
6529%!error <conversion of 1.1 .*failed> ones ([1, 1.1])
6530%!error <sparse ND .* not supported> ones (3, 3, 3, "like", speye (1))
6531%!error <must be scalar> ones (1:3, 1)
6532%!error <must be scalar> ones (1, 1:3)
6533%!error <must be scalar> ones (1, 2, 1:3)
6534%!error <must be scalar> ones (1:3, 1, "like", single (1))
6535*/
6536
6537/*
6538## Tests for bug #47298
6539## Matlab requires the size to be a row vector. In that logic, it supports
6540## n to be a 1x0 vector (returns 0x0) but not a 0x1 vector. Octave supports
6541## row and column vectors and therefore must support 0x1, 1x0, and 0x1x1.
6542## Also any empty input results in a 0x0 output.
6543%!test <*47298>
6544%! fcns = {@zeros, @ones, @inf, @nan, @NA, @i, @pi, @e};
6545%! for idx = 1:numel (fcns)
6546%! fcn = fcns{idx};
6547%! assert (fcn (zeros (1, 0)), zeros (0, 0));
6548%! assert (fcn (zeros (0, 1)), zeros (0, 0));
6549%! assert (fcn (zeros (0, 1, 1)), zeros (0, 0));
6550%! assert (fcn (zeros ([])), zeros (0, 0));
6551%! assert (fcn (zeros (0, 0, 1)), zeros (0, 0));
6552%! endfor
6553*/
6554
DEFUN (zeros, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{x} =} zeros ()
@deftypefnx {} {@var{x} =} zeros (@var{n})
@deftypefnx {} {@var{x} =} zeros (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{x} =} zeros ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{x} =} zeros (@dots{}, @var{class})
@deftypefnx {} {@var{x} =} zeros (@dots{}, "like", @var{var})
Return a scalar, matrix, or N-dimensional array whose elements are all
@code{0}.

If called with no arguments, return the scalar value @code{0}.

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

The optional argument @var{class} specifies the class of the return array
and defaults to @qcode{"double"}.

If a variable @var{var} is specified after @qcode{"like"}, the output @var{val}
will have the same data type, complexity, and sparsity as @var{var}.

Example : @nospell{MxN} matrix of uint8

@example
@var{C} = zeros (@var{m}, @var{n}, "uint8")
@end example

Programming Note: Any negative dimensions are treated as zero, and any zero
dimensions will result in an empty matrix.  This odd behavior is for
@sc{matlab} compatibility.
@seealso{ones, true, false}
@end deftypefn */)
{
  // Argument handling (dimensions, class name, "like") is done by the
  // shared fill_matrix worker; the fill value is 0.
  return fill_matrix (args, 0, "zeros");
}
6594
6595/*
6596%!assert (zeros (), 0)
6597%!assert (zeros (3), [0, 0, 0; 0, 0, 0; 0, 0, 0])
6598%!assert (zeros (2, 3), [0, 0, 0; 0, 0, 0])
6599%!assert (zeros (3, 2), [0, 0; 0, 0; 0, 0])
6600%!assert (size (zeros (3, 4, 5)), [3, 4, 5])
6601
6602%!assert (zeros ("single"), single (0))
6603%!assert (zeros (3, "single"), single ([0, 0, 0; 0, 0, 0; 0, 0, 0]))
6604%!assert (zeros (2, 3, "single"), single ([0, 0, 0; 0, 0, 0]))
6605%!assert (zeros (3, 2, "single"), single ([0, 0; 0, 0; 0, 0]))
6606%!assert (size (zeros (3, 4, 5, "single")), [3, 4, 5])
6607
6608%!assert (zeros ("int8"), int8 (0))
6609%!assert (zeros (3, "int8"), int8 ([0, 0, 0; 0, 0, 0; 0, 0, 0]))
6610%!assert (zeros (2, 3, "int8"), int8 ([0, 0, 0; 0, 0, 0]))
6611%!assert (zeros (3, 2, "int8"), int8 ([0, 0; 0, 0; 0, 0]))
6612%!assert (size (zeros (3, 4, 5, "int8")), [3, 4, 5])
6613
6614%!assert (zeros (2, 2, "like", double (1)), double ([0, 0; 0, 0]))
6615%!assert (zeros (2, 2, "like", complex (ones (2, 2))), [0, 0; 0, 0])
6616%!assert (zeros (1, 2, "like", single (1)), single ([0, 0]))
6617%!assert (zeros (1, 2, "like", single (1i)), single ([0, 0]))
6618%!assert (zeros (2, 2, "like", uint8 (8)), uint8 ([0, 0; 0, 0]))
6619%!assert (zeros (2, "like", speye (2)), sparse ([0, 0; 0, 0]))
6620
6621## Test input validation
6622%!error <invalid data type specified> zeros (1, 1, "foobar")
6623%!error <conversion of 1.1 .*failed> zeros (1.1)
6624%!error <conversion of 1.1 .*failed> zeros (1, 1.1)
6625%!error <conversion of 1.1 .*failed> zeros ([1, 1.1])
6626%!error <sparse ND .* not supported> zeros (3, 3, 3, "like", speye (1))
6627%!error <must be scalar> zeros (1:3, 1)
6628%!error <must be scalar> zeros (1, 1:3)
6629%!error <must be scalar> zeros (1, 2, 1:3)
6630%!error <must be scalar> zeros (1:3, 1, "like", single (1))
6631*/
6632
DEFUN (Inf, args, ,
       doc: /* -*- texinfo -*-
@c List other form of function in documentation index
@findex inf

@deftypefn {} {@var{x} =} Inf
@deftypefnx {} {@var{x} =} Inf (@var{n})
@deftypefnx {} {@var{x} =} Inf (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{x} =} Inf ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{x} =} Inf (@dots{}, @var{class})
@deftypefnx {} {@var{x} =} Inf (@dots{}, "like", @var{var})
Return a scalar, matrix or N-dimensional array whose elements are all equal
to the IEEE@tie{}754 representation for positive infinity.

Infinity is produced when results are too large to be represented using the
IEEE@tie{}754 floating point format for numbers.  Two common examples which
produce infinity are division by zero and overflow.

@example
@group
[ 1/0 e^800 ]
@xresult{} Inf   Inf
@end group
@end example

If called with no arguments, return the scalar value @code{Inf}.

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

The optional argument @var{class} specifies the class of the return array.
The only valid options are @qcode{"double"} (default) or @qcode{"single"}.

If a variable @var{var} is specified after @qcode{"like"}, the output @var{x}
will have the same data type, complexity, and sparsity as @var{var}.
@seealso{isinf, NaN}
@end deftypefn */)
{
  // Both the double and the single representation of +Inf are supplied;
  // fill_matrix selects one according to the requested output class.
  return fill_matrix (args, lo_ieee_inf_value (),
                      lo_ieee_float_inf_value (), "Inf");
}
6677
6678DEFALIAS (inf, Inf);
6679
6680/*
6681%!assert (Inf (3), [Inf, Inf, Inf; Inf, Inf, Inf; Inf, Inf, Inf])
6682%!assert (Inf (2, 3), [Inf, Inf, Inf; Inf, Inf, Inf])
6683%!assert (Inf (3, 2), [Inf, Inf; Inf, Inf; Inf, Inf])
6684%!assert (size (Inf (3, 4, 5)), [3, 4, 5])
6685
6686%!assert (Inf (3, "single"),
6687%! single ([Inf, Inf, Inf; Inf, Inf, Inf; Inf, Inf, Inf]))
6688%!assert (Inf (2, 3, "single"), single ([Inf, Inf, Inf; Inf, Inf, Inf]))
6689%!assert (Inf (3, 2, "single"), single ([Inf, Inf; Inf, Inf; Inf, Inf]))
6690%!assert (size (inf (3, 4, 5, "single")), [3, 4, 5])
6691
6692## Note: Matlab compatibility requires using 0 for negative dimensions.
6693%!assert (size (Inf (2, -3, 2)), [2, 0, 2])
6694
6695%!assert (Inf (2, 2, "like", speye (2)), sparse ([Inf, Inf; Inf, Inf]))
6696%!assert (Inf (2, 2, "like", complex (ones (2, 2))), [Inf, Inf; Inf, Inf])
6697%!assert (Inf (2, 2, "like", double (1)), double ([Inf, Inf; Inf, Inf]))
6698%!assert (Inf (3, 3, "like", single (1)),
6699%! single ([Inf, Inf, Inf; Inf, Inf, Inf; Inf, Inf, Inf]))
6700%!assert (Inf (2, "like", single (1i)), single ([Inf, Inf; Inf, Inf]))
6701
6702%!error Inf (3, "like", int8 (1))
6703
6704%!error Inf (3, "int8")
6705%!error Inf (2, 3, "int8")
6706%!error Inf (3, 2, "int8")
6707%!error Inf (3, 4, 5, "int8")
6708%!error <input .* floating> Inf (3, 3, "like", true)
6709%!error <input .* floating> Inf (2, "like", uint8 (1))
6710%!error <must be scalar> Inf (1:3, 1)
6711%!error <must be scalar> Inf (1, 1:3)
6712%!error <must be scalar> Inf (1, 2, 1:3)
6713%!error <must be scalar> Inf (1:3, 1, "like", single (1))
6714*/
6715
DEFUN (NaN, args, ,
       doc: /* -*- texinfo -*-
@c List other form of function in documentation index
@findex nan

@deftypefn {} {@var{x} =} NaN
@deftypefnx {} {@var{x} =} NaN (@var{n})
@deftypefnx {} {@var{x} =} NaN (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{x} =} NaN ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{x} =} NaN (@dots{}, @var{class})
@deftypefnx {} {@var{x} =} NaN (@dots{}, "like", @var{var})
Return a scalar, matrix, or N-dimensional array whose elements are all equal
to the IEEE@tie{}754 symbol NaN (Not a Number).

@code{NaN} is the result of operations which do not produce a well defined
numerical result. Common operations which produce a @code{NaN} are arithmetic
with infinity
@tex
($\infty - \infty$), zero divided by zero ($0/0$),
@end tex
@ifnottex
(Inf - Inf), zero divided by zero (0/0),
@end ifnottex
and any operation involving another @code{NaN} value (5 + @code{NaN}).

Note that @code{NaN} always compares not equal to @code{NaN}
(@code{NaN != NaN}). This behavior is specified by the IEEE@tie{}754 standard
for floating point arithmetic. To find @code{NaN} values, use the @code{isnan}
function.

If called with no arguments, return the scalar value @code{NaN}.

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

The optional argument @var{class} specifies the class of the return array.
The only valid options are @qcode{"double"} (default) or @qcode{"single"}.

If a variable @var{var} is specified after @qcode{"like"}, the output @var{x}
will have the same data type, complexity, and sparsity as @var{var}.
@seealso{isnan, Inf}
@end deftypefn */)
{
  // Delegate argument parsing to fill_matrix, supplying both the double and
  // the single-precision quiet-NaN fill values; it picks the one matching
  // the requested output class.
  return fill_matrix (args, lo_ieee_nan_value (),
                      lo_ieee_float_nan_value (), "NaN");
}
6765
6766DEFALIAS (nan, NaN);
6767
6768/*
6769%!assert (NaN (3), [NaN, NaN, NaN; NaN, NaN, NaN; NaN, NaN, NaN])
6770%!assert (NaN (2, 3), [NaN, NaN, NaN; NaN, NaN, NaN])
6771%!assert (NaN (3, 2), [NaN, NaN; NaN, NaN; NaN, NaN])
6772%!assert (size (NaN (3, 4, 5)), [3, 4, 5])
6773
6774%!assert (NaN (3, "single"),
6775%! single ([NaN, NaN, NaN; NaN, NaN, NaN; NaN, NaN, NaN]))
6776%!assert (NaN (2, 3, "single"), single ([NaN, NaN, NaN; NaN, NaN, NaN]))
6777%!assert (NaN (3, 2, "single"), single ([NaN, NaN; NaN, NaN; NaN, NaN]))
6778%!assert (size (NaN (3, 4, 5, "single")), [3, 4, 5])
6779
6780%!assert (NaN (2, 2, "like", double (1)), double ([NaN, NaN; NaN, NaN]))
6781%!assert (NaN (2, 2, "like", complex (ones(2, 2))), [NaN, NaN; NaN, NaN])
6782%!assert (NaN (3, 3, "like", single (1)),
6783%! single ([NaN, NaN, NaN; NaN, NaN, NaN; NaN, NaN, NaN]))
6784%!assert (NaN (2, "like", single (1i)), single ([NaN, NaN; NaN, NaN]))
6785%!assert (NaN (2, 2, "like", speye (2)), sparse ([NaN, NaN; NaN, NaN]))
6786
6787%!error NaN (3, 'like', int8 (1))
6788
6789%!error NaN (3, "int8")
6790%!error NaN (2, 3, "int8")
6791%!error NaN (3, 2, "int8")
6792%!error NaN (3, 4, 5, "int8")
6793%!error <input .* floating> NaN (3, 3, "like", true)
6794%!error <input .* floating> NaN (2, "like", uint8 (1))
6795%!error <must be scalar> NaN (1:3, 1)
6796%!error <must be scalar> NaN (1, 1:3)
6797%!error <must be scalar> NaN (1, 2, 1:3)
6798%!error <must be scalar> NaN (1:3, 1, "like", single (1))
6799*/
6800
DEFUN (e, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{x} =} e
@deftypefnx {} {@var{x} =} e (@var{n})
@deftypefnx {} {@var{x} =} e (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{x} =} e ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{x} =} e (@dots{}, @var{class})
Return a scalar, matrix, or N-dimensional array whose elements are all equal
to the base of natural logarithms.

The constant
@tex
$e$ satisfies the equation $\log (e) = 1$.
@end tex
@ifnottex
@samp{e} satisfies the equation @code{log} (e) = 1.
@end ifnottex

If called with no arguments, return the scalar value @math{e}.

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

The optional argument @var{class} specifies the class of the return array.
The only valid options are @qcode{"double"} (default) or @qcode{"single"}.
@seealso{log, exp, pi, I}
@end deftypefn */)
{
  // M_E is a POSIX extension, not standard C++, so fall back to computing
  // exp(1) when <cmath> does not provide the macro.
#if defined (M_E)
  double e_val = M_E;
#else
  double e_val = exp (1.0);
#endif

  return fill_matrix (args, e_val, "e");
}
6840
6841template <typename T>
6842T
6843eps (const T& x)
6844{
6845 T epsval = x.abs ();
6846 typedef typename T::value_type P;
6847 for (octave_idx_type i = 0; i < x.numel (); i++)
6848 {
6849 P val = epsval.xelem (i);
6850 if (math::isnan (val) || math::isinf (val))
6851 epsval(i) = numeric_limits<P>::NaN ();
6852 else if (val < std::numeric_limits<P>::min ())
6853 epsval(i) = std::numeric_limits<P>::denorm_min ();
6854 else
6855 {
6856 int exponent;
6857 math::frexp (val, &exponent);
6858 const P digits = std::numeric_limits<P>::digits;
6859 epsval(i) = std::pow (static_cast<P> (2.0),
6860 static_cast<P> (exponent - digits));
6861 }
6862 }
6863 return epsval;
6864}
6865
6866DEFUN (eps, args, ,
6867 doc: /* -*- texinfo -*-
6868@deftypefn {} {@var{d} =} eps
6869@deftypefnx {} {@var{d} =} eps (@var{x})
6870@deftypefnx {} {@var{d} =} eps (@var{m}, @var{n}, @dots{})
6871@deftypefnx {} {@var{d} =} eps ([@var{m}, @var{n}, @dots{}])
6872@deftypefnx {} {@var{d} =} eps (@dots{}, @var{class})
6873Return a scalar, matrix or N-dimensional array whose elements are eps,
6874the machine precision.
6875
6876More precisely, @code{eps} is the relative spacing between any two adjacent
6877numbers in the machine's floating point system. This number depends both on
6878the system and where the number lies in the range representable by the floating
6879point system. On machines that support IEEE@tie{}754 floating point
6880arithmetic, @w{@code{eps (1.0)}} is approximately
6881@tex
6882$2.2204\times10^{-16}$ for double precision and $1.1921\times10^{-7}$
6883@end tex
6884@ifnottex
68852.2204e-16 for double precision and 1.1921e-07
6886@end ifnottex
6887for single precision.
6888
6889If called with no arguments, return the scalar value @w{@code{eps (1.0)}}.
6890
6891Given a floating point argument @var{x}, return an array @var{d} of the same
6892size where each element is the distance between the element of @var{x} and
6893the next largest value.
6894
6895If invoked with two or more scalar integer arguments, or a vector of integer
6896values, return an array with the given dimensions whose elements are all the
6897scalar value @code{eps}.
6898
6899The optional argument @var{class} specifies the class of the return array.
6900The only valid options are @qcode{"double"} (default) or @qcode{"single"}.
6901@seealso{realmax, realmin, intmax, flintmax}
6902@end deftypefn */)
6903{
6904 octave_value retval;
6905
6906 if (args.length () == 1 && ! args(0).is_string ())
6907 {
6908 octave_value arg0 = args(0);
6909 if (arg0.is_single_type ())
6910 {
6911 FloatNDArray epsval = eps (arg0.float_array_value ());
6912 retval = epsval;
6913 }
6914 else if (arg0.is_double_type ())
6915 {
6916 NDArray epsval = eps (arg0.array_value ());
6917 retval = epsval;
6918 }
6919 else
6920 error ("eps: X must be of a floating point type");
6921 }
6922 else
6923 retval = fill_matrix (args, std::numeric_limits<double>::epsilon (),
6924 std::numeric_limits<float>::epsilon (), "eps");
6925
6926 return retval;
6927}
6928
6929/*
6930%!assert (eps (1/2), 2^(-53))
6931%!assert (eps (1), 2^(-52))
6932%!assert (eps (2), 2^(-51))
6933%!assert (eps (realmax), 2^971)
6934%!assert (eps (0), 2^(-1074))
6935%!assert (eps (realmin/2), 2^(-1074))
6936%!assert (eps (realmin/16), 2^(-1074))
6937%!assert (eps (Inf), NaN)
6938%!assert (eps (NaN), NaN)
6939%!assert (eps ([1/2 1 2 realmax 0 realmin/2 realmin/16 Inf NaN]),
6940%! [2^-53 2^-52 2^-51 2^971 2^-1074 2^-1074 2^-1074 NaN NaN])
6941%!assert (eps (single (1/2)), single (2^(-24)))
6942%!assert (eps (single (1)), single (2^(-23)))
6943%!assert (eps (single (2)), single (2^(-22)))
6944%!assert (eps (realmax ("single")), single (2^104))
6945%!assert (eps (single (0)), single (2^(-149)))
6946%!assert (eps (realmin ("single")/2), single (2^(-149)))
6947%!assert (eps (realmin ("single")/16), single (2^(-149)))
6948%!assert (eps (single (Inf)), single (NaN))
6949%!assert (eps (single (NaN)), single (NaN))
6950%!assert (eps (single ([1/2 1 2 realmax("single") 0 realmin("single")/2 realmin("single")/16 Inf NaN])),
6951%! single ([2^-24 2^-23 2^-22 2^104 2^-149 2^-149 2^-149 NaN NaN]))
6952
6953## Note: Matlab compatibility requires using 0 for negative dimensions.
6954%!assert (size (eps (2, -3, 2)), [2, 0, 2])
6955
6956%!error <X must be of a floating point type> eps (uint8 ([0 1 2]))
6957%!error <must be scalar> eps (1:3, 1)
6958%!error <must be scalar> eps (1, 1:3)
6959%!error <must be scalar> eps (1, 2, 1:3)
6960%!error <must be scalar> eps (1:3, 1, "single")
6961*/
6962
DEFUN (pi, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{p} =} pi
@deftypefnx {} {@var{p} =} pi (@var{n})
@deftypefnx {} {@var{p} =} pi (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{p} =} pi ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{p} =} pi (@dots{}, @var{class})
Return a scalar, matrix, or N-dimensional array whose elements are all equal
to the ratio of the circumference of a circle to its
@tex
diameter($\pi$).
@end tex
@ifnottex
diameter.
@end ifnottex

If called with no arguments, return the scalar value
@tex
$\pi$.
@end tex
@ifnottex
pi.
@end ifnottex

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

The optional argument @var{class} specifies the class of the return array.
The only valid options are @qcode{"double"} (default) or @qcode{"single"}.
@seealso{e, I}
@end deftypefn */)
{
  // M_PI is a POSIX extension, not standard C++, so fall back to the
  // identity pi = 4*atan(1) when <cmath> does not provide the macro.
#if defined (M_PI)
  double pi_val = M_PI;
#else
  double pi_val = 4.0 * atan (1.0);
#endif

  return fill_matrix (args, pi_val, "pi");
}
7006
DEFUN (realmax, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{Rmax} =} realmax
@deftypefnx {} {@var{Rmax} =} realmax (@var{n})
@deftypefnx {} {@var{Rmax} =} realmax (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{Rmax} =} realmax ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{Rmax} =} realmax (@dots{}, @var{class})
@deftypefnx {} {@var{Rmax} =} realmax (@dots{}, "like", @var{var})
Return a scalar, matrix, or N-dimensional array whose elements are all equal
to the largest floating point number that is representable.

The actual value is system-dependent. On machines that support IEEE@tie{}754
floating point arithmetic, @code{realmax} is approximately
@tex
$1.7977\times10^{308}$ for double precision and $3.4028\times10^{38}$
@end tex
@ifnottex
1.7977e+308 for double precision and 3.4028e+38
@end ifnottex
for single precision.

If called with no arguments, return the scalar value
@code{realmax (@qcode{"double"})}.

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

The optional argument @var{class} specifies the class of the return array.
The only valid options are @qcode{"double"} (default) or @qcode{"single"}.

If a variable @var{var} is specified after @qcode{"like"}, the output
@var{Rmax} will have the same data type, complexity, and sparsity as @var{var}.
@seealso{realmin, intmax, flintmax, eps}
@end deftypefn */)
{
  // numeric_limits<>::max() is DBL_MAX / FLT_MAX; fill_matrix selects the
  // value matching the requested output class.
  return fill_matrix (args, std::numeric_limits<double>::max (),
                      std::numeric_limits<float>::max (), "realmax");
}
7048
DEFUN (realmin, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{Rmin} =} realmin
@deftypefnx {} {@var{Rmin} =} realmin (@var{n})
@deftypefnx {} {@var{Rmin} =} realmin (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{Rmin} =} realmin ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{Rmin} =} realmin (@dots{}, @var{class})
@deftypefnx {} {@var{Rmin} =} realmin (@dots{}, "like", @var{var})
Return a scalar, matrix, or N-dimensional array whose elements are all equal
to the smallest normalized floating point number that is representable.

The actual value is system-dependent. On machines that support IEEE@tie{}754
floating point arithmetic, @code{realmin} is approximately
@tex
$2.2251\times10^{-308}$ for double precision and $1.1755\times10^{-38}$
@end tex
@ifnottex
2.2251e-308 for double precision and 1.1755e-38
@end ifnottex
for single precision.

If called with no arguments, return the scalar value
@code{realmin (@qcode{"double"})}.

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

The optional argument @var{class} specifies the class of the return array.
The only valid options are @qcode{"double"} (default) or @qcode{"single"}.

If a variable @var{var} is specified after @qcode{"like"}, the output
@var{Rmin} will have the same data type, complexity, and sparsity as @var{var}.
@seealso{realmax, intmin, eps}
@end deftypefn */)
{
  // numeric_limits<>::min() is the smallest positive *normalized* value
  // (DBL_MIN / FLT_MIN), not the most negative number.
  return fill_matrix (args, std::numeric_limits<double>::min (),
                      std::numeric_limits<float>::min (), "realmin");
}
7090
DEFUN (I, args, ,
       doc: /* -*- texinfo -*-
@c List other forms of function in documentation index
@findex i
@findex j
@findex J

@deftypefn {} {@var{x} =} I
@deftypefnx {} {@var{x} =} I (@var{n})
@deftypefnx {} {@var{x} =} I (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{x} =} I ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{x} =} I (@dots{}, @var{class})
Return a scalar, matrix, or N-dimensional array whose elements are all equal
to the pure imaginary unit, defined as
@tex
$\sqrt{-1}$.
@end tex
@ifnottex
@w{@code{sqrt (-1)}}.
@end ifnottex

@code{I}, and its equivalents @code{i}, @code{j}, and @code{J}, are functions
so any of the names may be reused for other purposes (such as @code{i} for a
counter variable).

If called with no arguments, return the scalar value @code{complex (0, 1)}.

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

The optional argument @var{class} specifies the class of the return array.
The only valid options are @qcode{"double"} (default) or @qcode{"single"}.
@seealso{e, pi, log, exp}
@end deftypefn */)
{
  // Fill with the complex value 0+1i; fill_matrix handles dims and class.
  return fill_matrix (args, Complex (0.0, 1.0), "I");
}
7131
7132DEFALIAS (i, I);
7133DEFALIAS (J, I);
7134DEFALIAS (j, I);
7135
DEFUN (NA, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{x} =} NA
@deftypefnx {} {@var{x} =} NA (@var{n})
@deftypefnx {} {@var{x} =} NA (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{x} =} NA ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{x} =} NA (@dots{}, @var{class})
@deftypefnx {} {@var{x} =} NA (@dots{}, "like", @var{var})
Return a scalar, matrix, or N-dimensional array whose elements are all equal
to the special constant NA (Not Available) used to designate missing values.

Note that @code{NA} always compares not equal to @code{NA} (@code{NA != NA}).
To find @code{NA} values, use the @code{isna} function.

If called with no arguments, return the scalar value @code{NA}.

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

The optional argument @var{class} specifies the class of the return array.
The only valid options are @qcode{"double"} (default) or @qcode{"single"}.

Programming Note: The missing data marker @code{NA} is a special case of the
representation of @code{NaN}. Numerical calculations with @code{NA} will
generally "poison" the results and conclude with an output of @code{NA}.
However, this can not be guaranteed on all platforms and @code{NA} may be
replaced by @code{NaN}. @xref{Missing Data}.
@seealso{isna}
@end deftypefn */)
{
  // lo_ieee_na_value() supplies the NA bit pattern (a NaN with a
  // distinguished payload, per the texinfo note above); fill_matrix picks
  // the double or single variant to match the requested class.
  return fill_matrix (args, lo_ieee_na_value (),
                      lo_ieee_float_na_value (), "NA");
}
7172
7173/*
7174%!testif HAVE_QNAN_WITH_PAYLOAD
7175%! assert (single (NA ("double")), NA ("single"));
7176%!testif HAVE_QNAN_WITH_PAYLOAD
7177%! assert (double (NA ("single")), NA ("double"));
7178// Duplicate from above. Only for test statistics
7179%!testif ; ! __have_feature__ ("QNAN_WITH_PAYLOAD") <59830>
7180%! assert (single (NA ("double")), NA ("single"));
7181%!testif ; ! __have_feature__ ("QNAN_WITH_PAYLOAD") <59830>
7182%! assert (double (NA ("single")), NA ("double"));
7183*/
7184
DEFUN (false, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{F} =} false
@deftypefnx {} {@var{F} =} false (@var{n})
@deftypefnx {} {@var{F} =} false (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{F} =} false ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{F} =} false (@dots{}, "like", @var{var})
Return a scalar, matrix, or N-dimensional array whose elements are all logical
@code{0}.

If called with no arguments, return the scalar value logical @code{0}.

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

If a logical variable @var{var} is specified after @qcode{"like"}, the output
@var{F} will have the same sparsity as @var{var}.

Programming Note: The code @code{false (@dots{})} is faster (30X) and more
memory efficient than @code{logical (zeros (@dots{}))}.

Any negative dimensions are treated as zero, and any zero dimensions will
result in an empty matrix. This odd behavior is for @sc{matlab} compatibility.
@seealso{true, logical, ones, zeros}
@end deftypefn */)
{
  // The C++ bool fill value makes fill_matrix produce a logical array.
  return fill_matrix (args, false, "false");
}
7216
7217/*
7218%!assert (false, logical (0))
7219%!assert (false (3), logical ([0, 0, 0; 0, 0, 0; 0, 0, 0]))
7220%!assert (false (2, 3), logical ([0, 0, 0; 0, 0, 0]))
7221%!assert (false (3, 2), logical ([0, 0; 0, 0; 0, 0]))
7222%!assert (size (false (3, 4, 5)), [3, 4, 5])
7223%!assert (false (2, 3, "logical"), logical (zeros (2, 3)))
7224%!assert (false (2, 1, "like", true), [false; false])
7225%!assert (false (2, 1, "like", sparse (true)), sparse ([false; false]))
7226
7227## Test input validation
7228%!error false (2, 3, "double")
7229%!error <input .* must be logical> false (2, 1, "like", double (1))
7230%!error <must be scalar> false (1:3, 1)
7231%!error <must be scalar> false (1, 1:3)
7232%!error <must be scalar> false (1, 2, 1:3)
7233*/
7234
DEFUN (true, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{T} =} true
@deftypefnx {} {@var{T} =} true (@var{n})
@deftypefnx {} {@var{T} =} true (@var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{T} =} true ([@var{m}, @var{n}, @dots{}])
@deftypefnx {} {@var{T} =} true (@dots{}, "like", @var{var})
Return a scalar, matrix, or N-dimensional array whose elements are all logical
@code{1}.

If called with no arguments, return the scalar value logical @code{1}.

If invoked with a single scalar integer argument @var{n}, return a square
@nospell{NxN} matrix.

If invoked with two or more scalar integer arguments, or a vector of integer
values, return an array with the given dimensions.

If a logical variable @var{var} is specified after @qcode{"like"}, the output
@var{T} will have the same sparsity as @var{var}.

Programming Notes: The code @code{true (@dots{})} is faster (30X) and more
memory efficient than @code{logical (ones (@dots{}))}.

Any negative dimensions are treated as zero, and any zero dimensions will
result in an empty matrix. This odd behavior is for @sc{matlab} compatibility.
@seealso{false, logical, ones, zeros}
@end deftypefn */)
{
  // The C++ bool fill value makes fill_matrix produce a logical array.
  return fill_matrix (args, true, "true");
}
7266
7267/*
7268
7269%!assert (true, logical (1))
7270%!assert (true (3), logical ([1, 1, 1; 1, 1, 1; 1, 1, 1]))
7271%!assert (true (2, 3), logical ([1, 1, 1; 1, 1, 1]))
7272%!assert (true (3, 2), logical ([1, 1; 1, 1; 1, 1]))
7273%!assert (size (true (3, 4, 5)), [3, 4, 5])
7274%!assert (true (2, 3, "logical"), logical (ones (2, 3)))
7275%!assert (true (2, 1, "like", false), [true; true])
7276%!assert (true (2, 1, "like", sparse (true)), sparse ([true; true]))
7277
7278## Test input validation
7279%!error true (2, 3, "double")
7280%!error <input .* must be logical> true (2, 1, "like", double (1))
7281%!error <must be scalar> true (1:3, 1)
7282%!error <must be scalar> true (1, 1:3)
7283%!error <must be scalar> true (1, 2, 1:3)
7284*/
7285
7286template <typename MT>
7288identity_matrix (int nr, int nc)
7289{
7290 octave_value retval;
7291
7292 typename MT::element_type one (1);
7293
7294 if (nr == 1 && nc == 1)
7295 retval = one;
7296 else
7297 {
7298 dim_vector dims (nr, nc);
7299
7300 typename MT::element_type zero (0);
7301
7302 MT m (dims, zero);
7303
7304 if (nr > 0 && nc > 0)
7305 {
7306 int n = std::min (nr, nc);
7307
7308 for (int i = 0; i < n; i++)
7309 m(i, i) = one;
7310 }
7311
7312 retval = m;
7313 }
7314
7315 return retval;
7316}
7317
7318#define INSTANTIATE_EYE(T) \
7319 template octave_value identity_matrix<T> (int, int)
7320
7332
7333static octave_value
7334identity_matrix (int nr, int nc, oct_data_conv::data_type dt)
7335{
7336 octave_value retval;
7337
7338 // FIXME: Perhaps this should be made extensible by using the class name
7339 // to lookup a function to call to create the new value.
7340
7341 switch (dt)
7342 {
7344 retval = identity_matrix<int8NDArray> (nr, nc);
7345 break;
7346
7348 retval = identity_matrix<uint8NDArray> (nr, nc);
7349 break;
7350
7352 retval = identity_matrix<int16NDArray> (nr, nc);
7353 break;
7354
7356 retval = identity_matrix<uint16NDArray> (nr, nc);
7357 break;
7358
7360 retval = identity_matrix<int32NDArray> (nr, nc);
7361 break;
7362
7364 retval = identity_matrix<uint32NDArray> (nr, nc);
7365 break;
7366
7368 retval = identity_matrix<int64NDArray> (nr, nc);
7369 break;
7370
7372 retval = identity_matrix<uint64NDArray> (nr, nc);
7373 break;
7374
7376 retval = FloatDiagMatrix (nr, nc, 1.0f);
7377 break;
7378
7380 retval = DiagMatrix (nr, nc, 1.0);
7381 break;
7382
7384 retval = identity_matrix<boolNDArray> (nr, nc);
7385 break;
7386
7387 default:
7388 error ("eye: invalid class name");
7389 break;
7390 }
7391
7392 return retval;
7393}
7394
7395#undef INT_EYE_MATRIX
7396
7397DEFUN (eye, args, ,
7398 doc: /* -*- texinfo -*-
7399@deftypefn {} {@var{I} =} eye ()
7400@deftypefnx {} {@var{I} =} eye (@var{n})
7401@deftypefnx {} {@var{I} =} eye (@var{m}, @var{n})
7402@deftypefnx {} {@var{I} =} eye ([@var{m}, @var{n}])
7403@deftypefnx {} {@var{I} =} eye (@dots{}, @var{class})
7404Return an identity matrix.
7405
7406If called with no arguments, return the scalar value @code{1}.
7407
7408If invoked with a single scalar argument @var{n}, return a square @nospell{NxN}
7409identity matrix.
7410
7411If supplied two scalar arguments (@var{m}, @var{n}), or a 2-element vector
7412@w{@code{[@var{m}, @var{n}]}}, return an @nospell{MxN} identity matrix with
7413@var{m} rows and @var{n} columns.
7414
7415The optional argument @var{class} specifies the return type of the matrix and
7416defaults to @qcode{"double"}.
7417
7418Example 1 : 1-input, square identity matrix
7419
7420@example
7421@group
7422eye (3)
7423 @xresult{} 1 0 0
7424 0 1 0
7425 0 0 1
7426@end group
7427@end example
7428
7429Example 2 : following expressions all produce 2x2 identity matrix
7430
7431@example
7432@group
7433eye (2) @equiv{} eye (2, 2) @equiv{} eye (size ([1, 2; 3, 4]))
7434 @xresult{} 1 0
7435 0 1
7436@end group
7437@end example
7438
7439Example 3 : 2x2 uint8 identity matrix
7440
7441@example
7442I = eye (2, "uint8")
7443@end example
7444
7445Programming Note: Calling @code{eye} with no arguments is equivalent to calling
7446it with an argument of @code{1}. Any negative dimensions are treated as zero.
7447These odd definitions are for compatibility with @sc{matlab}.
7448@seealso{speye, ones, zeros}
7449@end deftypefn */)
7450{
7451 int nargin = args.length ();
7452
7454
7455 // Check for type information.
7456
7457 if (nargin > 0 && args(nargin-1).is_string ())
7458 {
7459 std::string nm = args(nargin-1).string_value ();
7460 nargin--;
7461
7463 }
7464
7465 if (nargin > 2)
7466 print_usage ();
7467
7468 octave_value retval;
7469
7470 if (nargin == 0)
7471 retval = identity_matrix (1, 1, dt);
7472 else if (nargin == 1)
7473 {
7474 octave_idx_type nr, nc;
7475 get_dimensions (args(0), "eye", nr, nc);
7476
7477 retval = identity_matrix (nr, nc, dt);
7478 }
7479 else
7480 {
7481 octave_idx_type nr, nc;
7482 get_dimensions (args(0), args(1), "eye", nr, nc);
7483
7484 retval = identity_matrix (nr, nc, dt);
7485 }
7486
7487 return retval;
7488}
7489
7490/*
7491%!assert (full (eye (3)), [1, 0, 0; 0, 1, 0; 0, 0, 1])
7492%!assert (full (eye (2, 3)), [1, 0, 0; 0, 1, 0])
7493
7494%!assert (full (eye (3, "single")), single ([1, 0, 0; 0, 1, 0; 0, 0, 1]))
7495%!assert (full (eye (2, 3, "single")), single ([1, 0, 0; 0, 1, 0]))
7496
7497%!assert (eye (3, "int8"), int8 ([1, 0, 0; 0, 1, 0; 0, 0, 1]))
7498%!assert (eye (2, 3, "int8"), int8 ([1, 0, 0; 0, 1, 0]))
7499
7500## Note: Matlab compatibility requires using 0 for negative dimensions.
7501%!assert (size (eye (2, -3)), [2, 0])
7502
7503## Test input validation
7504%!error eye (1, 2, 3)
7505%!error <conversion of 1.1 .*failed> eye (1.1)
7506%!error <conversion of 1.1 .*failed> eye (1, 1.1)
7507%!error <conversion of 1.1 .*failed> eye ([1, 1.1])
7508*/
7509
7510template <typename MT>
7511static octave_value
7512do_linspace (const octave_value& base, const octave_value& limit,
7514{
7515 typedef typename MT::column_vector_type CVT;
7516 typedef typename MT::element_type T;
7517
7518 octave_value retval;
7519
7520 if (base.is_scalar_type ())
7521 {
7522 T bs = octave_value_extract<T> (base);
7523 if (limit.is_scalar_type ())
7524 {
7525 T ls = octave_value_extract<T> (limit);
7526 retval = linspace (bs, ls, n);
7527 }
7528 else
7529 {
7530 CVT lv = octave_value_extract<CVT> (limit);
7531 CVT bv (lv.numel (), bs);
7532 retval = linspace (bv, lv, n);
7533 }
7534 }
7535 else
7536 {
7537 CVT bv = octave_value_extract<CVT> (base);
7538 if (limit.is_scalar_type ())
7539 {
7540 T ls = octave_value_extract<T> (limit);
7541 CVT lv (bv.numel (), ls);
7542 retval = linspace (bv, lv, n);
7543 }
7544 else
7545 {
7546 CVT lv = octave_value_extract<CVT> (limit);
7547 retval = linspace (bv, lv, n);
7548 }
7549 }
7550
7551 return retval;
7552}
7553
7554DEFUN (linspace, args, ,
7555 doc: /* -*- texinfo -*-
7556@deftypefn {} {@var{y} =} linspace (@var{start}, @var{end})
7557@deftypefnx {} {@var{y} =} linspace (@var{start}, @var{end}, @var{n})
7558Return a row vector with @var{n} linearly spaced elements between @var{start}
7559and @var{end}.
7560
7561If the number of elements @var{n} is greater than one, then the endpoints
7562@var{start} and @var{end} are always included in the range. If @var{start} is
7563greater than @var{end}, the elements are stored in decreasing order. If the
7564number of points @var{n} is not specified, a value of 100 is used.
7565
7566The @code{linspace} function returns a row vector when both @var{start} and
7567@var{end} are scalars. If one, or both, inputs are vectors, then
7568@code{linspace} transforms them to column vectors and returns a matrix where
7569each row is an independent sequence between
7570@w{@code{@var{start}(@var{row_n}), @var{end}(@var{row_n})}}.
7571
7572Programming Notes: For compatibility with @sc{matlab}, return the second
7573argument (@var{end}) when a single value (@var{n} = 1) is requested. If
7574@var{n} is not an integer then @code{floor (@var{n})} is used to round the
7575number of elements. If @var{n} is zero or negative then an empty 1x0 matrix
7576is returned.
7577@seealso{colon, logspace}
7578@end deftypefn */)
7579{
7580 int nargin = args.length ();
7581
7582 if (nargin != 2 && nargin != 3)
7583 print_usage ();
7584
7585 octave_idx_type npoints = 100;
7586 if (nargin == 3)
7587 {
7588 // Apparently undocumented Matlab. If the third arg is an empty
7589 // numeric value, the number of points defaults to 1.
7590 octave_value arg_3 = args(2);
7591
7592 if (arg_3.isnumeric () && arg_3.isempty ())
7593 npoints = 1;
7594 else if (! arg_3.is_scalar_type ())
7595 error ("linspace: N must be a scalar");
7596 else
7597 // Even if third arg is not an integer, it must be cast to int
7598 npoints = arg_3.idx_type_value ();
7599 }
7600
7601 octave_value arg_1 = args(0);
7602 octave_value arg_2 = args(1);
7603
7604 const dim_vector& sz1 = arg_1.dims ();
7605 bool isvector1 = sz1.ndims () == 2 && (sz1(0) == 1 || sz1(1) == 1);
7606 const dim_vector& sz2 = arg_2.dims ();
7607 bool isvector2 = sz2.ndims () == 2 && (sz2(0) == 1 || sz2(1) == 1);
7608
7609 if (! isvector1 || ! isvector2)
7610 error ("linspace: START, END must be scalars or vectors");
7611
7612 octave_value retval;
7613
7614 if (arg_1.is_single_type () || arg_2.is_single_type ())
7615 {
7616 if (arg_1.iscomplex () || arg_2.iscomplex ())
7617 retval = do_linspace<FloatComplexMatrix> (arg_1, arg_2, npoints);
7618 else
7619 retval = do_linspace<FloatMatrix> (arg_1, arg_2, npoints);
7620 }
7621 else
7622 {
7623 if (arg_1.iscomplex () || arg_2.iscomplex ())
7624 retval = do_linspace<ComplexMatrix> (arg_1, arg_2, npoints);
7625 else
7626 retval = do_linspace<Matrix> (arg_1, arg_2, npoints);
7627 }
7628
7629 return retval;
7630}
7631
7632/*
7633%!test
7634%! x1 = linspace (1, 2);
7635%! x2 = linspace (1, 2, 10);
7636%! x3 = linspace (1, -2, 10);
7637%! assert (size (x1) == [1, 100] && x1(1) == 1 && x1(100) == 2);
7638%! assert (x1(2) - x1(1), (2 - 1)/ (100 - 1), eps);
7639%! assert (size (x2) == [1, 10] && x2(1) == 1 && x2(10) == 2);
7640%! assert (x2(2) - x2(1), (2 - 1)/ (10 - 1), eps);
7641%! assert (size (x3) == [1, 10] && x3(1) == 1 && x3(10) == -2);
7642%! assert (x3(2) - x3(1), (-2 - 1)/ (10 - 1), eps);
7643
7644## Test complex values
7645%!test
7646%! exp = [1+0i, 2-1.25i, 3-2.5i, 4-3.75i, 5-5i];
7647%! obs = linspace (1, 5-5i, 5);
7648%! assert (obs, exp);
7649
7650## Test support for vectors in START and END
7651%!assert (linspace ([1 2 3], [7 8 9]),
7652%! [linspace(1, 7); linspace(2, 8); linspace(3, 9)], 10*eps)
7653%!assert (linspace ([1 2 3]', [7 8 9]'),
7654%! [linspace(1, 7); linspace(2, 8); linspace(3, 9)], 10*eps)
7655%!assert (linspace ([1 2 3], 9),
7656%! [linspace(1, 9); linspace(2, 9); linspace(3, 9)], 10*eps)
7657%!assert (linspace ([1 2 3]', 9),
7658%! [linspace(1, 9); linspace(2, 9); linspace(3, 9)], 10*eps)
7659%!assert (linspace (1, [7 8 9]),
7660%! [linspace(1, 7); linspace(1, 8); linspace(1, 9)], 10*eps)
7661%!assert (linspace (1, [7 8 9]'),
7662%! [linspace(1, 7); linspace(1, 8); linspace(1, 9)], 10*eps)
7663
7664## Test class of output
7665%!assert (class (linspace (1, 2)), "double")
7666%!assert (class (linspace (single (1), 2)), "single")
7667%!assert (class (linspace (1, single (2))), "single")
7668
7669## Test symmetry
7670%!test <*56659>
7671%! x = linspace (-1, 1, 10);
7672%! assert (all (x == -fliplr (x)));
7673%! x = linspace (-1, 1, 11);
7674%! assert (all (x == -fliplr (x)));
7675
7676%!test <*56659>
7677%! x = linspace (-1-1i, 1+1i, 10);
7678%! assert (all (x == -fliplr (x)));
7679%! x = linspace (-1-1i, 1+1i, 11);
7680%! assert (all (x == -fliplr (x)));
7681
7682%!test <*56659>
7683%! x = linspace (single (-1), 1, 10);
7684%! assert (all (x == -fliplr (x)));
7685%! x = linspace (single (-1), 1, 11);
7686%! assert (all (x == -fliplr (x)));
7687
7688%!test <*56659>
7689%! x = linspace (single (-1-1i), 1+1i, 10);
7690%! assert (all (x == -fliplr (x)));
7691%! x = linspace (single (-1-1i), 1+1i, 11);
7692%! assert (all (x == -fliplr (x)));
7693
7694## Test obscure Matlab compatibility options
7695%!assert (linspace (0, 1, []), 1)
7696%!assert (linspace (10, 20, 2), [10 20])
7697%!assert (linspace (10, 20, 1), [20])
7698%!assert (linspace (10, 20, 0), zeros (1, 0))
7699%!assert (linspace (10, 20, -1), zeros (1, 0))
7700%!assert (numel (linspace (0, 1, 2+eps)), 2)
7701%!assert (numel (linspace (0, 1, 2-eps)), 1)
7702%!assert (linspace (10, 20, 2.1), [10 20])
7703%!assert (linspace (10, 20, 2.9), [10 20])
7704%!assert (linspace (Inf, Inf, 3), [Inf, Inf, Inf])
7705%!assert (linspace (-Inf, -Inf, 3), [-Inf, -Inf, -Inf])
7706%!assert (linspace (-Inf, Inf, 3), [-Inf, 0, Inf])
7707## Octave prefers to return NaN which indicates failure of algorithm.
7708%!assert (linspace (-Inf, Inf, 4), [-Inf, NaN, NaN, Inf])
7709%!assert (linspace (-Inf, 0, 3), [-Inf, NaN, 0])
7710%!assert (linspace (-Inf, 0, 4), [-Inf, NaN, NaN, 0])
7711%!assert (linspace (Inf + 1i, Inf + 1i, 3), [Inf + 1i, Inf + 1i, Inf + 1i])
7712%!assert (linspace (-Inf - 1i, Inf + 1i, 3), [-Inf - 1i, 0 + 0i, Inf + 1i])
7713%!assert (linspace (-Inf - 1i, Inf + 2i, 3), [-Inf - 1i, NaN + 0.5i, Inf + 2i])
7714%!assert (linspace (-Inf - 3i, Inf + 0i, 4),
7715%! [-Inf - 3i, NaN - 2i, NaN - 1i, Inf + 0i])
7716%!assert (linspace (complex (-1, -Inf), complex (1, Inf), 3),
7717%! [complex(-1, -Inf), 0 + 0i, complex(1, Inf)])
7718%!assert (linspace (complex (-1, -Inf), complex (2, Inf), 3),
7719%! [complex(-1, -Inf), complex(0.5, NaN), complex(2, Inf)])
7720%!assert (linspace (complex (-3, -Inf), complex (0, Inf), 4),
7721%! [complex(-3, -Inf) complex(-2, NaN) complex(-1, NaN) complex(0, Inf)])
7722
7723## FIXME: Octave is not fully Matlab-compatible for some combinations of
7724## Inf/-Inf endpoints. See bug #56933. This was dubbed "Won't Fix"
7725## as Octave prefers to return NaN for some of these conditions to
7726## better reflect that the algorithm has failed. If the behavior in
7727## the future is made compatible these tests can be re-instated.
7728##%!assert <56933> (linspace (-Inf, Inf, 4), [-Inf, -Inf, Inf, Inf])
7729##%!assert <56933> (linspace (-Inf, Inf, 5), [-Inf, -Inf, 0, Inf, Inf])
7730##%!assert <56933> (linspace (0, Inf, 4), [0, Inf, Inf, Inf])
7731##%!assert <56933> (linspace (0, -Inf, 4), [0, -Inf, -Inf, -Inf])
7732##%!assert <56933> (linspace (-Inf, 0, 4), [-Inf, NaN, NaN, 0])
7733##%!assert <56933> (linspace (Inf, 0, 4), [Inf, NaN, NaN, 0])
7734##%!assert (1 ./ linspace (-0, 0, 4), [-Inf, Inf, Inf, Inf])
7735
7736## Test input validation
7737%!error <Invalid call> linspace ()
7738%!error <Invalid call> linspace (1, 2, 3, 4)
7739%!error <N must be a scalar> linspace (1, 2, [3, 4])
7740%!error <START, END must be scalars or vectors> linspace (ones (2,2), 2, 3)
7741%!error <START, END must be scalars or vectors> linspace (2, ones (2,2), 3)
7742%!error <START, END must be scalars or vectors> linspace (1, [], 3)
7743*/
7744
7745// FIXME: should accept dimensions as separate args for N-D
7746// arrays as well as 1-D and 2-D arrays.
7747
7748DEFUN (resize, args, ,
7749 doc: /* -*- texinfo -*-
7750@deftypefn {} {@var{B} =} resize (@var{A}, @var{m})
7751@deftypefnx {} {@var{B} =} resize (@var{A}, @var{m}, @var{n}, @dots{})
7752@deftypefnx {} {@var{B} =} resize (@var{A}, [@var{m} @var{n} @dots{}])
7753Resize @var{A} cutting off elements as necessary.
7754
7755In the result, element with certain indices is equal to the corresponding
7756element of @var{A} if the indices are within the bounds of @var{A}; otherwise,
7757the element is set to zero.
7758
7759In other words, the statement
7760
7761@example
7762B = resize (A, dv)
7763@end example
7764
7765@noindent
7766is equivalent to the following code:
7767
7768@example
7769@group
7770B = zeros (dv, class (A));
7771sz = min (dv, size (A));
7772for i = 1:length (sz)
7773 idx@{i@} = 1:sz(i);
7774endfor
7775B(idx@{:@}) = A(idx@{:@});
7776@end group
7777@end example
7778
7779@noindent
7780but is performed more efficiently.
7781
7782If only @var{m} is supplied, and it is a scalar, the dimension of the result is
7783@var{m}-by-@var{m}. If @var{m}, @var{n}, @dots{} are all scalars, then the
7784dimensions of the result are @var{m}-by-@var{n}-by-@enddots{} If given a
7785vector as input, then the dimensions of the result are given by the elements of
7786that vector.
7787
7788An object can be resized to more dimensions than it has; in such case the
7789missing dimensions are assumed to be 1. Resizing an object to fewer dimensions
7790is not possible.
7791@seealso{reshape, postpad, prepad, cat}
7792@end deftypefn */)
7793{
7794 int nargin = args.length ();
7795
7796 if (nargin < 2)
7797 print_usage ();
7798
7799 octave_value retval;
7800
7801 if (nargin == 2)
7802 {
7803 Array<double> vec = args(1).vector_value ();
7804 int ndim = vec.numel ();
7805 if (ndim == 1)
7806 {
7807 octave_idx_type m = static_cast<octave_idx_type> (vec(0));
7808 retval = args(0);
7809 retval = retval.resize (dim_vector (m, m), true);
7810 }
7811 else
7812 {
7813 dim_vector dv;
7814 dv.resize (ndim);
7815 for (int i = 0; i < ndim; i++)
7816 dv(i) = static_cast<octave_idx_type> (vec(i));
7817 retval = args(0);
7818 retval = retval.resize (dv, true);
7819 }
7820 }
7821 else
7822 {
7823 dim_vector dv;
7824 dv.resize (nargin - 1);
7825 for (octave_idx_type i = 1; i < nargin; i++)
7826 dv(i-1) = static_cast<octave_idx_type> (args(i).scalar_value ());
7827
7828 retval = args(0);
7829 retval = retval.resize (dv, true);
7830 }
7831
7832 return retval;
7833}
7834
7835// FIXME: should use octave_idx_type for dimensions.
7836
DEFUN (reshape, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{B} =} reshape (@var{A}, @var{m}, @var{n}, @dots{})
@deftypefnx {} {@var{B} =} reshape (@var{A}, [@var{m} @var{n} @dots{}])
@deftypefnx {} {@var{B} =} reshape (@var{A}, @dots{}, [], @dots{})
@deftypefnx {} {@var{B} =} reshape (@var{A}, @var{size})
Return a matrix with the specified dimensions (@var{m}, @var{n}, @dots{})
whose elements are taken from the matrix @var{A}.

The elements of the matrix are accessed in column-major order (like Fortran
arrays are stored).

The following code demonstrates reshaping a 1x4 row vector into a 2x2 square
matrix.

@example
@group
reshape ([1, 2, 3, 4], 2, 2)
  @xresult{} 1 3
     2 4
@end group
@end example

@noindent
Note that the total number of elements in the original matrix
(@code{prod (size (@var{A}))}) must match the total number of elements
in the new matrix (@code{prod ([@var{m} @var{n} @dots{}])}).

A single dimension of the return matrix may be left unspecified and Octave
will determine its size automatically. An empty matrix ([]) is used to flag
the unspecified dimension.
@seealso{resize, vec, postpad, cat, squeeze}
@end deftypefn */)
{
  int nargin = args.length ();

  if (nargin < 2)
    print_usage ();

  octave_value retval;

  dim_vector new_dims;

  if (nargin == 2)
    {
      // Single SIZE argument: a vector of dimensions, e.g. reshape (A, [m n]).
      Array<octave_idx_type> new_size = args(1).octave_idx_type_vector_value ();

      if (new_size.numel () < 2)
        error ("reshape: SIZE must have 2 or more dimensions");

      new_dims = dim_vector::alloc (new_size.numel ());

      for (octave_idx_type i = 0; i < new_size.numel (); i++)
        {
          if (new_size(i) < 0)
            error ("reshape: SIZE must be non-negative");

          new_dims(i) = new_size(i);
        }
    }
  else
    {
      // Dimensions given as separate arguments; at most one of them may be
      // the empty matrix [], meaning "derive this dimension from the total
      // number of elements of A".
      new_dims = dim_vector::alloc (nargin-1);
      int empty_dim = -1;  // 1-based argument position of the [] dimension

      for (int i = 1; i < nargin; i++)
        {
          if (args(i).isempty ())
            {
              if (empty_dim > 0)
                error ("reshape: only a single dimension can be unknown");

              empty_dim = i;
              // Placeholder value 1 so new_dims.numel () below yields the
              // product of the *known* dimensions only.
              new_dims(i-1) = 1;
            }
          else
            {
              new_dims(i-1) = args(i).idx_type_value ();

              if (new_dims(i-1) < 0)
                error ("reshape: SIZE must be non-negative");
            }
        }

      if (empty_dim > 0)
        {
          // Product of the known dimensions (the placeholder contributes 1).
          octave_idx_type nel = new_dims.numel ();

          if (nel == 0)
            new_dims(empty_dim-1) = 0;
          else
            {
              // The unknown dimension must divide numel (A) exactly.
              octave_idx_type a_nel = args(0).numel ();
              octave_idx_type size_empty_dim = a_nel / nel;

              if (a_nel != size_empty_dim * nel)
                error ("reshape: SIZE is not divisible by the product of "
                       "known dimensions (= %" OCTAVE_IDX_TYPE_FORMAT ")",
                       nel);

              new_dims(empty_dim-1) = size_empty_dim;
            }
        }
    }

  retval = args(0).reshape (new_dims);

  return retval;
}
7946
7947/*
7948%!assert (size (reshape (ones (4, 4), 2, 8)), [2, 8])
7949%!assert (size (reshape (ones (4, 4), 8, 2)), [8, 2])
7950%!assert (size (reshape (ones (15, 4), 1, 60)), [1, 60])
7951%!assert (size (reshape (ones (15, 4), 60, 1)), [60, 1])
7952
7953%!assert (size (reshape (ones (4, 4, "single"), 2, 8)), [2, 8])
7954%!assert (size (reshape (ones (4, 4, "single"), 8, 2)), [8, 2])
7955%!assert (size (reshape (ones (15, 4, "single"), 1, 60)), [1, 60])
7956%!assert (size (reshape (ones (15, 4, "single"), 60, 1)), [60, 1])
7957
7958%!assert <*64080> (size (reshape (sparse (0, 1), 0, 0)), [0, 0])
7959
7960%!test
7961%! s.a = 1;
7962%! fail ("reshape (s, 2, 3)", "can't reshape 1x1 array to 2x3 array");
7963
7964%!error reshape ()
7965%!error reshape (1, 2, 3, 4)
7966%!error <SIZE must have 2 or more dimensions> reshape (1:3, 3)
7967%!error <SIZE must be non-negative> reshape (1:3, [3 -1])
7968%!error <only a single dimension can be unknown> reshape (1:3, 1,[],[],3)
7969%!error <SIZE must be non-negative> reshape (1:3, 3, -1)
7970%!error <SIZE is not divisible> reshape (1:3, 3, [], 2)
7971*/
7972
7973DEFUN (vec, args, ,
7974 doc: /* -*- texinfo -*-
7975@deftypefn {} {@var{v} =} vec (@var{x})
7976@deftypefnx {} {@var{v} =} vec (@var{x}, @var{dim})
7977Return the vector obtained by stacking the columns of the matrix @var{x}
7978one above the other.
7979
7980Without @var{dim} this is equivalent to @code{@var{x}(:)}.
7981
7982If @var{dim} is supplied, the dimensions of @var{v} are set to @var{dim}
7983with all elements along the last dimension. This is equivalent to
7984@code{shiftdim (@var{x}(:), 1-@var{dim})}.
7985@seealso{vech, resize, cat}
7986@end deftypefn */)
7987{
7988 int nargin = args.length ();
7989
7990 if (nargin < 1 || nargin > 2)
7991 print_usage ();
7992
7993 int dim = 1;
7994 if (nargin == 2)
7995 {
7996 dim = args(1).idx_type_value ();
7997
7998 if (dim < 1)
7999 error ("vec: DIM must be greater than zero");
8000 }
8001
8003 octave_value arg = args(0);
8004
8005 octave_value retval = arg.single_subsref ("(", colon);
8006
8007 if (dim > 1)
8008 {
8009 dim_vector new_dims = dim_vector::alloc (dim);
8010
8011 for (int i = 0; i < dim-1; i++)
8012 new_dims(i) = 1;
8013
8014 new_dims(dim-1) = retval.numel ();
8015
8016 retval = retval.reshape (new_dims);
8017 }
8018
8019 return retval;
8020}
8021
8022/*
8023%!assert (vec ([1, 2; 3, 4]), [1; 3; 2; 4])
8024%!assert (vec ([1, 3, 2, 4]), [1; 3; 2; 4])
8025%!assert (vec ([1, 2, 3, 4], 2), [1, 2, 3, 4])
8026%!assert (vec ([1, 2; 3, 4]), vec ([1, 2; 3, 4], 1))
8027%!assert (vec ([1, 2; 3, 4], 1), [1; 3; 2; 4])
8028%!assert (vec ([1, 2; 3, 4], 2), [1, 3, 2, 4])
8029%!assert (vec ([1, 3; 2, 4], 3), reshape ([1, 2, 3, 4], 1, 1, 4))
8030%!assert (vec ([1, 3; 2, 4], 3), shiftdim (vec ([1, 3; 2, 4]), -2))
8031
8032%!error vec ()
8033%!error vec (1, 2, 3)
8034%!error vec ([1, 2; 3, 4], 0)
8035*/
8036
8037DEFUN (squeeze, args, ,
8038 doc: /* -*- texinfo -*-
8039@deftypefn {} {@var{B} =} squeeze (@var{A})
8040Remove singleton dimensions from @var{A} and return the result.
8041
8042Note that for compatibility with @sc{matlab}, all objects have
8043a minimum of two dimensions and row vectors are left unchanged.
8044@seealso{reshape}
8045@end deftypefn */)
8046{
8047 if (args.length () != 1)
8048 print_usage ();
8049
8050 return ovl (args(0).squeeze ());
8051}
8052
8053DEFUN (full, args, ,
8054 doc: /* -*- texinfo -*-
8055@deftypefn {} {@var{FM} =} full (@var{SM})
8056Return a full storage matrix from a sparse, diagonal, or permutation matrix,
8057or from a range.
8058@seealso{sparse, issparse}
8059@end deftypefn */)
8060{
8061 if (args.length () != 1)
8062 print_usage ();
8063
8064 return ovl (args(0).full_value ());
8065}
8066
8067// Compute various norms of the vector X.
8068
DEFUN (norm, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{n} =} norm (@var{A})
@deftypefnx {} {@var{n} =} norm (@var{A}, @var{p})
@deftypefnx {} {@var{n} =} norm (@var{A}, @var{p}, @var{opt})
Compute the p-norm of the matrix @var{A}.

If the second argument is not given, @w{@code{p = 2}}@ is used.

If @var{A} is a matrix (or sparse matrix):

@table @asis
@item @var{p} = @code{1}
1-norm, the largest column sum of the absolute values of @var{A}.

@item @var{p} = @code{2}
Largest singular value of @var{A}.

@item @var{p} = @code{Inf} or @qcode{"inf"}
@cindex infinity norm
Infinity norm, the largest row sum of the absolute values of @var{A}.

@item @var{p} = @qcode{"fro"}
@cindex @nospell{Frobenius} norm
@nospell{Frobenius} norm of @var{A},
@code{sqrt (sum (diag (@var{A}' * @var{A})))}.

@item other @var{p}, @code{@var{p} > 1}
@cindex general p-norm
maximum @code{norm (A*x, p)} such that @code{norm (x, p) == 1}
@end table

If @var{A} is a vector or a scalar:

@table @asis
@item @var{p} = @code{Inf} or @qcode{"inf"}
@code{max (abs (@var{A}))}.

@item @var{p} = @code{-Inf}
@code{min (abs (@var{A}))}.

@item @var{p} = @qcode{"fro"}
@nospell{Frobenius} norm of @var{A}, @code{sqrt (sumsq (abs (A)))}.

@item @var{p} = 0
Hamming norm---the number of nonzero elements.

@item other @var{p}, @code{@var{p} > 1}
p-norm of @var{A}, @code{(sum (abs (@var{A}) .^ @var{p})) ^ (1/@var{p})}.

@item other @var{p} @code{@var{p} < 1}
the p-pseudonorm defined as above.
@end table

If @var{opt} is the value @qcode{"rows"}, treat each row as a vector and
compute its norm. The result is returned as a column vector.
Similarly, if @var{opt} is @qcode{"columns"} or @qcode{"cols"} then
compute the norms of each column and return a row vector.
@seealso{normest, normest1, vecnorm, cond, svd}
@end deftypefn */)
{
  int nargin = args.length ();

  if (nargin < 1 || nargin > 3)
    print_usage ();

  octave_value x_arg = args(0);

  // N-D arrays are not supported; the xnorm family is 2-D only.
  if (x_arg.ndims () != 2)
    error ("norm: only valid for 2-D objects");

  // Decode an optional trailing string option.  It may select a mode
  // ("rows"/"cols"/"columns") or directly name a norm ("fro"/"inf"/"-inf").
  enum {sfmatrix, sfcols, sfrows, sffrob, sfinf, sfneginf} strflag = sfmatrix;
  if (nargin > 1 && args(nargin-1).is_string ())
    {
      // Option matching is case-insensitive.
      std::string str = args(nargin-1).string_value ();
      std::transform (str.begin (), str.end (), str.begin (), tolower);
      if (str == "cols" || str == "columns")
        strflag = sfcols;
      else if (str == "rows")
        strflag = sfrows;
      else if (str == "fro")
        strflag = sffrob;
      else if (str == "inf")
        strflag = sfinf;
      else if (str == "-inf")
        strflag = sfneginf;
      else
        error ("norm: unrecognized option: %s", str.c_str ());

      // we've handled the last parameter, so act as if it was removed
      nargin--;
    }

  // P defaults to 2; an empty P also means 2 (Matlab compatibility).
  octave_value p_arg = (nargin > 1) ? args(1) : octave_value (2);

  if (p_arg.isempty ())
    p_arg = octave_value (2);
  else if (p_arg.is_string ())
    {
      // A string P ("fro"/"inf"/"-inf") is only valid together with a
      // "rows" or "cols" mode option; translate it to a numeric P.
      std::string str = p_arg.string_value ();
      std::transform (str.begin (), str.end (), str.begin (), tolower);
      if (strflag != sfcols && strflag != sfrows)
        error ("norm: invalid combination of options");

      if (str == "cols" || str == "columns" || str == "rows")
        error ("norm: invalid combination of options");

      if (str == "fro")
        p_arg = octave_value (2);
      else if (str == "inf")
        p_arg = numeric_limits<double>::Inf ();
      else if (str == "-inf")
        p_arg = -numeric_limits<double>::Inf ();
      else
        error ("norm: unrecognized option: %s", str.c_str ());
    }
  else if (! p_arg.is_scalar_type ())
    err_wrong_type_arg ("norm", p_arg);

  octave_value retval;

  // Dispatch to the appropriate norm computation.
  switch (strflag)
    {
    case sfmatrix:
      retval = xnorm (x_arg, p_arg);
      break;

    case sfcols:
      retval = xcolnorms (x_arg, p_arg);
      break;

    case sfrows:
      retval = xrownorms (x_arg, p_arg);
      break;

    case sffrob:
      retval = xfrobnorm (x_arg);
      break;

    case sfinf:
      retval = xnorm (x_arg, numeric_limits<double>::Inf ());
      break;

    case sfneginf:
      retval = xnorm (x_arg, -numeric_limits<double>::Inf ());
      break;
    }

  return retval;
}
8219
8220/*
8221%!shared x
8222%! x = [1, -3, 4, 5, -7];
8223%!assert (norm (x,0), 5)
8224%!assert (norm (x,1), 20)
8225%!assert (norm (x,2), 10)
8226%!assert (norm (x,3), 8.24257059961711, -4*eps)
8227%!assert (norm (x,Inf), 7)
8228%!assert (norm (x,-Inf), 1)
8229%!assert (norm (x,"inf"), 7)
8230%!assert (norm (x,"-Inf"), 1)
8231%!assert (norm (x,"fro"), 10, -eps)
8232%!assert (norm (x), 10)
8233%!assert (norm ([1e200, 1]), 1e200)
8234%!assert (norm ([3+4i, 3-4i, sqrt(31)]), 9, -4*eps)
8235%!shared m
8236%! m = magic (4);
8237%!assert (norm (m,1), 34)
8238%!assert (norm (m,2), 34, -eps)
8239%!assert (norm (m,3), 34, -sqrt (eps))
8240%!assert (norm (m,Inf), 34)
8241%!assert (norm (m,"inf"), 34)
8242%!shared m2, flo, fhi
8243%! m2 = [1,2;3,4];
8244%! flo = 1e-300;
8245%! fhi = 1e+300;
8246%!assert (norm (flo*m2,"fro"), sqrt (30)*flo, -eps)
8247%!assert (norm (fhi*m2,"fro"), sqrt (30)*fhi, -eps)
8248
8249%!shared x
8250%! x = single ([1, -3, 4, 5, -7]);
8251%!assert (norm (x,0), single (5))
8252%!assert (norm (x,1), single (20))
8253%!assert (norm (x,2), single (10))
8254%!assert (norm (x,3), single (8.24257059961711), -4* eps ("single"))
8255%!assert (norm (x,Inf), single (7))
8256%!assert (norm (x,-Inf), single (1))
8257%!assert (norm (x,"inf"), single (7))
8258%!assert (norm (x,"-Inf"), single (1))
8259%!assert (norm (x,"fro"), single (10), -eps ("single"))
8260%!assert (norm (x), single (10))
8261
8262%!test <67918>
8263%! ## fails with reference BLAS 3.10.0
8264%! assert (norm (single ([1e38, 1])), single (1e38));
8265
8266%!assert (norm (single ([3+4i, 3-4i, sqrt(31)])),
8267%! single (9), -4* eps ("single"))
8268%!shared m
8269%! m = single (magic (4));
8270%!assert (norm (m,1), single (34))
8271%!assert (norm (m,2), single (34), -eps ("single"))
8272%!assert (norm (m,3), single (34), -sqrt (eps ("single")))
8273%!assert (norm (m,Inf), single (34))
8274%!assert (norm (m,"inf"), single (34))
8275%!shared m2, flo, fhi
8276%! m2 = single ([1,2;3,4]);
8277%! flo = single (1e-300);
8278%! fhi = single (1e+300);
8279%!assert (norm (flo*m2,"fro"), single (sqrt (30)*flo), -eps ("single"))
8280%!assert (norm (fhi*m2,"fro"), single (sqrt (30)*fhi), -eps ("single"))
8281
8282## Hamming norm (p == 0)
8283%!assert (norm ([1, 0, 0, 0, 1], 0), 2)
8284
8285%!shared q
8286%! q = rand (1e3, 3);
8287%!assert (norm (q, 3, "rows"), sum (q.^3, 2).^(1/3), sqrt (eps))
8288%!assert (norm (q, "fro", "rows"), sum (q.^2, 2).^(1/2), sqrt (eps))
8289%!assert (norm (q, "fro", "rows"), sqrt (sumsq (q, 2)), sqrt (eps))
8290%!assert (norm (q, "fro", "cols"), sqrt (sumsq (q, 1)), sqrt (eps))
8291%!assert (norm (q, 3, "cols"), sum (q.^3, 1).^(1/3), sqrt (eps))
8292%!assert (norm (q, "inf", "rows"), norm (q, Inf, "rows"))
8293%!assert (norm (q, "inf", "cols"), norm (q, Inf, "cols"))
8294%!assert (norm (q, [], "rows"), norm (q, 2, "rows"))
8295%!assert (norm (q, [], "cols"), norm (q, 2, "cols"))
8296
8297%!test <30631>
8298%! ## Test for norm returning NaN on sparse matrix
8299%! A = sparse (2,2);
8300%! A(2,1) = 1;
8301%! assert (norm (A), 1);
8302
8303## Tests for single precision norm calculations.
8304## Verify that float norms accumulated in double do not overflow
8305## near float maximum.
8306##
8307## We need values where val^2 overflows float (~3.4e38) but
8308## the final norm sqrt(n*val^2) = sqrt(n)*val still fits in float.
8309## Using val = 1e19: val^2 = 1e38 (near float max), sqrt(1000)*1e19 ~ 3.2e20 (OK)
8310
8311%!test <67610>
8312%! ## Test 2-norm with values where val^2 would overflow float output
8313%! ## val = 1e19, val^2 = 1e38 (would overflow float), but result fits
8314%! x = single (1e19) * ones (1000, 1, "single");
8315%! result = norm (x, 2);
8316%! assert (isfinite (result));
8317%! xd = double (x);
8318%! expected = norm (xd, 2);
8319%! assert (double (result), expected, eps ("single") * expected);
8320
8321%!test <67610>
8322%! ## Test 1-norm - straightforward sum
8323%! ## Use moderately large values
8324%! x = single (1e30) * ones (1000, 1, "single");
8325%! result = norm (x, 1);
8326%! assert (isfinite (result));
8327%! xd = double (x);
8328%! expected = norm (xd, 1);
8329%! assert (double (result), expected, eps ("single") * expected);
8330
8331%!test <67610>
8332%! ## Test p-norm (p=1.5) with large values
8333%! x = single (1e20) * ones (1000, 1, "single");
8334%! result = norm (x, 1.5);
8335%! assert (isfinite (result));
8336%! xd = double (x);
8337%! expected = norm (xd, 1.5);
8338%! assert (double (result), expected, eps ("single") * expected);
8339
8340%!test <67610>
8341%! ## Test p-norm (p=0.5) - terms are val^0.5, no overflow concern
8342%! x = single (1e30) * ones (1000, 1, "single");
8343%! result = norm (x, 0.5);
8344%! assert (isfinite (result));
8345%! xd = double (x);
8346%! expected = norm (xd, 0.5);
8347%! assert (double (result), expected, eps ("single") * expected);
8348
8349%!test <67610>
8350%! ## Test p-norm (p=3) with values where val^3 overflows float
8351%! ## val = 1e12, val^3 = 1e36 (near float max in accumulator)
8352%! x = single (1e12) * ones (1000, 1, "single");
8353%! result = norm (x, 3);
8354%! assert (isfinite (result));
8355%! xd = double (x);
8356%! expected = norm (xd, 3);
8357%! assert (double (result), expected, eps ("single") * expected);
8358
8359%!test <67610>
8360%! ## Test complex single precision 2-norm
8361%! ## |z|^2 = re^2 + im^2, use values where this would overflow float
8362%! x = single (1e19) * complex (ones (1000, 1, "single"), ones (1000, 1, "single"));
8363%! result = norm (x, 2);
8364%! assert (isfinite (result));
8365%! xd = double (x);
8366%! expected = norm (xd, 2);
8367%! assert (double (result), expected, eps ("single") * expected);
8368
8369%!test <67610>
8370%! ## Test that single and double give consistent results for normal values
8371%! x = single (randn (1000, 1));
8372%! xd = double (x);
8373%! for p = [0.5, 1, 1.5, 2, 3, Inf]
8374%! rs = norm (x, p);
8375%! rd = norm (xd, p);
8376%! assert (double (rs), rd, eps ("single") * rd);
8377%! endfor
8378
8379%!test <67610>
8380%! ## Test inf-norm (no accumulation, just max)
8381%! fmax = realmax ("single");
8382%! x = (fmax / 2) * ones (1000, 1, "single");
8383%! x(500) = fmax - 1;
8384%! result = norm (x, Inf);
8385%! assert (result, fmax - 1);
8386
8387%!test <67610>
8388%! ## Test -inf norm (min absolute value)
8389%! x = single ([1, 2, 0.5, 3]);
8390%! result = norm (x, -Inf);
8391%! assert (result, single (0.5));
8392*/
8393
8394/*
8395## Test input validation
8396%!error norm ()
8397%!error norm (1,2,3,4)
8398%!error <unrecognized option> norm (1, "invalid")
8399%!error <unrecognized option> norm (1, "rows", "invalid")
8400%!error <unrecognized option> norm (1, "invalid", "rows")
8401%!error <invalid combination of options> norm (1, "cols", "rows")
8402%!error <invalid combination of options> norm (1, "rows", "rows")
8403%!error <p must be .= 1> norm (ones (2,2), -Inf)
8404*/
8405
8406static octave_value
8407unary_op_defun_body (octave_value::unary_op op,
8408 const octave_value_list& args)
8409{
8410 if (args.length () != 1)
8411 print_usage ();
8412
8413 return unary_op (op, args(0));
8414}
8415
8416DEFUN (not, args, ,
8417 doc: /* -*- texinfo -*-
8418@deftypefn {} {@var{z} =} not (@var{x})
8419Return the logical NOT of @var{x}.
8420
8421This function is equivalent to the operator syntax @w{@code{! @var{x}}}.
8422@seealso{and, or, xor}
8423@end deftypefn */)
8424{
8425 return unary_op_defun_body (octave_value::op_not, args);
8426}
8427
8428DEFUN (uplus, args, ,
8429 doc: /* -*- texinfo -*-
8430@deftypefn {} {@var{B} =} uplus (@var{A})
8431This function and @w{@tcode{+ @var{A}}}@ are equivalent.
8432@seealso{uminus, plus}
8433@end deftypefn */)
8434{
8435 return unary_op_defun_body (octave_value::op_uplus, args);
8436}
8437
8438DEFUN (uminus, args, ,
8439 doc: /* -*- texinfo -*-
8440@deftypefn {} {@var{B} =} uminus (@var{A})
8441This function and @w{@tcode{- @var{A}}}@ are equivalent.
8442@seealso{uplus, minus}
8443@end deftypefn */)
8444{
8445 return unary_op_defun_body (octave_value::op_uminus, args);
8446}
8447
8448DEFUN (transpose, args, ,
8449 doc: /* -*- texinfo -*-
8450@deftypefn {} {@var{B} =} transpose (@var{A})
8451Return the transpose of @var{A}.
8452
8453This function and @tcode{@var{A}.'@:}@ are equivalent.
8454@seealso{ctranspose}
8455@end deftypefn */)
8456{
8457 return unary_op_defun_body (octave_value::op_transpose, args);
8458}
8459
8460/*
8461%!assert (2.', 2)
8462%!assert (2i.', 2i)
8463%!assert ([1:4].', [1;2;3;4])
8464%!assert ([1;2;3;4].', [1:4])
8465%!assert ([1,2;3,4].', [1,3;2,4])
8466%!assert ([1,2i;3,4].', [1,3;2i,4])
8467
8468%!assert (transpose ([1,2;3,4]), [1,3;2,4])
8469
8470%!assert (single (2).', single (2))
8471%!assert (single (2i).', single (2i))
8472%!assert (single ([1:4]).', single ([1;2;3;4]))
8473%!assert (single ([1;2;3;4]).', single ([1:4]))
8474%!assert (single ([1,2;3,4]).', single ([1,3;2,4]))
8475%!assert (single ([1,2i;3,4]).', single ([1,3;2i,4]))
8476
8477%!assert (transpose (single ([1,2;3,4])), single ([1,3;2,4]))
8478*/
8479
8480DEFUN (ctranspose, args, ,
8481 doc: /* -*- texinfo -*-
8482@deftypefn {} {@var{B} =} ctranspose (@var{A})
8483Return the complex conjugate transpose of @var{A}.
8484
8485This function and @tcode{@var{A}'}@ are equivalent.
8486@seealso{transpose}
8487@end deftypefn */)
8488{
8489 return unary_op_defun_body (octave_value::op_hermitian, args);
8490}
8491
8492/*
8493%!assert (2', 2)
8494%!assert (2i', -2i)
8495%!assert ([1:4]', [1;2;3;4])
8496%!assert ([1;2;3;4]', [1:4])
8497%!assert ([1,2;3,4]', [1,3;2,4])
8498%!assert ([1,2i;3,4]', [1,3;-2i,4])
8499
8500%!assert (ctranspose ([1,2i;3,4]), [1,3;-2i,4])
8501
8502%!assert (single (2)', single (2))
8503%!assert (single (2i)', single (-2i))
8504%!assert (single ([1:4])', single ([1;2;3;4]))
8505%!assert (single ([1;2;3;4])', single ([1:4]))
8506%!assert (single ([1,2;3,4])', single ([1,3;2,4]))
8507%!assert (single ([1,2i;3,4])', single ([1,3;-2i,4]))
8508
8509%!assert (ctranspose (single ([1,2i;3,4])), single ([1,3;-2i,4]))
8510*/
8511
8512static octave_value
8513binary_op_defun_body (octave_value::binary_op op,
8514 const octave_value_list& args)
8515{
8516 if (args.length () != 2)
8517 print_usage ();
8518
8519 return binary_op (op, args(0), args(1));
8520}
8521
8522static octave_value
8523binary_assoc_op_defun_body (octave_value::binary_op op,
8525 const octave_value_list& args)
8526{
8527 int nargin = args.length ();
8528
8529 if (nargin < 2)
8530 print_usage ();
8531
8532 octave_value retval;
8533
8534 if (nargin == 2)
8535 retval = binary_op (op, args(0), args(1));
8536 else
8537 {
8538 retval = binary_op (op, args(0), args(1));
8539
8540 for (int i = 2; i < nargin; i++)
8541 retval.assign (aop, args(i));
8542 }
8543
8544 return retval;
8545}
8546
8547DEFUN (plus, args, ,
8548 doc: /* -*- texinfo -*-
8549@deftypefn {} {@var{C} =} plus (@var{A}, @var{B})
8550@deftypefnx {} {@var{C} =} plus (@var{A1}, @var{A2}, @dots{})
8551This function and @w{@tcode{@var{A} + @var{B}}}@ are equivalent.
8552
8553If more arguments are given, the summation is applied
8554cumulatively from left to right:
8555
8556@example
8557(@dots{}((@var{A1} + @var{A2}) + @var{A3}) + @dots{})
8558@end example
8559
8560@seealso{minus, uplus}
8561@end deftypefn */)
8562{
8563 return binary_assoc_op_defun_body (octave_value::op_add,
8565}
8566
8567/*
8568%!assert (plus (1,1), 2)
8569%!assert (plus (1:3, 1), 2:4)
8570%!assert (plus (1:3, 1, 3), 5:7)
8571%!assert (plus (1,2,3,4,5,6,7,8,9), sum (1:9))
8572
8573## Test input validation for all functions which use binary_assoc_op_defun_body
8574%!error plus ()
8575%!error plus (1)
8576*/
8577
DEFUN (minus, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{C} =} minus (@var{A}, @var{B})
This function and @w{@tcode{@var{A} - @var{B}}}@ are equivalent.
@seealso{plus, uminus}
@end deftypefn */)
{
  // Thin wrapper: dispatch "-" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_sub, args);
}
8587
8588DEFUN (mtimes, args, ,
8589 doc: /* -*- texinfo -*-
8590@deftypefn {} {@var{C} =} mtimes (@var{A}, @var{B})
8591@deftypefnx {} {@var{C} =} mtimes (@var{A1}, @var{A2}, @dots{})
8592Return the matrix multiplication product of inputs.
8593
8594This function and @w{@tcode{@var{A} * @var{B}}}@ are equivalent.
8595If more arguments are given, the multiplication is applied
8596cumulatively from left to right:
8597
8598@example
8599(@dots{}((@var{A1} * @var{A2}) * @var{A3}) * @dots{})
8600@end example
8601
8602@seealso{times, plus, minus, rdivide, mrdivide, mldivide, mpower, tensorprod}
8603@end deftypefn */)
8604{
8605 return binary_assoc_op_defun_body (octave_value::op_mul,
8607}
8608
DEFUN (mrdivide, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{C} =} mrdivide (@var{A}, @var{B})
Return the matrix right division of @var{A} and @var{B}.

This function and @w{@tcode{@var{A} / @var{B}}}@ are equivalent.

If the system is not square, or if the coefficient matrix is singular, a
minimum norm solution is computed.
@seealso{mldivide, rdivide, plus, minus}
@end deftypefn */)
{
  // Thin wrapper: dispatch "/" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_div, args);
}
8623
DEFUN (mpower, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{C} =} mpower (@var{A}, @var{B})
Return the matrix power operation of @var{A} raised to the @var{B} power.

This function and @w{@tcode{@var{A} ^ @var{B}}}@ are equivalent.
@seealso{power, mtimes, plus, minus}
@end deftypefn */)
{
  // Thin wrapper: dispatch "^" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_pow, args);
}
8635/*
8636%!assert (complex (realmin, realmin) ^ realmax, 0)
8637*/
8638
DEFUN (mldivide, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{C} =} mldivide (@var{A}, @var{B})
Return the matrix left division of @var{A} and @var{B}.

This function and @w{@tcode{@var{A} @backslashchar{} @var{B}}}@ are equivalent.

If the system is not square, or if the coefficient matrix is singular, a
minimum norm solution is computed.
@seealso{mrdivide, ldivide, rdivide, linsolve}
@end deftypefn */)
{
  // Thin wrapper: dispatch "\" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_ldiv, args);
}
8653
DEFUN (lt, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{TF} =} lt (@var{A}, @var{B})
This function is equivalent to @w{@code{@var{A} < @var{B}}}.
@seealso{le, eq, ge, gt, ne}
@end deftypefn */)
{
  // Thin wrapper: dispatch "<" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_lt, args);
}
8663
DEFUN (le, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{TF} =} le (@var{A}, @var{B})
This function is equivalent to @w{@code{@var{A} <= @var{B}}}.
@seealso{eq, ge, gt, ne, lt}
@end deftypefn */)
{
  // Thin wrapper: dispatch "<=" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_le, args);
}
8673
DEFUN (eq, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{TF} =} eq (@var{A}, @var{B})
Return true if the two inputs are equal.

This function is equivalent to @w{@code{@var{A} == @var{B}}}.
@seealso{ne, isequal, le, ge, gt, ne, lt}
@end deftypefn */)
{
  // Thin wrapper: dispatch "==" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_eq, args);
}
8685
DEFUN (ge, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{TF} =} ge (@var{A}, @var{B})
This function is equivalent to @w{@code{@var{A} >= @var{B}}}.
@seealso{le, eq, gt, ne, lt}
@end deftypefn */)
{
  // Thin wrapper: dispatch ">=" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_ge, args);
}
8695
DEFUN (gt, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{TF} =} gt (@var{A}, @var{B})
This function is equivalent to @w{@code{@var{A} > @var{B}}}.
@seealso{le, eq, ge, ne, lt}
@end deftypefn */)
{
  // Thin wrapper: dispatch ">" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_gt, args);
}
8705
DEFUN (ne, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{TF} =} ne (@var{A}, @var{B})
Return true if the two inputs are not equal.

This function is equivalent to @w{@code{@var{A} != @var{B}}}.
@seealso{eq, isequal, le, ge, lt}
@end deftypefn */)
{
  // Thin wrapper: dispatch "!=" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_ne, args);
}
8717
8718DEFUN (times, args, ,
8719 doc: /* -*- texinfo -*-
8720@deftypefn {} {@var{C} =} times (@var{A}, @var{B})
8721@deftypefnx {} {@var{C} =} times (@var{A1}, @var{A2}, @dots{})
8722Return the element-by-element multiplication product of inputs.
8723
8724This function and @w{@tcode{@var{A} .* @var{B}}}@ are equivalent.
8725If more arguments are given, the multiplication is applied
8726cumulatively from left to right:
8727
8728@example
8729(@dots{}((@var{A1} .* @var{A2}) .* @var{A3}) .* @dots{})
8730@end example
8731
8732@seealso{mtimes, rdivide}
8733@end deftypefn */)
8734{
8735 return binary_assoc_op_defun_body (octave_value::op_el_mul,
8737}
8738
DEFUN (rdivide, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{C} =} rdivide (@var{A}, @var{B})
Return the element-by-element right division of @var{A} and @var{B}.

This function and @w{@tcode{@var{A} ./ @var{B}}}@ are equivalent.
@seealso{ldivide, mrdivide, times, plus}
@end deftypefn */)
{
  // Thin wrapper: dispatch "./" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_el_div, args);
}
8750
DEFUN (power, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{C} =} power (@var{A}, @var{B})
Return the element-by-element operation of @var{A} raised to the
@var{B} power.

This function and @w{@tcode{@var{A} .^ @var{B}}}@ are equivalent.

If several complex results are possible, returns the one with smallest
non-negative argument (angle). Use @code{realpow}, @code{realsqrt},
@code{cbrt}, or @code{nthroot} if a real result is preferred.

@seealso{mpower, realpow, realsqrt, cbrt, nthroot}
@end deftypefn */)
{
  // Thin wrapper: dispatch ".^" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_el_pow, args);
}
8768/*
8769%!assert (complex (realmin, realmin) .^ realmax, 0)
8770*/
8771
DEFUN (ldivide, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{C} =} ldivide (@var{A}, @var{B})
Return the element-by-element left division of @var{A} and @var{B}.

This function and @w{@tcode{@var{A} .@backslashchar{} @var{B}}}@ are
equivalent.
@seealso{rdivide, mldivide, times, plus}
@end deftypefn */)
{
  // Thin wrapper: dispatch ".\" through the generic binary-op machinery.
  return binary_op_defun_body (octave_value::op_el_ldiv, args);
}
8784
8785DEFUN (and, args, ,
8786 doc: /* -*- texinfo -*-
8787@deftypefn {} {@var{TF} =} and (@var{x}, @var{y})
8788@deftypefnx {} {@var{TF} =} and (@var{x1}, @var{x2}, @dots{})
8789Return the logical AND of @var{x} and @var{y}.
8790
8791This function is equivalent to the operator syntax
8792@w{@code{@var{x} & @var{y}}}. If more than two arguments are given, the
8793logical AND is applied cumulatively from left to right:
8794
8795@example
8796(@dots{}((@var{x1} & @var{x2}) & @var{x3}) & @dots{})
8797@end example
8798
8799@seealso{or, not, xor}
8800@end deftypefn */)
8801{
8802 return binary_assoc_op_defun_body (octave_value::op_el_and,
8804}
8805
8806DEFUN (or, args, ,
8807 doc: /* -*- texinfo -*-
8808@deftypefn {} {@var{TF} =} or (@var{x}, @var{y})
8809@deftypefnx {} {@var{TF} =} or (@var{x1}, @var{x2}, @dots{})
8810Return the logical OR of @var{x} and @var{y}.
8811
8812This function is equivalent to the operator syntax
8813@w{@code{@var{x} | @var{y}}}. If more than two arguments are given, the
8814logical OR is applied cumulatively from left to right:
8815
8816@example
8817(@dots{}((@var{x1} | @var{x2}) | @var{x3}) | @dots{})
8818@end example
8819
8820@seealso{and, not, xor}
8821@end deftypefn */)
8822{
8823 return binary_assoc_op_defun_body (octave_value::op_el_or,
8825}
8826
8827DEFUN (colon, args, ,
8828 doc: /* -*- texinfo -*-
8829@deftypefn {} {@var{r} =} colon (@var{base}, @var{limit})
8830@deftypefnx {} {@var{r} =} colon (@var{base}, @var{increment}, @var{limit})
8831Return the result of the colon expression corresponding to @var{base},
8832@var{limit}, and optionally, @var{increment}.
8833
8834This function is equivalent to the operator syntax
8835@w{@code{@var{base} : @var{limit}}}@ or
8836@w{@code{@var{base} : @var{increment} : @var{limit}}}.
8837@seealso{linspace}
8838@end deftypefn */)
8839{
8840 int nargin = args.length ();
8841
8842 if (nargin < 2 || nargin > 3)
8843 print_usage ();
8844
8845 return (nargin == 2
8846 ? colon_op (args(0), args(1))
8847 : colon_op (args(0), args(1), args(2)));
8848}
8849
// Wall-clock time (in seconds, as returned by sys::time) recorded by
// the most recent output-less call to tic().  The sentinel -1.0 means
// tic() has not been called yet; toc() errors in that case.
static double tic_toc_timestamp = -1.0;
8851
DEFUN (tic, args, nargout,
       doc: /* -*- texinfo -*-
@deftypefn {} {} tic ()
@deftypefnx {} {@var{id} =} tic ()
Initialize a wall-clock timer.

Calling @code{tic} without an output argument resets the internal timer.
Subsequent calls to @code{toc} return the number of seconds since the timer was
set.

If called with one output argument, @code{tic} creates a new timer instance and
returns a timer identifier @var{id}. The @var{id} is a scalar of type
@code{uint64} that may be passed to @code{toc} to check elapsed time on this
timer, rather than the default internal timer.

Example 1 : benchmarking code with internal timer

@example
@group
tic;
# many computations later@dots{}
elapsed_time = toc;
@end group
@end example

Example 2 : mixed timer id and internal timer

@example
@group
tic;
pause (1);
toc
@xresult{} Elapsed time is 1.0089 seconds.
id = tic;
pause (2);
toc (id)
@xresult{} Elapsed time is 2.01142 seconds.
toc
Elapsed time is 3.02308 seconds.
@end group
@end example

@noindent
Calling @code{tic} and @code{toc} in this way allows nested timing calls.

If you are more interested in the CPU time that your process used, you should
use the @code{cputime} function instead. The @code{tic} and @code{toc}
functions report the actual wall clock time that elapsed between the calls.
This may include time spent processing other jobs or doing nothing at all.
@seealso{toc, cputime}
@end deftypefn */)
{
  // tic() takes no inputs; extra arguments are ignored with a warning
  // rather than an error (historical behavior).
  if (args.length () != 0)
    warning ("tic: ignoring extra arguments");

  octave_value retval;
  sys::time now;
  double tmp = now.double_value ();

  if (nargout > 0)
    {
      // Encode the current time as a uint64 timer id: split into the
      // integer and fractional seconds, scale both by CLOCKS_PER_SEC
      // ticks per second.  toc() reverses this with div/mod by
      // CLOCKS_PER_SEC to recover the start time.
      double ip = 0.0;
      double frac = std::modf (tmp, &ip);
      uint64_t microsecs = static_cast<uint64_t> (CLOCKS_PER_SEC * frac);
      microsecs += CLOCKS_PER_SEC * static_cast<uint64_t> (ip);
      retval = octave_uint64 (microsecs);
    }
  else
    // No output requested: reset the shared internal timer instead.
    tic_toc_timestamp = tmp;

  return retval;
}
8924
DEFUN (toc, args, nargout,
       doc: /* -*- texinfo -*-
@deftypefn {} {} toc ()
@deftypefnx {} {} toc (@var{id})
@deftypefnx {} {@var{elapsed_time} =} toc (@dots{})
Measure elapsed time on a wall-clock timer.

With no arguments, return the number of seconds elapsed on the internal timer
since the last call to @code{tic}.

When given the identifier @var{id} of a specific timer, return the number of
seconds elapsed since the timer @var{id} was initialized.

@xref{XREFtic,,tic}, for examples of the use of @code{tic}/@code{toc}.

@seealso{tic, cputime}
@end deftypefn */)
{
  int nargin = args.length ();

  if (nargin > 1)
    print_usage ();

  // Default to the shared internal timer set by tic() with no output.
  double start_time = tic_toc_timestamp;

  if (nargin == 1)
    {
      // A timer id from tic(): decode the uint64 back into seconds by
      // undoing tic()'s CLOCKS_PER_SEC scaling (integer part via
      // division, fractional part via the remainder).
      octave_uint64 id = args(0).xuint64_scalar_value ("toc: invalid ID");

      uint64_t val = id.value ();

      start_time
        = (static_cast<double> (val / CLOCKS_PER_SEC)
           + static_cast<double> (val % CLOCKS_PER_SEC)
             / CLOCKS_PER_SEC);

      // FIXME: should we also check to see whether the start
      // time is after the beginning of this Octave session?
    }

  // -1 is the "never initialized" sentinel for the internal timer.
  if (start_time < 0)
    error ("toc: function called before timer initialization with tic()");

  sys::time now;

  double etime = now.double_value () - start_time;

  // With an output argument return the elapsed seconds; otherwise
  // print the Matlab-compatible message.
  octave_value retval;
  if (nargout > 0)
    retval = etime;
  else
    octave_stdout << "Elapsed time is " << etime << " seconds.\n";

  return retval;
}
8980
8981/*
8982%!shared id
8983%! id = tic ();
8984%!assert (isa (id, "uint64"))
8985%!assert (isa (toc (id), "double"))
8986*/
8987
8988DEFUN (cputime, args, ,
8989 doc: /* -*- texinfo -*-
8990@deftypefn {} {[@var{total}, @var{user}, @var{system}] =} cputime ();
8991Return the CPU time used by your Octave session.
8992
8993The first output is the total time spent executing your process and is equal
8994to the sum of second and third outputs, which are the number of CPU seconds
8995spent executing in user mode and the number of CPU seconds spent executing
8996in system mode, respectively.
8997
8998If your system does not have a way to report CPU time usage, @code{cputime}
8999returns 0 for each of its output values.
9000
9001Note that because Octave used some CPU time to start, it is reasonable
9002to check to see if @code{cputime} works by checking to see if the total
9003CPU time used is nonzero.
9004@seealso{tic, toc}
9005@end deftypefn */)
9006{
9007 if (args.length () != 0)
9008 print_usage ();
9009
9010 sys::cpu_time cpu_tm;
9011
9012 double usr = cpu_tm.user ();
9013 double sys = cpu_tm.system ();
9014
9015 return ovl (usr + sys, usr, sys);
9016}
9017
9018DEFUN (sort, args, nargout,
9019 doc: /* -*- texinfo -*-
9020@deftypefn {} {[@var{s}, @var{i}] =} sort (@var{x})
9021@deftypefnx {} {[@var{s}, @var{i}] =} sort (@var{x}, @var{dim})
9022@deftypefnx {} {[@var{s}, @var{i}] =} sort (@var{x}, @var{mode})
9023@deftypefnx {} {[@var{s}, @var{i}] =} sort (@var{x}, @var{dim}, @var{mode})
9024Return a copy of @var{x} with the elements arranged in increasing order.
9025
9026For matrices, @code{sort} orders the elements within columns
9027
9028For example:
9029
9030@example
9031@group
9032sort ([1, 2; 2, 3; 3, 1])
9033 @xresult{} 1 1
9034 2 2
9035 3 3
9036@end group
9037@end example
9038
9039If the optional argument @var{dim} is given, then the matrix is sorted
9040along the dimension defined by @var{dim}. The optional argument @var{mode}
9041defines the order in which the values will be sorted. Valid values of
9042@var{mode} are @qcode{"ascend"} or @qcode{"descend"}.
9043
9044The @code{sort} function may also be used to produce a matrix
9045containing the original row indices of the elements in the sorted
9046matrix. For example:
9047
9048@example
9049@group
9050[s, i] = sort ([1, 2; 2, 3; 3, 1])
9051 @xresult{} s = 1 1
9052 2 2
9053 3 3
9054 @xresult{} i = 1 3
9055 2 1
9056 3 2
9057@end group
9058@end example
9059
9060For equal elements, the indices are such that equal elements are listed
9061in the order in which they appeared in the original list.
9062
9063Sorting of complex entries is done first by magnitude
9064(@w{@code{abs (@var{z})}})@ and for any ties by phase angle
9065(@w{@code{angle (z)}}). For example:
9066
9067@example
9068@group
9069sort ([1+i; 1; 1-i])
9070 @xresult{} 1 + 0i
9071 1 - 1i
9072 1 + 1i
9073@end group
9074@end example
9075
9076NaN values are treated as being greater than any other value and are sorted
9077to the end of the list.
9078
9079The @code{sort} function may also be used to sort strings and cell arrays
9080of strings, in which case ASCII dictionary order (uppercase 'A' precedes
9081lowercase 'a') of the strings is used.
9082
9083The algorithm used in @code{sort} is optimized for the sorting of partially
9084ordered lists.
9085@seealso{sortrows, issorted}
9086@end deftypefn */)
9087{
9088 int nargin = args.length ();
9089
9090 if (nargin < 1 || nargin > 3)
9091 print_usage ();
9092
9093 sortmode smode = ASCENDING;
9094 bool return_idx = (nargout > 1);
9095 bool have_sortmode = (nargin > 1 && args(1).is_string ());
9096 octave_value arg = args(0);
9097
9098 int dim = 0;
9099 if (nargin > 1)
9100 {
9101 if (have_sortmode)
9102 {
9103 std::string mode = args(1).string_value ();
9104 if (mode == "ascend")
9105 smode = ASCENDING;
9106 else if (mode == "descend")
9107 smode = DESCENDING;
9108 else
9109 error (R"(sort: MODE must be either "ascend" or "descend")");
9110 }
9111 else
9112 {
9113 // Require dim to be positive real scalar.
9114 if (! args(1).is_scalar_type () || args(1).iscomplex ()
9115 || args(1).double_value () <= 0)
9116 error ("sort: DIM must be a positive scalar integer");
9117
9118 // Forbid fractional value input, also NaN input.
9119 dim = args(1).strict_int_value ("sort: DIM must be a positive scalar integer") - 1;
9120 }
9121 }
9122
9123 if (nargin > 2)
9124 {
9125 if (have_sortmode)
9126 error ("sort: DIM argument must precede MODE argument");
9127
9128 std::string mode = args(2).xstring_value ("sort: MODE must be a string");
9129
9130 if (mode == "ascend")
9131 smode = ASCENDING;
9132 else if (mode == "descend")
9133 smode = DESCENDING;
9134 else
9135 error (R"(sort: MODE must be either "ascend" or "descend")");
9136 }
9137
9138 const dim_vector dv = arg.dims ();
9139 if (nargin == 1 || have_sortmode)
9140 {
9141 dim = dv.first_non_singleton ();
9142 }
9143
9144 octave_value_list retval (return_idx ? 2 : 1);
9145
9146 if (return_idx)
9147 {
9149
9150 // NOTE: Can not change this to ovl() call because arg.sort changes sidx
9151 // and objects are declared const in ovl prototype.
9152 retval(0) = arg.sort (sidx, dim, smode);
9153 // Check for index dimension extent. Set to 1 for dimension >= ndims ()
9154 retval(1) = idx_vector (sidx, (dim < arg.ndims ()) ? dv(dim) : 1);
9155 }
9156 else
9157 retval = ovl (arg.sort (dim, smode));
9158
9159 return retval;
9160}
9161
9162/*
9163## Double
9164%!assert (sort ([NaN, 1, -1, 2, Inf]), [-1, 1, 2, Inf, NaN])
9165%!assert (sort ([NaN, 1, -1, 2, Inf], 1), [NaN, 1, -1, 2, Inf])
9166%!assert (sort ([NaN, 1, -1, 2, Inf], 2), [-1, 1, 2, Inf, NaN])
9167%!assert (sort ([NaN, 1, -1, 2, Inf], 3), [NaN, 1, -1, 2, Inf])
9168%!assert (sort ([NaN, 1, -1, 2, Inf], "ascend"), [-1, 1, 2, Inf, NaN])
9169%!assert (sort ([NaN, 1, -1, 2, Inf], 2, "ascend"), [-1, 1, 2, Inf, NaN])
9170%!assert (sort ([NaN, 1, -1, 2, Inf], "descend"), [NaN, Inf, 2, 1, -1])
9171%!assert (sort ([NaN, 1, -1, 2, Inf], 2, "descend"), [NaN, Inf, 2, 1, -1])
9172%!assert (sort ([3, 1, 7, 5; 8, 2, 6, 4]), [3, 1, 6, 4; 8, 2, 7, 5])
9173%!assert (sort ([3, 1, 7, 5; 8, 2, 6, 4], 1), [3, 1, 6, 4; 8, 2, 7, 5])
9174%!assert (sort ([3, 1, 7, 5; 8, 2, 6, 4], 2), [1, 3, 5, 7; 2, 4, 6, 8])
9175%!assert (sort (1), 1)
9176
9177%!test
9178%! [v, i] = sort ([NaN, 1, -1, Inf, 1]);
9179%! assert (v, [-1, 1, 1, Inf, NaN]);
9180%! assert (i, [3, 2, 5, 4, 1]);
9181
9182## Complex
9183%!assert (sort ([NaN, 1i, -1, 2, Inf]), [1i, -1, 2, Inf, NaN])
9184%!assert (sort ([NaN, 1i, -1, 2, Inf], 1), [NaN, 1i, -1, 2, Inf])
9185%!assert (sort ([NaN, 1i, -1, 2, Inf], 2), [1i, -1, 2, Inf, NaN])
9186%!assert (sort ([NaN, 1i, -1, 2, Inf], 3), [NaN, 1i, -1, 2, Inf])
9187%!assert (sort ([NaN, 1i, -1, 2, Inf], "ascend"), [1i, -1, 2, Inf, NaN])
9188%!assert (sort ([NaN, 1i, -1, 2, Inf], 2, "ascend"), [1i, -1, 2, Inf, NaN])
9189%!assert (sort ([NaN, 1i, -1, 2, Inf], "descend"), [NaN, Inf, 2, -1, 1i])
9190%!assert (sort ([NaN, 1i, -1, 2, Inf], 2, "descend"), [NaN, Inf, 2, -1, 1i])
9191%!assert (sort ([3, 1i, 7, 5; 8, 2, 6, 4]), [3, 1i, 6, 4; 8, 2, 7, 5])
9192%!assert (sort ([3, 1i, 7, 5; 8, 2, 6, 4], 1), [3, 1i, 6, 4; 8, 2, 7, 5])
9193%!assert (sort ([3, 1i, 7, 5; 8, 2, 6, 4], 2), [1i, 3, 5, 7; 2, 4, 6, 8])
9194%!assert (sort (1i), 1i)
9195
9196%!test
9197%! [v, i] = sort ([NaN, 1i, -1, Inf, 1, 1i]);
9198%! assert (v, [1, 1i, 1i, -1, Inf, NaN]);
9199%! assert (i, [5, 2, 6, 3, 4, 1]);
9200
9201## Single
9202%!assert (sort (single ([NaN, 1, -1, 2, Inf])), single ([-1, 1, 2, Inf, NaN]))
9203%!assert (sort (single ([NaN, 1, -1, 2, Inf]), 1),
9204%! single ([NaN, 1, -1, 2, Inf]))
9205%!assert (sort (single ([NaN, 1, -1, 2, Inf]), 2),
9206%! single ([-1, 1, 2, Inf, NaN]))
9207%!assert (sort (single ([NaN, 1, -1, 2, Inf]), 3),
9208%! single ([NaN, 1, -1, 2, Inf]))
9209%!assert (sort (single ([NaN, 1, -1, 2, Inf]), "ascend"),
9210%! single ([-1, 1, 2, Inf, NaN]))
9211%!assert (sort (single ([NaN, 1, -1, 2, Inf]), 2, "ascend"),
9212%! single ([-1, 1, 2, Inf, NaN]))
9213%!assert (sort (single ([NaN, 1, -1, 2, Inf]), "descend"),
9214%! single ([NaN, Inf, 2, 1, -1]))
9215%!assert (sort (single ([NaN, 1, -1, 2, Inf]), 2, "descend"),
9216%! single ([NaN, Inf, 2, 1, -1]))
9217%!assert (sort (single ([3, 1, 7, 5; 8, 2, 6, 4])),
9218%! single ([3, 1, 6, 4; 8, 2, 7, 5]))
9219%!assert (sort (single ([3, 1, 7, 5; 8, 2, 6, 4]), 1),
9220%! single ([3, 1, 6, 4; 8, 2, 7, 5]))
9221%!assert (sort (single ([3, 1, 7, 5; 8, 2, 6, 4]), 2),
9222%! single ([1, 3, 5, 7; 2, 4, 6, 8]))
9223%!assert (sort (single (1)), single (1))
9224
9225%!test
9226%! [v, i] = sort (single ([NaN, 1, -1, Inf, 1]));
9227%! assert (v, single ([-1, 1, 1, Inf, NaN]));
9228%! assert (i, [3, 2, 5, 4, 1]);
9229
9230## Single Complex
9231%!assert (sort (single ([NaN, 1i, -1, 2, Inf])),
9232%! single ([1i, -1, 2, Inf, NaN]))
9233%!assert (sort (single ([NaN, 1i, -1, 2, Inf]), 1),
9234%! single ([NaN, 1i, -1, 2, Inf]))
9235%!assert (sort (single ([NaN, 1i, -1, 2, Inf]), 2),
9236%! single ([1i, -1, 2, Inf, NaN]))
9237%!assert (sort (single ([NaN, 1i, -1, 2, Inf]), 3),
9238%! single ([NaN, 1i, -1, 2, Inf]))
9239%!assert (sort (single ([NaN, 1i, -1, 2, Inf]), "ascend"),
9240%! single ([1i, -1, 2, Inf, NaN]))
9241%!assert (sort (single ([NaN, 1i, -1, 2, Inf]), 2, "ascend"),
9242%! single ([1i, -1, 2, Inf, NaN]))
9243%!assert (sort (single ([NaN, 1i, -1, 2, Inf]), "descend"),
9244%! single ([NaN, Inf, 2, -1, 1i]))
9245%!assert (sort (single ([NaN, 1i, -1, 2, Inf]), 2, "descend"),
9246%! single ([NaN, Inf, 2, -1, 1i]))
9247%!assert (sort (single ([3, 1i, 7, 5; 8, 2, 6, 4])),
9248%! single ([3, 1i, 6, 4; 8, 2, 7, 5]))
9249%!assert (sort (single ([3, 1i, 7, 5; 8, 2, 6, 4]), 1),
9250%! single ([3, 1i, 6, 4; 8, 2, 7, 5]))
9251%!assert (sort (single ([3, 1i, 7, 5; 8, 2, 6, 4]), 2),
9252%! single ([1i, 3, 5, 7; 2, 4, 6, 8]))
9253%!assert (sort (single (1i)), single (1i))
9254
9255%!test
9256%! [v, i] = sort (single ([NaN, 1i, -1, Inf, 1, 1i]));
9257%! assert (v, single ([1, 1i, 1i, -1, Inf, NaN]));
9258%! assert (i, [5, 2, 6, 3, 4, 1]);
9259
9260## Bool
9261%!assert (sort ([true, false, true, false]), [false, false, true, true])
9262%!assert (sort ([true, false, true, false], 1), [true, false, true, false])
9263%!assert (sort ([true, false, true, false], 2), [false, false, true, true])
9264%!assert (sort ([true, false, true, false], 3), [true, false, true, false])
9265%!assert (sort ([true, false, true, false], "ascend"),
9266%! [false, false, true, true])
9267%!assert (sort ([true, false, true, false], 2, "ascend"),
9268%! [false, false, true, true])
9269%!assert (sort ([true, false, true, false], "descend"),
9270%! [true, true, false, false])
9271%!assert (sort ([true, false, true, false], 2, "descend"),
9272%! [true, true, false, false])
9273%!assert (sort (true), true)
9274
9275%!test
9276%! [v, i] = sort ([true, false, true, false]);
9277%! assert (v, [false, false, true, true]);
9278%! assert (i, [2, 4, 1, 3]);
9279
9280## Sparse Double
9281%!assert (sort (sparse ([0, NaN, 1, 0, -1, 2, Inf])),
9282%! sparse ([-1, 0, 0, 1, 2, Inf, NaN]))
9283%!assert (sort (sparse ([0, NaN, 1, 0, -1, 2, Inf]), 1),
9284%! sparse ([0, NaN, 1, 0, -1, 2, Inf]))
9285%!assert (sort (sparse ([0, NaN, 1, 0, -1, 2, Inf]), 2),
9286%! sparse ([-1, 0, 0, 1, 2, Inf, NaN]))
9287%!assert (sort (sparse ([0, NaN, 1, 0, -1, 2, Inf]), 3),
9288%! sparse ([0, NaN, 1, 0, -1, 2, Inf]))
9289%!assert (sort (sparse ([0, NaN, 1, 0, -1, 2, Inf]), "ascend"),
9290%! sparse ([-1, 0, 0, 1, 2, Inf, NaN]))
9291%!assert (sort (sparse ([0, NaN, 1, 0, -1, 2, Inf]), 2, "ascend"),
9292%! sparse ([-1, 0, 0, 1, 2, Inf, NaN]))
9293%!assert (sort (sparse ([0, NaN, 1, 0, -1, 2, Inf]), "descend"),
9294%! sparse ([NaN, Inf, 2, 1, 0, 0, -1]))
9295%!assert (sort (sparse ([0, NaN, 1, 0, -1, 2, Inf]), 2, "descend"),
9296%! sparse ([NaN, Inf, 2, 1, 0, 0, -1]))
9297
9298%!shared a
9299%! a = randn (10, 10);
9300%! a(a < 0) = 0;
9301%!assert (sort (sparse (a)), sparse (sort (a)))
9302%!assert (sort (sparse (a), 1), sparse (sort (a, 1)))
9303%!assert (sort (sparse (a), 2), sparse (sort (a, 2)))
9304%!test
9305%! [v, i] = sort (a);
9306%! [vs, is] = sort (sparse (a));
9307%! assert (vs, sparse (v));
9308%! assert (is, i);
9309
9310## Sparse Complex
9311%!assert (sort (sparse ([0, NaN, 1i, 0, -1, 2, Inf])),
9312%! sparse ([0, 0, 1i, -1, 2, Inf, NaN]))
9313%!assert (sort (sparse ([0, NaN, 1i, 0, -1, 2, Inf]), 1),
9314%! sparse ([0, NaN, 1i, 0, -1, 2, Inf]))
9315%!assert (sort (sparse ([0, NaN, 1i, 0, -1, 2, Inf]), 2),
9316%! sparse ([0, 0, 1i, -1, 2, Inf, NaN]))
9317%!assert (sort (sparse ([0, NaN, 1i, 0, -1, 2, Inf]), 3),
9318%! sparse ([0, NaN, 1i, 0, -1, 2, Inf]))
9319%!assert (sort (sparse ([0, NaN, 1i, 0, -1, 2, Inf]), "ascend"),
9320%! sparse ([0, 0, 1i, -1, 2, Inf, NaN]))
9321%!assert (sort (sparse ([0, NaN, 1i, 0, -1, 2, Inf]), 2, "ascend"),
9322%! sparse ([0, 0, 1i, -1, 2, Inf, NaN]))
9323%!assert (sort (sparse ([0, NaN, 1i, 0, -1, 2, Inf]), "descend"),
9324%! sparse ([NaN, Inf, 2, -1, 1i, 0, 0]))
9325%!assert (sort (sparse ([0, NaN, 1i, 0, -1, 2, Inf]), 2, "descend"),
9326%! sparse ([NaN, Inf, 2, -1, 1i, 0, 0]))
9327
9328%!shared a
9329%! a = randn (10, 10);
9330%! a(a < 0) = 0;
9331%! a = 1i * a;
9332%!assert (sort (sparse (a)), sparse (sort (a)))
9333%!assert (sort (sparse (a), 1), sparse (sort (a, 1)))
9334%!assert (sort (sparse (a), 2), sparse (sort (a, 2)))
9335%!test
9336%! [v, i] = sort (a);
9337%! [vs, is] = sort (sparse (a));
9338%! assert (vs, sparse (v));
9339%! assert (is, i);
9340
9341## Sparse Bool
9342%!assert (sort (sparse ([true, false, true, false])),
9343%! sparse ([false, false, true, true]))
9344%!assert (sort (sparse ([true, false, true, false]), 1),
9345%! sparse ([true, false, true, false]))
9346%!assert (sort (sparse ([true, false, true, false]), 2),
9347%! sparse ([false, false, true, true]))
9348%!assert (sort (sparse ([true, false, true, false]), 3),
9349%! sparse ([true, false, true, false]))
9350%!assert (sort (sparse ([true, false, true, false]), "ascend"),
9351%! sparse ([false, false, true, true]))
9352%!assert (sort (sparse ([true, false, true, false]), 2, "ascend"),
9353%! sparse ([false, false, true, true]))
9354%!assert (sort (sparse ([true, false, true, false]), "descend"),
9355%! sparse ([true, true, false, false]))
9356%!assert (sort (sparse ([true, false, true, false]), 2, "descend"),
9357%! sparse ([true, true, false, false]))
9358
9359%!test
9360%! [v, i] = sort (sparse ([true, false, true, false]));
9361%! assert (v, sparse ([false, false, true, true]));
9362%! assert (i, [2, 4, 1, 3]);
9363
9364## Cell string array
9365%!shared a, b, c
9366%! a = {"Alice", "Cecile", "Eric", "Barry", "David"};
9367%! b = {"Alice", "Barry", "Cecile", "David", "Eric"};
9368%! c = {"Eric", "David", "Cecile", "Barry", "Alice"};
9369%!assert (sort (a), b)
9370%!assert (sort (a, 1), a)
9371%!assert (sort (a, 2), b)
9372%!assert (sort (a, 3), a)
9373%!assert (sort (a, "ascend"), b)
9374%!assert (sort (a, 2, "ascend"), b)
9375%!assert (sort (a, "descend"), c)
9376%!assert (sort (a, 2, "descend"), c)
9377
9378%!test
9379%! [v, i] = sort (a);
9380%! assert (i, [1, 4, 2, 5, 3]);
9381
9382## Test sort dimension being very large
9383%!test <*65712>
9384%! A = [1 2; 3 4];
9385%! assert (sort (A, 100), A);
9386%! assert (sort (A, inf), A);
9387%! [B, idx] = sort (A, 100);
9388%! assert (B, A);
9389%! assert (idx, ones (2));
9390%! [B, idx] = sort (A, inf);
9391%! assert (B, A);
9392%! assert (idx, ones (2));
9393
9394%!error <Invalid call> sort ()
9395%!error <Invalid call> sort (1, 2, 3, 4)
9396%!error <MODE must be either "ascend" or "descend"> sort (1, "foobar")
9397%!error <DIM must be a positive scalar integer> sort (1, [1 2 3])
9398%!error <DIM must be a positive scalar integer> sort ([1 2; 3 4], -inf)
9399%!error <DIM must be a positive scalar integer> sort ([1 2; 3 4], 0)
9400%!error <DIM must be a positive scalar integer> sort ([1 2; 3 4], 1+i)
9401%!error <DIM must be a positive scalar integer> sort ([1 2; 3 4], 1.234)
9402%!error <DIM must be a positive scalar integer> sort ([1 2; 3 4], NaN)
9403%!error <DIM argument must precede MODE argument> sort (1, "ascend", 1)
9404%!error <MODE must be a string> sort (1, 1, 1)
9405%!error <MODE must be either "ascend" or "descend"> sort (1, 1, "foobar")
9406%!error <DIM must be a positive scalar integer> sort (1, 0)
9407
9408*/
9409
9410// Sort the rows of the matrix @var{a} according to the order
9411// specified by @var{mode}, which can either be 'ascend' or 'descend'
9412// and return the index vector corresponding to the sort order.
9413//
9414// FIXME: This function does not yet support sparse matrices.
9415
9416DEFUN (__sort_rows_idx__, args, ,
9417 doc: /* -*- texinfo -*-
9418@deftypefn {} {@var{idx} =} __sort_rows_idx__ (@var{A}, @var{mode})
9419Called internally from @file{sortrows.m}.
9420@end deftypefn */)
9421{
9422 int nargin = args.length ();
9423
9424 if (nargin < 1 || nargin > 2)
9425 print_usage ();
9426
9427 if (nargin == 2 && ! args(1).is_string ())
9428 error ("__sort_rows_idx__: second argument must be a string");
9429
9430 sortmode smode = ASCENDING;
9431 if (nargin > 1)
9432 {
9433 std::string mode = args(1).string_value ();
9434 if (mode == "ascend")
9435 smode = ASCENDING;
9436 else if (mode == "descend")
9437 smode = DESCENDING;
9438 else
9439 error (R"(__sort_rows_idx__: MODE must be either "ascend" or "descend")");
9440 }
9441
9442 octave_value arg = args(0);
9443
9444 if (arg.issparse ())
9445 error ("__sort_rows_idx__: sparse matrices not yet supported");
9446
9447 if (arg.ndims () != 2)
9448 error ("__sort_rows_idx__: needs a 2-D object");
9449
9450 Array<octave_idx_type> idx = arg.sort_rows_idx (smode);
9451
9452 // This cannot be ovl(), relies on special overloaded octave_value call.
9453 return octave_value (idx, true, true);
9454}
9455
// Translate issorted's MODE string option into a sortmode value:
// "ascend" -> ASCENDING, "descend" -> DESCENDING, and "either" or
// "monotonic" -> UNSORTED (meaning "sorted in either direction").
// Errors if ARG is not a string or not one of the recognized modes.
static sortmode
get_sort_mode_option (const octave_value& arg)
{
  // FIXME: we initialize to UNSORTED here to avoid a GCC warning
  // about possibly using sortmode uninitialized.
  // FIXME: shouldn't these modes be scoped inside a class?
  sortmode smode = UNSORTED;

  std::string mode = arg.xstring_value ("issorted: MODE must be a string");

  if (mode == "ascend")
    smode = ASCENDING;
  else if (mode == "descend")
    smode = DESCENDING;
  else if (mode == "either" || mode == "monotonic")
    smode = UNSORTED;
  else
    error (R"(issorted: MODE must be "ascend", "descend", "monotonic", or "either")");

  return smode;
}
9477
9478DEFUN (issorted, args, ,
9479 doc: /* -*- texinfo -*-
9480@deftypefn {} {@var{tf} =} issorted (@var{A})
9481@deftypefnx {} {@var{tf} =} issorted (@var{A}, @var{mode})
9482@deftypefnx {} {@var{tf} =} issorted (@var{A}, "rows", @var{mode})
9483Return true if the vector @var{A} is sorted according to @var{mode}, which may
9484be either @qcode{"ascend"}, @qcode{"descend"}, @qcode{"either"}, or
9485@qcode{"monotonic"} (@qcode{"either"} and @qcode{"monotonic"} are equivalent).
9486
9487By default, @var{mode} is @qcode{"ascend"}. NaNs are treated in the same manner
9488as @code{sort}.
9489
9490If the optional argument @qcode{"rows"} is supplied, check whether the matrix
9491is sorted by rows as output by the function @code{sortrows} (with no options).
9492@emph{Note:} the @qcode{"rows"} argument can not be used with sparse matrices.
9493
9494@seealso{sort, sortrows}
9495@end deftypefn */)
9496{
9497 int nargin = args.length ();
9498
9499 if (nargin < 1 || nargin > 3)
9500 print_usage ();
9501
9502 bool by_rows = false;
9503
9504 sortmode smode = ASCENDING;
9505
9506 if (nargin > 1)
9507 {
9508 if (nargin == 3)
9509 smode = get_sort_mode_option (args(2));
9510
9511 std::string tmp = args(1).xstring_value ("issorted: second argument must be a string");
9512 if (tmp == "rows")
9513 by_rows = true;
9514 else
9515 smode = get_sort_mode_option (args(1));
9516 }
9517
9518 octave_value retval;
9519
9520 octave_value arg = args(0);
9521
9522 if (arg.isempty ())
9523 retval = true;
9524 else if (by_rows)
9525 {
9526 if (arg.issparse ())
9527 error ("issorted: sparse matrices not yet supported");
9528
9529 if (arg.ndims () != 2)
9530 error ("issorted: A must be a 2-D object");
9531
9532 retval = arg.is_sorted_rows (smode) != UNSORTED;
9533 }
9534 else
9535 {
9536 if (! arg.dims ().isvector ())
9537 error ("issorted: needs a vector");
9538
9539 retval = args(0).issorted (smode) != UNSORTED;
9540 }
9541
9542 return retval;
9543}
9544
9545/*
9546%!shared sm, um, sv, uv
9547%! sm = [1, 2; 3, 4];
9548%! um = [3, 1; 2, 4];
9549%! sv = [1, 2, 3, 4];
9550%! uv = [2, 1, 4, 3];
9551
9552%!assert (issorted (sm, "rows"))
9553%!assert (! issorted (um, "rows"))
9554%!assert (issorted (sv))
9555%!assert (! issorted (uv))
9556%!assert (issorted (sv'))
9557%!assert (! issorted (uv'))
9558%!assert (issorted (sm, "rows", "ascend"))
9559%!assert (! issorted (um, "rows", "ascend"))
9560%!assert (issorted (sv, "ascend"))
9561%!assert (! issorted (uv, "ascend"))
9562%!assert (issorted (sv', "ascend"))
9563%!assert (! issorted (uv', "ascend"))
9564%!assert (! issorted (sm, "rows", "descend"))
9565%!assert (issorted (flipud (sm), "rows", "descend"))
9566%!assert (! issorted (sv, "descend"))
9567%!assert (issorted (fliplr (sv), "descend"))
9568%!assert (! issorted (sv', "descend"))
9569%!assert (issorted (fliplr (sv)', "descend"))
9570%!assert (! issorted (um, "rows", "either"))
9571%!assert (! issorted (uv, "either"))
9572%!assert (issorted (sm, "rows", "either"))
9573%!assert (issorted (flipud (sm), "rows", "either"))
9574%!assert (issorted (sv, "either"))
9575%!assert (issorted (fliplr (sv), "either"))
9576%!assert (issorted (sv', "either"))
9577%!assert (issorted (fliplr (sv)', "either"))
9578%!assert (issorted (sm, "rows", "monotonic"))
9579%!assert (issorted (flipud (sm), "rows", "monotonic"))
9580%!assert (issorted (sv, "either"))
9581%!assert (issorted (fliplr (sv), "monotonic"))
9582%!assert (issorted (sv', "either"))
9583%!assert (issorted (fliplr (sv)', "monotonic"))
9584
9585%!assert (issorted ([]))
9586%!assert (issorted ([], "rows"))
9587%!assert (issorted ([], "ascend"))
9588%!assert (issorted ([], "rows", "ascend"))
9589%!assert (issorted ([], "descend"))
9590%!assert (issorted ([], "rows", "descend"))
9591%!assert (issorted ({}))
9592%!assert (issorted ({}, "rows"))
9593%!assert (issorted ({}, "ascend"))
9594%!assert (issorted ({}, "rows", "ascend"))
9595%!assert (issorted ({}, "descend"))
9596%!assert (issorted ({}, "rows", "descend"))
9597%!assert (issorted (""))
9598%!assert (issorted ("", "rows"))
9599%!assert (issorted ("", "ascend"))
9600%!assert (issorted ("", "rows", "ascend"))
9601%!assert (issorted ("", "descend"))
9602%!assert (issorted ("", "rows", "descend"))
9603
9604## Test input validation
9605%!error issorted ()
9606%!error issorted (1,2,3,4)
9607%!error <second argument must be a string> issorted (1, 2)
9608%!error <second argument must be a string> issorted (1, {"rows"})
9609%!error <sparse matrices not yet supported> issorted (sparse ([1 2 3]), "rows")
9610%!error <A must be a 2-D object> issorted (rand (2,2,2), "rows")
9611%!error <needs a vector> issorted (ones (2,2))
9612*/
9613
DEFUN (nth_element, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{nel} =} nth_element (@var{x}, @var{n})
@deftypefnx {} {@var{nel} =} nth_element (@var{x}, @var{n}, @var{dim})
Select the n-th smallest element of a vector, using the ordering defined by
@code{sort}.

The result is equivalent to @code{sort(@var{x})(@var{n})}.

@var{n} can also be a contiguous range, either ascending @code{l:u}
or descending @code{u:-1:l}, in which case a range of elements is returned.

If @var{x} is an array, @code{nth_element} operates along the dimension
defined by @var{dim}, or the first non-singleton dimension if @var{dim} is
not given.

Programming Note: nth_element encapsulates the C++ standard library
algorithms nth_element and partial_sort. On average, the complexity of the
operation is O(M*log(K)), where @w{@code{M = size (@var{x}, @var{dim})}}@ and
@w{@code{K = length (@var{n})}}. This function is intended for cases where
the ratio K/M is small; otherwise, it may be better to use @code{sort}.
@seealso{sort, min, max}
@end deftypefn */)
{
  int nargin = args.length ();

  if (nargin < 2 || nargin > 3)
    print_usage ();

  // DIM is kept zero-based internally; -1 means "not yet determined".
  int dim = -1;
  if (nargin == 3)
    {
      dim = args(2).int_value (true) - 1;
      if (dim < 0)
        error ("nth_element: DIM must be a valid dimension");
    }

  octave_value argx = args(0);
  if (dim < 0)
    dim = argx.dims ().first_non_singleton ();

  octave_value retval;

  try
    {
      // N may be a scalar or a contiguous range; index_vector validates it
      // and may throw an index_exception (handled below).
      idx_vector n = args(1).index_vector ();

      // Dispatch on the concrete array type of X so the typed
      // Array<T>::nth_element implementation is used.
      switch (argx.builtin_type ())
        {
        case btyp_double:
          retval = argx.array_value ().nth_element (n, dim);
          break;
        case btyp_float:
          retval = argx.float_array_value ().nth_element (n, dim);
          break;
        case btyp_complex:
          retval = argx.complex_array_value ().nth_element (n, dim);
          break;
        case btyp_float_complex:
          retval = argx.float_complex_array_value ().nth_element (n, dim);
          break;

// Generate one case per integer (and bool) array type.
#define MAKE_INT_BRANCH(X) \
 case btyp_ ## X: \
 retval = argx.X ## _array_value ().nth_element (n, dim); \
 break;

        MAKE_INT_BRANCH (int8);
        MAKE_INT_BRANCH (int16);
        MAKE_INT_BRANCH (int32);
        MAKE_INT_BRANCH (int64);
        MAKE_INT_BRANCH (uint8);
        MAKE_INT_BRANCH (uint16);
        MAKE_INT_BRANCH (uint32);
        MAKE_INT_BRANCH (uint64);
        MAKE_INT_BRANCH (bool);

#undef MAKE_INT_BRANCH

        default:
          // Cell arrays of strings are also supported; anything else is an
          // error.
          if (argx.iscellstr ())
            retval = argx.cellstr_value ().nth_element (n, dim);
          else
            err_wrong_type_arg ("nth_element", argx);
        }
    }
  catch (const index_exception& ie)
    {
      error ("nth_element: invalid index %s", ie.what ());
    }

  return retval;
}
9707
9708/*
9709%!assert (nth_element ([1:10], 1), 1)
9710%!assert (nth_element ([1:10], 10), 10)
9711%!assert (nth_element ([1:10], 1:3), [1 2 3])
9712%!assert (nth_element ([1:10], 1:10), [1:10])
9713
9714%!assert <*51329> (nth_element ([1:10], [1:10]), [1:10])
9715
9716%!error nth_element ()
9717%!error nth_element (1)
9718%!error nth_element (1, 1.5)
9719%!error nth_element (1, 2, 3, 4)
9720%!error nth_element ("abcd", 3)
9721*/
9722
9723template <typename NDT>
9724static NDT
9725do_accumarray_sum (const idx_vector& idx, const NDT& vals,
9726 octave_idx_type n = -1)
9727{
9728 typedef typename NDT::element_type T;
9729 if (n < 0)
9730 n = idx.extent (0);
9731 else if (idx.extent (n) > n)
9732 error ("accumarray: index out of range");
9733
9734 NDT retval (dim_vector (n, 1), T ());
9735
9736 if (vals.numel () == 1)
9737 retval.idx_add (idx, vals (0));
9738 else if (vals.numel () == idx.length (n))
9739 retval.idx_add (idx, vals);
9740 else
9741 error ("accumarray: dimensions mismatch");
9742
9743 return retval;
9744}
9745
9746DEFUN (__accumarray_sum__, args, ,
9747 doc: /* -*- texinfo -*-
9748@deftypefn {} {} __accumarray_sum__ (@var{idx}, @var{vals}, @var{n})
9749Undocumented internal function.
9750@end deftypefn */)
9751{
9752 int nargin = args.length ();
9753
9754 if (nargin < 2 || nargin > 3)
9755 print_usage ();
9756
9757 if (! args(0).isnumeric ())
9758 error ("__accumarray_sum__: first argument must be numeric");
9759
9760 octave_value retval;
9761
9762 try
9763 {
9764 idx_vector idx = args(0).index_vector ();
9765 octave_idx_type n = -1;
9766 if (nargin == 3)
9767 n = args(2).idx_type_value (true);
9768
9769 octave_value vals = args(1);
9770
9771 if (vals.is_range ())
9772 {
9773 range<double> r = vals.range_value ();
9774 if (r.increment () == 0)
9775 vals = r.base ();
9776 }
9777
9778 if (vals.is_single_type ())
9779 {
9780 if (vals.iscomplex ())
9781 retval = do_accumarray_sum (idx,
9783 n);
9784 else
9785 retval = do_accumarray_sum (idx, vals.float_array_value (), n);
9786 }
9787 else if (vals.isnumeric () || vals.islogical ())
9788 {
9789 if (vals.iscomplex ())
9790 retval = do_accumarray_sum (idx,
9791 vals.complex_array_value (),
9792 n);
9793 else
9794 retval = do_accumarray_sum (idx, vals.array_value (), n);
9795 }
9796 else
9797 err_wrong_type_arg ("accumarray", vals);
9798 }
9799 catch (const index_exception& ie)
9800 {
9801 error ("__accumarray_sum__: invalid index %s", ie.what ());
9802 }
9803
9804 return retval;
9805}
9806
9807template <typename NDT>
9808static NDT
9809do_accumarray_minmax (const idx_vector& idx, const NDT& vals,
9810 octave_idx_type n, bool ismin,
9811 const typename NDT::element_type& zero_val)
9812{
9813 typedef typename NDT::element_type T;
9814 if (n < 0)
9815 n = idx.extent (0);
9816 else if (idx.extent (n) > n)
9817 error ("accumarray: index out of range");
9818
9819 NDT retval (dim_vector (n, 1), zero_val);
9820
9821 // Pick minimizer or maximizer.
9822 void (MArray<T>::*op) (const idx_vector&, const MArray<T>&)
9823 = ismin ? (&MArray<T>::idx_min) : (&MArray<T>::idx_max);
9824
9825 octave_idx_type l = idx.length (n);
9826 if (vals.numel () == 1)
9827 (retval.*op) (idx, NDT (dim_vector (l, 1), vals(0)));
9828 else if (vals.numel () == l)
9829 (retval.*op) (idx, vals);
9830 else
9831 error ("accumarray: dimensions mismatch");
9832
9833 return retval;
9834}
9835
9836static octave_value_list
9837do_accumarray_minmax_fcn (const octave_value_list& args,
9838 bool ismin)
9839{
9840 int nargin = args.length ();
9841
9842 if (nargin < 3 || nargin > 4)
9843 print_usage ();
9844
9845 if (! args(0).isnumeric ())
9846 error ("accumarray: first argument must be numeric");
9847
9848 octave_value retval;
9849
9850 try
9851 {
9852 idx_vector idx = args(0).index_vector ();
9853 octave_idx_type n = -1;
9854 if (nargin == 4)
9855 n = args(3).idx_type_value (true);
9856
9857 octave_value vals = args(1);
9858 octave_value zero = args(2);
9859
9860 switch (vals.builtin_type ())
9861 {
9862 case btyp_double:
9863 retval = do_accumarray_minmax (idx, vals.array_value (), n, ismin,
9864 zero.double_value ());
9865 break;
9866
9867 case btyp_float:
9868 retval = do_accumarray_minmax (idx, vals.float_array_value (), n,
9869 ismin, zero.float_value ());
9870 break;
9871
9872 case btyp_complex:
9873 retval = do_accumarray_minmax (idx, vals.complex_array_value (),
9874 n, ismin, zero.complex_value ());
9875 break;
9876
9877 case btyp_float_complex:
9878 retval = do_accumarray_minmax (idx,
9880 n, ismin,
9881 zero.float_complex_value ());
9882 break;
9883
9884#define MAKE_INT_BRANCH(X) \
9885 case btyp_ ## X: \
9886 retval = do_accumarray_minmax (idx, vals.X ## _array_value (), \
9887 n, ismin, zero.X ## _scalar_value ()); \
9888 break;
9889
9890 MAKE_INT_BRANCH (int8);
9891 MAKE_INT_BRANCH (int16);
9892 MAKE_INT_BRANCH (int32);
9893 MAKE_INT_BRANCH (int64);
9894 MAKE_INT_BRANCH (uint8);
9895 MAKE_INT_BRANCH (uint16);
9896 MAKE_INT_BRANCH (uint32);
9897 MAKE_INT_BRANCH (uint64);
9898
9899#undef MAKE_INT_BRANCH
9900
9901 case btyp_bool:
9902 retval = do_accumarray_minmax (idx, vals.array_value (), n, ismin,
9903 zero.bool_value ());
9904 break;
9905
9906 default:
9907 err_wrong_type_arg ("accumarray", vals);
9908 }
9909 }
9910 catch (const index_exception& ie)
9911 {
9912 error ("do_accumarray_minmax_fcn: invalid index %s", ie.what ());
9913 }
9914
9915 return retval;
9916}
9917
DEFUN (__accumarray_min__, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {} __accumarray_min__ (@var{idx}, @var{vals}, @var{zero}, @var{n})
Undocumented internal function.
@end deftypefn */)
{
  // Minimum accumulation; the implementation is shared with
  // __accumarray_max__ (ismin = true selects idx_min).
  return do_accumarray_minmax_fcn (args, true);
}
9926
DEFUN (__accumarray_max__, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {} __accumarray_max__ (@var{idx}, @var{vals}, @var{zero}, @var{n})
Undocumented internal function.
@end deftypefn */)
{
  // Maximum accumulation; the implementation is shared with
  // __accumarray_min__ (ismin = false selects idx_max).
  return do_accumarray_minmax_fcn (args, false);
}
9935
9936template <typename NDT>
9937static NDT
9938do_accumdim_sum (const idx_vector& idx, const NDT& vals,
9939 int dim = -1, octave_idx_type n = -1)
9940{
9941 typedef typename NDT::element_type T;
9942 if (n < 0)
9943 n = idx.extent (0);
9944 else if (idx.extent (n) > n)
9945 error ("accumdim: index out of range");
9946
9947 const dim_vector& vals_dim = vals.dims ();
9948 dim_vector rdv = vals_dim;
9949
9950 if (dim < 0)
9951 dim = vals.dims ().first_non_singleton ();
9952 else if (dim >= rdv.ndims ())
9953 rdv.resize (dim+1, 1);
9954
9955 rdv(dim) = n;
9956
9957 NDT retval (rdv, T ());
9958
9959 if (idx.length () != vals_dim(dim))
9960 error ("accumdim: dimension mismatch");
9961
9962 retval.idx_add_nd (idx, vals, dim);
9963
9964 return retval;
9965}
9966
9967DEFUN (__accumdim_sum__, args, ,
9968 doc: /* -*- texinfo -*-
9969@deftypefn {} {} __accumdim_sum__ (@var{idx}, @var{vals}, @var{dim}, @var{n})
9970Undocumented internal function.
9971@end deftypefn */)
9972{
9973 int nargin = args.length ();
9974
9975 if (nargin < 2 || nargin > 4)
9976 print_usage ();
9977
9978 if (! args(0).isnumeric ())
9979 error ("__accumdim_sum__: first argument must be numeric");
9980
9981 octave_value retval;
9982
9983 try
9984 {
9985 idx_vector idx = args(0).index_vector ();
9986 int dim = -1;
9987 if (nargin >= 3)
9988 dim = args(2).int_value () - 1;
9989
9990 octave_idx_type n = -1;
9991 if (nargin == 4)
9992 n = args(3).idx_type_value (true);
9993
9994 octave_value vals = args(1);
9995
9996 if (vals.is_single_type ())
9997 {
9998 if (vals.iscomplex ())
9999 retval = do_accumdim_sum (idx,
10001 dim, n);
10002 else
10003 retval = do_accumdim_sum (idx, vals.float_array_value (),
10004 dim, n);
10005 }
10006 else if (vals.isnumeric () || vals.islogical ())
10007 {
10008 if (vals.iscomplex ())
10009 retval = do_accumdim_sum (idx, vals.complex_array_value (),
10010 dim, n);
10011 else
10012 retval = do_accumdim_sum (idx, vals.array_value (), dim, n);
10013 }
10014 else
10015 err_wrong_type_arg ("accumdim", vals);
10016 }
10017 catch (const index_exception& ie)
10018 {
10019 error ("__accumdim_sum__: invalid index %s", ie.what ());
10020 }
10021
10022 return retval;
10023}
10024
10025template <typename NDT>
10026static NDT
10027do_merge (const Array<bool>& mask,
10028 const NDT& tval, const NDT& fval)
10029{
10030 typedef typename NDT::element_type T;
10031 const dim_vector& dv = mask.dims ();
10032 NDT retval (dv);
10033
10034 bool tscl = tval.numel () == 1;
10035 bool fscl = fval.numel () == 1;
10036
10037 if ((! tscl && tval.dims () != dv) || (! fscl && fval.dims () != dv))
10038 error ("merge: MASK, TVAL, and FVAL dimensions must match");
10039
10040 T *rv = retval.rwdata ();
10041 octave_idx_type n = retval.numel ();
10042
10043 const T *tv = tval.data ();
10044 const T *fv = fval.data ();
10045 const bool *mv = mask.data ();
10046
10047 if (tscl)
10048 {
10049 if (fscl)
10050 {
10051 T ts = tv[0];
10052 T fs = fv[0];
10053 for (octave_idx_type i = 0; i < n; i++)
10054 rv[i] = (mv[i] ? ts : fs);
10055 }
10056 else
10057 {
10058 T ts = tv[0];
10059 for (octave_idx_type i = 0; i < n; i++)
10060 rv[i] = (mv[i] ? ts : fv[i]);
10061 }
10062 }
10063 else
10064 {
10065 if (fscl)
10066 {
10067 T fs = fv[0];
10068 for (octave_idx_type i = 0; i < n; i++)
10069 rv[i] = (mv[i] ? tv[i] : fs);
10070 }
10071 else
10072 {
10073 for (octave_idx_type i = 0; i < n; i++)
10074 rv[i] = (mv[i] ? tv[i] : fv[i]);
10075 }
10076 }
10077
10078 return retval;
10079}
10080
10081#define MAKE_INT_BRANCH(INTX) \
10082 else if (tval.is_ ## INTX ## _type () && fval.is_ ## INTX ## _type ()) \
10083 { \
10084 retval = do_merge (mask, \
10085 tval.INTX ## _array_value (), \
10086 fval.INTX ## _array_value ()); \
10087 }
10088
10089DEFUN (merge, args, ,
10090 doc: /* -*- texinfo -*-
10091@deftypefn {} {@var{M} =} merge (@var{mask}, @var{tval}, @var{fval})
10092@deftypefnx {} {@var{M} =} ifelse (@var{mask}, @var{tval}, @var{fval})
10093Merge elements of @var{true_val} and @var{false_val}, depending on the
10094value of @var{mask}.
10095
10096If @var{mask} is a logical scalar, the other two arguments can be arbitrary
10097values. Otherwise, @var{mask} must be a logical array, and @var{tval},
10098@var{fval} should be arrays of matching class, or cell arrays. In the
10099scalar mask case, @var{tval} is returned if @var{mask} is true, otherwise
10100@var{fval} is returned.
10101
10102In the array mask case, both @var{tval} and @var{fval} must be either
10103scalars or arrays with dimensions equal to @var{mask}. The result is
10104constructed as follows:
10105
10106@example
10107@group
10108result(mask) = tval(mask);
10109result(! mask) = fval(! mask);
10110@end group
10111@end example
10112
10113@var{mask} can also be arbitrary numeric type, in which case it is first
10114converted to logical.
10115
10116Programming Note: @code{ifelse} is an alias for @code{merge} and can be used
10117interchangeably.
10118@seealso{logical, diff}
10119@end deftypefn */)
10120{
10121 if (args.length () != 3)
10122 print_usage ();
10123
10124 if (! (args(0).islogical () || args(0).isnumeric ()))
10125 error ("merge: first argument must be logical or numeric");
10126
10127 octave_value retval;
10128
10129 octave_value mask_val = args(0);
10130
10131 if (mask_val.is_scalar_type ())
10132 retval = (mask_val.is_true () ? args(1) : args(2));
10133 else
10134 {
10135 boolNDArray mask = mask_val.bool_array_value ();
10136
10137 octave_value tval = args(1);
10138 octave_value fval = args(2);
10139
10140 if (tval.is_double_type () && fval.is_double_type ())
10141 {
10142 if (tval.iscomplex () || fval.iscomplex ())
10143 retval = do_merge (mask,
10144 tval.complex_array_value (),
10145 fval.complex_array_value ());
10146 else
10147 retval = do_merge (mask,
10148 tval.array_value (),
10149 fval.array_value ());
10150 }
10151 else if (tval.is_single_type () && fval.is_single_type ())
10152 {
10153 if (tval.iscomplex () || fval.iscomplex ())
10154 retval = do_merge (mask,
10157 else
10158 retval = do_merge (mask,
10159 tval.float_array_value (),
10160 fval.float_array_value ());
10161 }
10162 else if (tval.is_string () && fval.is_string ())
10163 {
10164 bool sq_string = tval.is_sq_string () || fval.is_sq_string ();
10165 retval = octave_value (do_merge (mask,
10166 tval.char_array_value (),
10167 fval.char_array_value ()),
10168 sq_string ? '\'' : '"');
10169 }
10170 else if (tval.iscell () && fval.iscell ())
10171 {
10172 retval = do_merge (mask,
10173 tval.cell_value (),
10174 fval.cell_value ());
10175 }
10176
10177 MAKE_INT_BRANCH (int8)
10178 MAKE_INT_BRANCH (int16)
10179 MAKE_INT_BRANCH (int32)
10180 MAKE_INT_BRANCH (int64)
10181 MAKE_INT_BRANCH (uint8)
10182 MAKE_INT_BRANCH (uint16)
10183 MAKE_INT_BRANCH (uint32)
10184 MAKE_INT_BRANCH (uint64)
10185
10186 else
10187 error ("merge: cannot merge %s with %s with array mask",
10188 tval.class_name ().c_str (),
10189 fval.class_name ().c_str ());
10190 }
10191
10192 return retval;
10193}
10194
10195DEFALIAS (ifelse, merge);
10196
10197#undef MAKE_INT_BRANCH
10198
10199template <typename SparseT>
10200static SparseT
10201do_sparse_diff (const SparseT& array, octave_idx_type order,
10202 int dim)
10203{
10204 SparseT retval = array;
10205 if (dim == 1)
10206 {
10207 octave_idx_type k = retval.columns ();
10208 while (order > 0 && k > 0)
10209 {
10210 idx_vector col1 (':'), col2 (':'), sl1 (1, k), sl2 (0, k-1);
10211 retval = SparseT (retval.index (col1, sl1))
10212 - SparseT (retval.index (col2, sl2));
10213 panic_unless (retval.columns () == k-1);
10214 order--;
10215 k--;
10216 }
10217 }
10218 else
10219 {
10220 octave_idx_type k = retval.rows ();
10221 while (order > 0 && k > 0)
10222 {
10223 idx_vector col1 (':'), col2 (':'), sl1 (1, k), sl2 (0, k-1);
10224 retval = SparseT (retval.index (sl1, col1))
10225 - SparseT (retval.index (sl2, col2));
10226 panic_unless (retval.rows () == k-1);
10227 order--;
10228 k--;
10229 }
10230 }
10231
10232 return retval;
10233}
10234
static octave_value
do_diff (const octave_value& array, octave_idx_type order,
         int dim = -1)
{
  // Compute the ORDER-th difference of ARRAY along DIM (zero-based).
  // DIM == -1 means "first non-singleton dimension", which also enables
  // the Matlab-compatible dimension-hopping behavior below.

  octave_value retval;

  const dim_vector& dv = array.dims ();
  if (dim == -1)
    {
      dim = array.dims ().first_non_singleton ();

      // Bother Matlab. This behavior is really wicked.
      // When ORDER meets or exceeds the length of the chosen dimension,
      // Matlab exhausts that dimension and continues differencing along
      // the next non-singleton dimension with the remaining order.
      if (dv(dim) <= order)
        {
          if (dv(dim) == 1)
            retval = array.resize (dim_vector (0, 0));
          else
            {
              retval = array;
              while (order > 0)
                {
                  if (dim == dv.ndims ())
                    {
                      // Ran out of dimensions; apply the remaining order
                      // to the last one.
                      retval = do_diff (array, order, dim - 1);
                      order = 0;
                    }
                  else if (dv(dim) == 1)
                    dim++;
                  else
                    {
                      // Exhaust this dimension (dv(dim)-1 differences),
                      // then move on with whatever order is left.
                      retval = do_diff (array, dv(dim) - 1, dim);
                      order -= dv(dim) - 1;
                      dim++;
                    }
                }
            }

          return retval;
        }
    }

  // Normal case: dispatch on the array's class.
  if (array.isinteger ())
    {
      if (array.is_int8_type ())
        retval = array.int8_array_value ().diff (order, dim);
      else if (array.is_int16_type ())
        retval = array.int16_array_value ().diff (order, dim);
      else if (array.is_int32_type ())
        retval = array.int32_array_value ().diff (order, dim);
      else if (array.is_int64_type ())
        retval = array.int64_array_value ().diff (order, dim);
      else if (array.is_uint8_type ())
        retval = array.uint8_array_value ().diff (order, dim);
      else if (array.is_uint16_type ())
        retval = array.uint16_array_value ().diff (order, dim);
      else if (array.is_uint32_type ())
        retval = array.uint32_array_value ().diff (order, dim);
      else if (array.is_uint64_type ())
        retval = array.uint64_array_value ().diff (order, dim);
      else
        error ("diff: unexpected integer type - please report this bug");
    }
  else if (array.issparse ())
    {
      // Sparse matrices have no diff method; use the slicing helper.
      if (array.iscomplex ())
        retval = do_sparse_diff (array.sparse_complex_matrix_value (),
                                 order, dim);
      else
        retval = do_sparse_diff (array.sparse_matrix_value (), order, dim);
    }
  else if (array.is_single_type ())
    {
      if (array.iscomplex ())
        retval = array.float_complex_array_value ().diff (order, dim);
      else
        retval = array.float_array_value ().diff (order, dim);
    }
  else
    {
      if (array.iscomplex ())
        retval = array.complex_array_value ().diff (order, dim);
      else
        retval = array.array_value ().diff (order, dim);
    }

  return retval;
}
10322
10323DEFUN (diff, args, ,
10324 doc: /* -*- texinfo -*-
10325@deftypefn {} {@var{y} =} diff (@var{x})
10326@deftypefnx {} {@var{y} =} diff (@var{x}, @var{k})
10327@deftypefnx {} {@var{y} =} diff (@var{x}, @var{k}, @var{dim})
10328If @var{x} is a vector of length @math{n}, @w{@code{diff (@var{x})}}@ is the
10329vector of first differences
10330@tex
10331 $x_2 - x_1, \ldots{}, x_n - x_{n-1}$.
10332@end tex
10333@ifnottex
10334 @var{x}(2) - @var{x}(1), @dots{}, @var{x}(n) - @var{x}(n-1).
10335@end ifnottex
10336
10337If @var{x} is a matrix, @w{@code{diff (@var{x})}}@ is the matrix of column
10338differences along the first non-singleton dimension.
10339
10340The second argument is optional. If supplied,
10341@w{@code{diff (@var{x}, @var{k})}}, where @var{k} is a non-negative integer,
10342returns the @var{k}-th differences. It is possible that @var{k} is larger than
10343the first non-singleton dimension of the matrix. In this case, @code{diff}
10344continues to take the differences along the next non-singleton dimension.
10345
10346The dimension along which to take the difference can be explicitly stated with
10347the optional variable @var{dim}. In this case the @var{k}-th order differences
10348are calculated along this dimension. In the case where @var{k} exceeds
10349@w{@code{size (@var{x}, @var{dim})}}@ an empty matrix is returned.
10350@seealso{sort, merge}
10351@end deftypefn */)
10352{
10353 int nargin = args.length ();
10354
10355 if (nargin < 1 || nargin > 3)
10356 print_usage ();
10357
10358 if (! (args(0).isnumeric () || args(0).islogical ()))
10359 error ("diff: X must be numeric or logical");
10360
10361 int dim = -1;
10362 octave_idx_type order = 1;
10363 if (nargin > 1)
10364 {
10365 if (args(1).is_scalar_type ())
10366 order = args(1).idx_type_value (true, false);
10367 else if (! args(1).is_zero_by_zero ())
10368 error ("diff: order K must be a scalar or []");
10369 if (order < 0)
10370 error ("diff: order K must be non-negative");
10371 }
10372
10373 if (nargin > 2)
10374 {
10375 dim = args(2).int_value (true, false);
10376 if (dim < 1 || dim > args(0).ndims ())
10377 error ("diff: DIM must be a valid dimension");
10378
10379 dim -= 1;
10380 }
10381
10382 return do_diff (args(0), order, dim);
10383}
10384
10385/*
10386%!assert (diff ([1, 2, 3, 4]), [1, 1, 1])
10387%!assert (diff ([1, 3, 7, 19], 2), [2, 8])
10388%!assert (diff ([1, 2; 5, 4; 8, 7; 9, 6; 3, 1]), [4, 2; 3, 3; 1, -1; -6, -5])
10389%!assert (diff ([1, 2; 5, 4; 8, 7; 9, 6; 3, 1], 3), [-1, -5; -5, 0])
10390%!assert (isempty (diff (1)))
10391
10392%!error diff ()
10393%!error diff (1, 2, 3, 4)
10394%!error diff ("foo")
10395%!error diff ([1, 2; 3, 4], -1)
10396*/
10397
10398template <typename T>
10399static Array<T>
10400do_repelems (const Array<T>& src, const Array<octave_idx_type>& rep)
10401{
10402 Array<T> retval;
10403
10404 if (rep.ndims () != 2 || rep.rows () != 2)
10405 error ("repelems: R must be a 2-row, N-column matrix of integers");
10406
10407 octave_idx_type n = rep.columns ();
10408 octave_idx_type l = 0;
10409 for (octave_idx_type i = 0; i < n; i++)
10410 {
10411 octave_idx_type k = rep(1, i);
10412 if (k < 0)
10413 error ("repelems: second row must contain non-negative numbers");
10414
10415 l += k;
10416 }
10417
10418 retval.clear (1, l);
10419 T *dest = retval.rwdata ();
10420 l = 0;
10421 for (octave_idx_type i = 0; i < n; i++)
10422 {
10423 octave_idx_type k = rep(1, i);
10424 std::fill_n (dest, k, src.checkelem (rep(0, i) - 1));
10425 dest += k;
10426 }
10427
10428 return retval;
10429}
10430
DEFUN (repelems, args, ,
       doc: /* -*- texinfo -*-
@deftypefn {} {@var{y} =} repelems (@var{x}, @var{r})
Construct a vector of repeated elements from @var{x}.

@var{r} is a 2x@var{N} integer matrix specifying which elements to repeat
and how often to repeat each element. Entries in the first row,
@var{r}(1,j), select an element to repeat. The corresponding entry in the
second row, @var{r}(2,j), specifies the repeat count. If @var{x} is a
matrix then the columns of @var{x} are imagined to be stacked on top of
each other for purposes of the selection index. A row vector is always
returned.

Conceptually the result is calculated as follows:

@example
@group
y = [];
for i = 1:columns (@var{r})
 y = [y, @var{x}(@var{r}(1,i)*ones(1, @var{r}(2,i)))];
endfor
@end group
@end example
@seealso{repmat, cat}
@end deftypefn */)
{
  if (args.length () != 2)
    print_usage ();

  octave_value retval;

  const Matrix rm = args(1).matrix_value ();

  if (rm.rows () != 2 || rm.ndims () != 2)
    error ("repelems: R must be a matrix with two rows");

  octave_value x = args(0);

  // Convert R to an integer index array, rejecting non-integral entries.
  Array<octave_idx_type> r (rm.dims ());

  for (octave_idx_type i = 0; i < rm.numel (); i++)
    {
      octave_idx_type rx = rm(i);
      if (static_cast<double> (rx) != rm(i))
        error ("repelems: R must be a matrix of integers");

      r.xelem (i) = rx;
    }

  // Dispatch on the concrete array type of X.
  switch (x.builtin_type ())
    {
#define BTYP_BRANCH(X, EX) \
 case btyp_ ## X: \
 retval = do_repelems (x.EX ## _value (), r); \
 break;

      BTYP_BRANCH (double, array);
      BTYP_BRANCH (float, float_array);
      BTYP_BRANCH (complex, complex_array);
      BTYP_BRANCH (float_complex, float_complex_array);
      BTYP_BRANCH (bool, bool_array);
      BTYP_BRANCH (char, char_array);

      BTYP_BRANCH (int8, int8_array);
      BTYP_BRANCH (int16, int16_array);
      BTYP_BRANCH (int32, int32_array);
      BTYP_BRANCH (int64, int64_array);
      BTYP_BRANCH (uint8, uint8_array);
      BTYP_BRANCH (uint16, uint16_array);
      BTYP_BRANCH (uint32, uint32_array);
      BTYP_BRANCH (uint64, uint64_array);

    case btyp_cell:
      retval = Cell (do_repelems (x.cell_value (), r));
      break;

      //BTYP_BRANCH (struct, map);//FIXME

#undef BTYP_BRANCH

    default:
      err_wrong_type_arg ("repelems", x);
    }

  return retval;
}
10517
10518DEFUN (base64_encode, args, ,
10519 doc: /* -*- texinfo -*-
10520@deftypefn {} {@var{s} =} base64_encode (@var{x})
10521Encode a double matrix or array @var{x} into the base64 format string
10522@var{s}.
10523
10524@seealso{base64_decode, matlab.net.base64decode, matlab.net.base64encode}
10525@end deftypefn */)
10526{
10527 if (args.length () != 1)
10528 print_usage ();
10529
10530 if (! args(0).isnumeric ())
10531 error ("base64_encode: encoding is supported only for numeric arrays");
10532
10533 if (args(0).iscomplex () || args(0).issparse ())
10534 error ("base64_encode: encoding complex or sparse data is not supported");
10535
10536 octave_value_list retval;
10537
10538 if (args(0).isinteger ())
10539 {
10540#define MAKE_INT_BRANCH(X) \
10541 if (args(0).is_ ## X ## _type ()) \
10542 { \
10543 const X##NDArray in = args(0). X## _array_value (); \
10544 std::size_t inlen = in.numel () * sizeof (X## _t) / sizeof (char); \
10545 const char *inc = reinterpret_cast<const char *> (in.data ()); \
10546 char *out; \
10547 if (base64_encode (inc, inlen, &out)) \
10548 { \
10549 retval(0) = octave_value (out); \
10550 ::free (out); \
10551 } \
10552 }
10553
10554 MAKE_INT_BRANCH(int8)
10555 else MAKE_INT_BRANCH(int16)
10556 else MAKE_INT_BRANCH(int32)
10557 else MAKE_INT_BRANCH(int64)
10558 else MAKE_INT_BRANCH(uint8)
10559 else MAKE_INT_BRANCH(uint16)
10560 else MAKE_INT_BRANCH(uint32)
10561 else MAKE_INT_BRANCH(uint64)
10562
10563#undef MAKE_INT_BRANCH
10564
10565 else
10566 error ("base_64_decode: unexpected integer type - please report this bug");
10567 }
10568 else if (args(0).is_single_type ())
10569 {
10570 const Array<float> in = args(0).float_array_value ();
10571 std::size_t inlen;
10572 inlen = in.numel () * sizeof (float) / sizeof (char);
10573 const char *inc;
10574 inc = reinterpret_cast<const char *> (in.data ());
10575 char *out;
10576 if (base64_encode (inc, inlen, &out))
10577 {
10578 retval(0) = octave_value (out);
10579 ::free (out);
10580 }
10581 }
10582 else // double_type
10583 {
10584 const Array<double> in = args(0).array_value ();
10585 std::size_t inlen;
10586 inlen = in.numel () * sizeof (double) / sizeof (char);
10587 const char *inc;
10588 inc = reinterpret_cast<const char *> (in.data ());
10589 char *out;
10590 if (base64_encode (inc, inlen, &out))
10591 {
10592 retval(0) = octave_value (out);
10593 ::free (out);
10594 }
10595 }
10596
10597 return retval;
10598}
10599
10600/*
10601%!test
10602%! ## FIXME: better test for endianness?
10603%! if (bitunpack (uint16 (1))(1) == 1)
10604%! expected = "2w9JQA==";
10605%! else
10606%! expected = "QEkP2w==";
10607%! endif
10608%! assert (base64_encode (single (pi)), expected);
10609
10610%!assert (base64_encode (uint8 ([0 0 0])), "AAAA")
10611%!assert (base64_encode (uint16 ([0 0 0])), "AAAAAAAA")
10612%!assert (base64_encode (uint32 ([0 0 0])), "AAAAAAAAAAAAAAAA")
10613%!assert (base64_encode (uint64 ([0 0 0])), "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")
10614%!assert (base64_encode (uint8 ([255 255 255])), "////")
10615
10616%!error base64_encode ()
10617%!error base64_encode (1,2)
10618%!error base64_encode ("A string")
10619%!error base64_encode ({"A cell array"})
10620%!error base64_encode (struct ())
10621*/
10622
10623DEFUN (base64_decode, args, ,
10624 doc: /* -*- texinfo -*-
10625@deftypefn {} {@var{x} =} base64_decode (@var{s})
10626@deftypefnx {} {@var{x} =} base64_decode (@var{s}, @var{dims})
10627Decode the double matrix or array @var{x} from the base64 encoded string
10628@var{s}.
10629
10630The optional input parameter @var{dims} should be a vector containing the
10631dimensions of the decoded array.
10632@seealso{base64_encode, matlab.net.base64decode, matlab.net.base64encode}
10633@end deftypefn */)
10634{
10635 int nargin = args.length ();
10636
10637 if (nargin < 1 || nargin > 2)
10638 print_usage ();
10639
10640 std::string str = args(0).xstring_value ("base64_decode: first argument must be a character array");
10641
10642 Array<double> retval = base64_decode (str);
10643
10644 if (nargin == 2)
10645 {
10646 dim_vector dims;
10647
10648 const Array<octave_idx_type> size
10649 = args(1).octave_idx_type_vector_value ();
10650
10651 dims = dim_vector::alloc (size.numel ());
10652 for (octave_idx_type i = 0; i < size.numel (); i++)
10653 dims(i) = size(i);
10654
10655 retval = retval.reshape (dims);
10656 }
10657
10658 return ovl (retval);
10659}
10660
10661/*
10662%!assert (base64_decode (base64_encode (pi)), pi)
10663%!
10664%!test
10665%! in = randn (10);
10666%! outv = base64_decode (base64_encode (in));
10667%! outm = base64_decode (base64_encode (in), size (in));
10668%! assert (outv, in(:).');
10669%! assert (outm, in);
10670
10671%!error base64_decode ()
10672%!error base64_decode (1,2,3)
10673%!error base64_decode (1, "this is not a valid set of dimensions")
10674%!error <first argument must be a character array> base64_decode (1)
10675%!error <input was not valid base64> base64_decode ("AQ=")
10676%!error <incorrect input size> base64_decode ("AQ==")
10677*/
10678
10679DEFUN (__base64_decode_bytes__, args, ,
10680 doc: /* -*- texinfo -*-
10681@deftypefn {} {@var{x} =} base64_decode_bytes (@var{s})
10682@deftypefnx {} {@var{x} =} base64_decode_bytes (@var{s}, @var{dims})
10683Decode the uint8 matrix or array @var{x} from the base64 encoded string
10684@var{s}.
10685
10686The optional input parameter @var{dims} should be a vector containing the
10687dimensions of the decoded array.
10688@seealso{base64_decode}
10689@end deftypefn */)
10690{
10691 int nargin = args.length ();
10692
10693 if (nargin < 1 || nargin > 2)
10694 print_usage ();
10695
10696 std::string str = args(0).xstring_value ("__base64_decode_bytes__: first argument must be a character array");
10697
10699
10700 if (nargin == 2)
10701 {
10702 dim_vector dims;
10703
10704 const Array<octave_idx_type> size
10705 = args(1).octave_idx_type_vector_value ();
10706
10707 dims = dim_vector::alloc (size.numel ());
10708 for (octave_idx_type i = 0; i < size.numel (); i++)
10709 dims(i) = size(i);
10710
10711 retval = retval.reshape (dims);
10712 }
10713
10714 return ovl (retval);
10715}
10716
10717/*
10718%!assert (__base64_decode_bytes__ (base64_encode (uint8 (1))), uint8 (1))
10719
10720%!test
10721%! in = uint8 (rand (10)*255);
10722%! outv = __base64_decode_bytes__ (base64_encode (in));
10723%! outm = __base64_decode_bytes__ (base64_encode (in), size (in));
10724%! assert (outv, in(:).');
10725%! assert (outm, in);
10726
10727%!error __base64_decode_bytes__ ()
10728%!error __base64_decode_bytes__ (1,2,3)
10729%!error __base64_decode_bytes__ (1, "this is not a valid set of dimensions")
10730%!error <first argument must be a character array> __base64_decode_bytes__ (1)
10731*/
10732
10733OCTAVE_END_NAMESPACE(octave)
bool isvector(const dim_vector &dim)
ComplexMatrix linspace(const ComplexColumnVector &x1, const ComplexColumnVector &x2, octave_idx_type n)
Definition CMatrix.cc:3684
ComplexNDArray concat(NDArray &ra, ComplexNDArray &rb, const Array< octave_idx_type > &ra_idx)
Definition CNDArray.cc:424
#define Inf
Definition Faddeeva.cc:257
#define NaN
Definition Faddeeva.cc:258
cdef_class lookup_class(const std::string &name, bool error_if_not_found, bool load_if_not_found)
Definition cdef-utils.cc:80
octave_value to_ov(const cdef_object &obj)
N Dimensional Array with copy-on-write semantics.
Definition Array-base.h:130
const dim_vector & dims() const
Return a const-reference so that dims ()(i) works efficiently.
Definition Array-base.h:529
T & xelem(octave_idx_type n)
Size of the specified dimension.
Definition Array-base.h:547
Array< T, Alloc > nth_element(const octave::idx_vector &n, int dim=0) const
Returns the n-th element in increasing order, using the same ordering as used for sort.
void clear()
int ndims() const
Size of the specified dimension.
Definition Array-base.h:701
octave_idx_type rows() const
Definition Array-base.h:485
void resize(const dim_vector &dv, const T &rfv)
Size of the specified dimension.
octave_idx_type columns() const
Definition Array-base.h:497
Array< T, Alloc > reshape(octave_idx_type nr, octave_idx_type nc) const
Size of the specified dimension.
Definition Array-base.h:658
const T * data() const
Size of the specified dimension.
Definition Array-base.h:687
T * rwdata()
Size of the specified dimension.
T & checkelem(octave_idx_type n)
Size of the specified dimension.
static Array< T, Alloc > cat(int dim, octave_idx_type n, const Array< T, Alloc > *array_list)
Concatenation along a specified (0-based) dimension, equivalent to cat().
octave_idx_type numel() const
Number of elements in the array.
Definition Array-base.h:440
Definition Cell.h:41
ComplexNDArray xsum(int dim=-1, bool nanflag=false) const
Definition CNDArray.cc:388
ComplexNDArray cumprod(int dim=-1, bool nanflag=false) const
Definition CNDArray.cc:364
ComplexNDArray sumsq(int dim=-1, bool nanflag=false) const
Definition CNDArray.cc:394
ComplexNDArray diff(octave_idx_type order=1, int dim=-1) const
Definition CNDArray.cc:400
ComplexNDArray flip(int dim=-1) const
Definition CNDArray.cc:358
ComplexNDArray cumsum(int dim=-1, bool nanflag=false) const
Definition CNDArray.cc:370
ComplexNDArray sum(int dim=-1, bool nanflag=false) const
Definition CNDArray.cc:382
ComplexNDArray prod(int dim=-1, bool nanflag=false) const
Definition CNDArray.cc:376
ComplexNDArray dsumsq(int dim=-1, bool nanflag=false) const
Definition fCNDArray.cc:413
ComplexNDArray dsum(int dim=-1, bool nanflag=false) const
Definition fCNDArray.cc:399
ComplexNDArray dprod(int dim=-1, bool nanflag=false) const
Definition fCNDArray.cc:385
FloatComplexNDArray cumsum(int dim=-1, bool nanflag=false) const
Definition fCNDArray.cc:371
FloatComplexNDArray diff(octave_idx_type order=1, int dim=-1) const
Definition fCNDArray.cc:420
FloatComplexNDArray cumprod(int dim=-1, bool nanflag=false) const
Definition fCNDArray.cc:364
FloatComplexNDArray sumsq(int dim=-1, bool nanflag=false) const
Definition fCNDArray.cc:406
FloatComplexNDArray prod(int dim=-1, bool nanflag=false) const
Definition fCNDArray.cc:378
FloatComplexNDArray sum(int dim=-1, bool nanflag=false) const
Definition fCNDArray.cc:392
FloatComplexNDArray flip(int dim=-1) const
Definition fCNDArray.cc:358
NDArray dsum(int dim=-1, bool nanflag=false) const
Definition fNDArray.cc:400
NDArray dprod(int dim=-1, bool nanflag=false) const
Definition fNDArray.cc:388
FloatNDArray cumsum(int dim=-1, bool nanflag=false) const
Definition fNDArray.cc:376
NDArray dsumsq(int dim=-1, bool nanflag=false) const
Definition fNDArray.cc:412
FloatNDArray sumsq(int dim=-1, bool nanflag=false) const
Definition fNDArray.cc:406
FloatNDArray diff(octave_idx_type order=1, int dim=-1) const
Definition fNDArray.cc:476
FloatNDArray sum(int dim=-1, bool nanflag=false) const
Definition fNDArray.cc:394
FloatNDArray flip(int dim=-1) const
Definition fNDArray.cc:364
FloatNDArray cumprod(int dim=-1, bool nanflag=false) const
Definition fNDArray.cc:370
FloatNDArray prod(int dim=-1, bool nanflag=false) const
Definition fNDArray.cc:382
Template for N-dimensional array classes with like-type math operators.
Definition MArray.h:61
MArray< T > reshape(const dim_vector &new_dims) const
Definition MArray.h:91
void resize(octave_idx_type nr, octave_idx_type nc, double rfv=0)
Definition dMatrix.h:156
NDArray xsum(int dim=-1, bool nanflag=false) const
Definition dNDArray.cc:434
NDArray flip(int dim=-1) const
Definition dNDArray.cc:405
NDArray diff(octave_idx_type order=1, int dim=-1) const
Definition dNDArray.cc:504
NDArray cumsum(int dim=-1, bool nanflag=false) const
Definition dNDArray.cc:417
NDArray cumprod(int dim=-1, bool nanflag=false) const
Definition dNDArray.cc:411
NDArray sum(int dim=-1, bool nanflag=false) const
Definition dNDArray.cc:428
NDArray sumsq(int dim=-1, bool nanflag=false) const
Definition dNDArray.cc:440
NDArray prod(int dim=-1, bool nanflag=false) const
Definition dNDArray.cc:422
SparseBoolMatrix all(int dim=-1) const
SparseMatrix sum(int dim=-1) const
SparseBoolMatrix any(int dim=-1) const
SparseComplexMatrix cumprod(int dim=-1, bool nanflag=false) const
Definition CSparse.cc:7593
SparseComplexMatrix cumsum(int dim=-1, bool nanflag=false) const
Definition CSparse.cc:7601
SparseComplexMatrix xsum(int dim=-1, bool nanflag=false) const
Definition CSparse.cc:7679
SparseComplexMatrix prod(int dim=-1, bool nanflag=false) const
Definition CSparse.cc:7621
SparseComplexMatrix sumsq(int dim=-1, bool nanflag=false) const
Definition CSparse.cc:7687
SparseComplexMatrix sum(int dim=-1, bool nanflag=false) const
Definition CSparse.cc:7653
SparseMatrix cumprod(int dim=-1, bool nanflag=false) const
Definition dSparse.cc:7560
SparseMatrix prod(int dim=-1, bool nanflag=false) const
Definition dSparse.cc:7588
SparseMatrix sum(int dim=-1, bool nanflag=false) const
Definition dSparse.cc:7619
SparseMatrix xsum(int dim=-1, bool nanflag=false) const
Definition dSparse.cc:7645
SparseMatrix sumsq(int dim=-1, bool nanflag=false) const
Definition dSparse.cc:7653
SparseMatrix cumsum(int dim=-1, bool nanflag=false) const
Definition dSparse.cc:7568
octave_idx_type cols() const
Definition Sparse.h:351
octave_idx_type * cidx()
Definition Sparse.h:595
T * data()
Definition Sparse.h:573
octave_idx_type * ridx()
Definition Sparse.h:582
static Sparse< T, Alloc > cat(int dim, octave_idx_type n, const Sparse< T, Alloc > *sparse_list)
Definition Sparse.cc:2680
octave_idx_type nnz() const
Actual number of nonzero terms.
Definition Sparse.h:338
octave_idx_type rows() const
Definition Sparse.h:350
dim_vector dims() const
Definition Sparse.h:370
boolNDArray any(int dim=-1) const
boolNDArray all(int dim=-1) const
octave_value find_method_symbol(const std::string &method_name, const std::string &class_name)
cdef_class find_class(const std::string &name, bool error_if_not_found=true, bool load_if_not_found=true)
bool is_array() const
bool ok() const
void set_class(const cdef_class &cls)
Array< cdef_object > array_value() const
Vector representing the dimensions (size) of an Array.
Definition dim-vector.h:92
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition dim-vector.h:341
void resize(int n, int fill_value=0)
Definition dim-vector.h:278
static dim_vector alloc(int n)
Definition dim-vector.h:208
octave_idx_type ndims() const
Number of dimensions.
Definition dim-vector.h:263
bool isvector() const
Definition dim-vector.h:432
bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
int first_non_singleton(int def=0) const
Definition dim-vector.h:481
dim_vector redim(int n) const
Force certain dimensionality, preserving numel ().
octave_idx_type length(octave_idx_type n=0) const
Definition idx-vector.h:518
octave_idx_type extent(octave_idx_type n) const
Definition idx-vector.h:521
intNDArray diff(octave_idx_type order=1, int dim=-1) const
cdef_manager & get_cdef_manager()
octave_value_list feval(const char *name, const octave_value_list &args=octave_value_list(), int nargout=0)
Evaluate an Octave function (built-in or interpreted) and return the list of result values.
void recover_from_exception()
symbol_table & get_symbol_table()
static data_type string_to_data_type(const std::string &s)
Definition data-conv.cc:293
virtual octave_function * function_value(bool silent=false)
Definition ov-base.cc:935
virtual bool is_legacy_constructor(const std::string &="") const
Definition ov-fcn.h:127
static octave_map cat(int dim, octave_idx_type n, const octave_scalar_map *map_list)
Definition oct-map.cc:690
void resize(octave_idx_type n, const octave_value &rfv=octave_value())
Definition ovl.h:115
Array< octave_value > array_value() const
Definition ovl.h:88
bool empty() const
Definition ovl.h:113
octave_value_list slice(octave_idx_type offset, octave_idx_type len, bool tags=false) const
Definition ovl.h:129
octave_idx_type length() const
Definition ovl.h:111
bool is_uint32_type() const
Definition ov.h:722
bool isinteger() const
Definition ov.h:728
boolNDArray bool_array_value(bool warn=false) const
Definition ov.h:898
SparseMatrix sparse_matrix_value(bool frc_str_conv=false) const
Definition ov.h:907
std::string class_name() const
Definition ov.h:1360
bool bool_value(bool warn=false) const
Definition ov.h:889
FloatComplexNDArray xfloat_complex_array_value(const char *fmt,...) const
int32NDArray int32_array_value() const
Definition ov.h:963
uint16NDArray uint16_array_value() const
Definition ov.h:972
bool is_true() const
Definition ov.h:756
int16NDArray int16_array_value() const
Definition ov.h:960
Cell cell_value() const
int int_value(bool req_int=false, bool frc_str_conv=false) const
Definition ov.h:810
@ op_hermitian
Definition ov.h:84
@ op_uminus
Definition ov.h:82
@ op_not
Definition ov.h:80
@ op_transpose
Definition ov.h:83
@ op_uplus
Definition ov.h:81
int8NDArray int8_array_value() const
Definition ov.h:957
int ndims() const
Definition ov.h:549
octave_idx_type rows() const
Definition ov.h:543
bool is_scalar_type() const
Definition ov.h:742
bool is_sq_string() const
Definition ov.h:638
Array< std::string > cellstr_value() const
Definition ov.h:989
bool is_string() const
Definition ov.h:635
octave_value permute(const Array< int > &vec, bool inv=false) const
Definition ov.h:572
bool isnumeric() const
Definition ov.h:748
bool is_int8_type() const
Definition ov.h:704
Complex complex_value(bool frc_str_conv=false) const
Definition ov.h:869
Array< octave_idx_type > sort_rows_idx(sortmode mode=ASCENDING) const
Definition ov.h:1438
octave_idx_type idx_type_value(bool req_int=false, bool frc_str_conv=false) const
Matrix size()
Definition ov.h:460
Complex xcomplex_value(const char *fmt,...) const
octave_value sort(octave_idx_type dim=0, sortmode mode=ASCENDING) const
Definition ov.h:1429
bool is_range() const
Definition ov.h:644
ComplexNDArray complex_array_value(bool frc_str_conv=false) const
Definition ov.h:882
bool is_single_type() const
Definition ov.h:696
bool is_defined() const
Definition ov.h:590
charNDArray char_array_value(bool frc_str_conv=false) const
Definition ov.h:904
bool isempty() const
Definition ov.h:599
FloatComplex float_complex_value(bool frc_str_conv=false) const
Definition ov.h:872
@ op_add_eq
Definition ov.h:136
@ op_mul_eq
Definition ov.h:138
@ op_el_and_eq
Definition ov.h:146
@ op_el_mul_eq
Definition ov.h:142
@ op_el_or_eq
Definition ov.h:147
bool is_uint8_type() const
Definition ov.h:716
bool is_uint64_type() const
Definition ov.h:725
bool is_uint16_type() const
Definition ov.h:719
uint64NDArray uint64_array_value() const
Definition ov.h:978
float float_value(bool frc_str_conv=false) const
Definition ov.h:848
SparseComplexMatrix xsparse_complex_matrix_value(const char *fmt,...) const
octave_value any(int dim=0) const
Definition ov.h:685
bool is_int16_type() const
Definition ov.h:707
bool issparse() const
Definition ov.h:751
octave_value reshape(const dim_vector &dv) const
Definition ov.h:569
double scalar_value(bool frc_str_conv=false) const
Definition ov.h:851
bool iscell() const
Definition ov.h:602
octave_idx_type numel() const
Definition ov.h:557
Array< int > int_vector_value(bool req_int=false, bool frc_str_conv=false, bool frc_vec_conv=false) const
@ magic_colon_t
Definition ov.h:170
std::string string_value(bool force=false) const
Definition ov.h:981
octave_value & assign(assign_op op, const std::string &type, const std::list< octave_value_list > &idx, const octave_value &rhs)
bool is_int64_type() const
Definition ov.h:713
bool iscomplex() const
Definition ov.h:739
ComplexNDArray xcomplex_array_value(const char *fmt,...) const
binary_op
Definition ov.h:92
@ op_ldiv
Definition ov.h:98
@ op_ne
Definition ov.h:104
@ op_el_or
Definition ov.h:110
@ op_el_ldiv
Definition ov.h:108
@ op_pow
Definition ov.h:97
@ op_ge
Definition ov.h:102
@ op_div
Definition ov.h:96
@ op_el_pow
Definition ov.h:107
@ op_mul
Definition ov.h:95
@ op_add
Definition ov.h:93
@ op_sub
Definition ov.h:94
@ op_el_mul
Definition ov.h:105
@ op_le
Definition ov.h:100
@ op_lt
Definition ov.h:99
@ op_gt
Definition ov.h:103
@ op_eq
Definition ov.h:101
@ op_el_and
Definition ov.h:109
@ op_el_div
Definition ov.h:106
sortmode is_sorted_rows(sortmode mode=UNSORTED) const
Definition ov.h:1441
octave_value single_subsref(const std::string &type, const octave_value_list &idx)
int64NDArray int64_array_value() const
Definition ov.h:966
SparseBoolMatrix sparse_bool_matrix_value(bool warn=false) const
Definition ov.h:914
NDArray array_value(bool frc_str_conv=false) const
Definition ov.h:863
FloatComplex xfloat_complex_value(const char *fmt,...) const
octave_idx_type length() const
bool is_double_type() const
Definition ov.h:693
bool is_int32_type() const
Definition ov.h:710
uint8NDArray uint8_array_value() const
Definition ov.h:969
octave_value all(int dim=0) const
Definition ov.h:682
std::string xstring_value(const char *fmt,...) const
FloatComplexNDArray float_complex_array_value(bool frc_str_conv=false) const
Definition ov.h:886
octave_value resize(const dim_vector &dv, bool fill=false) const
Definition ov.h:578
bool iscellstr() const
Definition ov.h:605
FloatNDArray float_array_value(bool frc_str_conv=false) const
Definition ov.h:866
uint32NDArray uint32_array_value() const
Definition ov.h:975
octave_idx_type columns() const
Definition ov.h:545
builtin_type_t builtin_type() const
Definition ov.h:688
octave_value diag(octave_idx_type k=0) const
Definition ov.h:1423
octave::range< double > range_value() const
Definition ov.h:992
double double_value(bool frc_str_conv=false) const
Definition ov.h:845
bool isobject() const
Definition ov.h:662
SparseComplexMatrix sparse_complex_matrix_value(bool frc_str_conv=false) const
Definition ov.h:911
bool islogical() const
Definition ov.h:733
octave_value abs() const
Definition ov.h:1460
dim_vector dims() const
Definition ov.h:539
octave_value find_method(const std::string &name, const std::string &dispatch_type)
Definition symtab.cc:128
OCTAVE_BEGIN_NAMESPACE(octave) static octave_value daspk_fcn
#define INSTANTIATE_EYE(T)
Definition data.cc:7318
template octave_value identity_matrix< int8NDArray >(int, int)
octave_value do_class_concat(const octave_value_list &ovl, const std::string &cattype, int dim)
Definition data.cc:2889
template octave_value identity_matrix< uint64NDArray >(int, int)
template octave_value identity_matrix< int16NDArray >(int, int)
#define BTYP_BRANCH(X, EX)
void map_2_xlog2(const Array< T > &x, Array< T > &f, Array< ET > &e)
Definition data.cc:742
template octave_value identity_matrix< int64NDArray >(int, int)
template octave_value identity_matrix< int32NDArray >(int, int)
template octave_value identity_matrix< uint8NDArray >(int, int)
octave_value identity_matrix(int nr, int nc)
Definition data.cc:7288
T eps(const T &x)
Definition data.cc:6843
#define MAKE_INT_BRANCH(X)
Definition data.cc:10081
template octave_value identity_matrix< uint16NDArray >(int, int)
template octave_value identity_matrix< boolNDArray >(int, int)
template octave_value identity_matrix< uint32NDArray >(int, int)
void print_usage()
Definition defun-int.h:72
#define DEFUN(name, args_name, nargout_name, doc)
Macro to define a builtin function.
Definition defun.h:56
#define DEFALIAS(alias, name)
Macro to define an alias for another existing function name.
Definition defun.h:160
void warning(const char *fmt,...)
Definition error.cc:1083
void error(const char *fmt,...)
Definition error.cc:1008
void err_wrong_type_arg(const char *name, const char *s)
Definition errwarn.cc:166
void warn_implicit_conversion(const char *id, const char *from, const char *to)
Definition errwarn.cc:345
std::string get_dispatch_type(const octave_value_list &args, builtin_type_t &builtin_type)
Definition fcn-info.cc:271
double norm(const ColumnVector &v)
Definition graphics.cc:5788
intNDArray< octave_int16 > int16NDArray
intNDArray< octave_int32 > int32NDArray
intNDArray< octave_int64 > int64NDArray
intNDArray< octave_int8 > int8NDArray
Definition int8NDArray.h:36
cdef_manager & __get_cdef_manager__()
interpreter & __get_interpreter__()
Complex log2(const Complex &x)
Definition mappers.cc:140
double lo_ieee_na_value()
Definition lo-ieee.cc:76
float lo_ieee_float_nan_value()
Definition lo-ieee.cc:116
float lo_ieee_float_na_value()
Definition lo-ieee.cc:108
double lo_ieee_nan_value()
Definition lo-ieee.cc:84
float lo_ieee_float_inf_value()
Definition lo-ieee.cc:100
double lo_ieee_inf_value()
Definition lo-ieee.cc:68
Complex atan(const Complex &x)
Definition mappers.h:83
bool isinteger(double x)
Definition mappers.h:249
T mod(T x, T y)
Definition mappers.h:324
T rem(T x, T y)
Definition mappers.h:357
T mx_inline_count(const bool *v, octave_idx_type n)
bool base64_encode(const char *inc, const std::size_t inlen, char **out)
Definition oct-base64.cc:39
intNDArray< octave_uint8 > base64_decode_bytes(const std::string &str)
Definition oct-base64.cc:99
Array< double > base64_decode(const std::string &str)
Definition oct-base64.cc:61
std::complex< double > Complex
Definition oct-cmplx.h:33
std::complex< float > FloatComplex
Definition oct-cmplx.h:34
octave_int< uint64_t > octave_uint64
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition oct-locbuf.h:44
double xfrobnorm(const Matrix &x)
Definition oct-norm.cc:1065
double xnorm(const ColumnVector &x, double p)
Definition oct-norm.cc:1065
RowVector xcolnorms(const Matrix &m, double p)
Definition oct-norm.cc:1108
ColumnVector xrownorms(const Matrix &m, double p)
Definition oct-norm.cc:1108
sortmode
Definition oct-sort.h:97
@ UNSORTED
Definition oct-sort.h:97
@ ASCENDING
Definition oct-sort.h:97
@ DESCENDING
Definition oct-sort.h:97
T::size_type numel(const T &str)
Definition oct-string.cc:81
void free(void *)
const octave_base_value const Array< octave_idx_type > & ra_idx
std::string btyp_class_name[btyp_num_types+1]
Definition ov-base.cc:91
builtin_type_t
Definition ov-base.h:83
@ btyp_float_complex
Definition ov-base.h:87
@ btyp_cell
Definition ov-base.h:99
@ btyp_func_handle
Definition ov-base.h:100
@ btyp_double
Definition ov-base.h:84
@ btyp_float
Definition ov-base.h:85
@ btyp_struct
Definition ov-base.h:98
@ btyp_bool
Definition ov-base.h:96
@ btyp_unknown
Definition ov-base.h:101
@ btyp_char
Definition ov-base.h:97
@ btyp_complex
Definition ov-base.h:86
octave_value colon_op(const octave_value &base, const octave_value &increment, const octave_value &limit, bool is_for_cmd_expr=false)
octave_value cat_op(type_info &ti, const octave_value &a, const octave_value &b, const Array< octave_idx_type > &ra_idx)
octave_value binary_op(type_info &ti, octave_value::binary_op op, const octave_value &a, const octave_value &b)
octave_value unary_op(type_info &ti, octave_value::unary_op op, const octave_value &a)
octave_value_list ovl(const OV_Args &... args)
Construct an octave_value_list with less typing.
Definition ovl.h:217
#define octave_stdout
Definition pager.h:301
#define panic_unless(cond)
Definition panic.h:59
void maybe_warn_string_concat(bool all_dq_strings_p, bool all_sq_strings_p)
Definition pt-mat.cc:134
std::string get_concat_class(const std::string &c1, const std::string &c2)
Definition pt-mat.cc:63
F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE * d
F77_RET_T const F77_DBLE * x
F77_RET_T const F77_DBLE const F77_DBLE * f
intNDArray< octave_uint16 > uint16NDArray
intNDArray< octave_uint32 > uint32NDArray
intNDArray< octave_uint64 > uint64NDArray
intNDArray< octave_uint8 > uint8NDArray
octave_idx_type dims_to_numel(const dim_vector &dims, const octave_value_list &idx_arg)
Definition utils.cc:1442
void get_dimensions(const octave_value &a, const char *warn_for, dim_vector &dim)
Definition utils.cc:1377
void check_dimensions(dim_vector &dim, const char *warnfor)
Definition utils.cc:1358
F77_RET_T len
Definition xerbla.cc:61