30 #include <sys/types.h>
31 #include <sys/times.h>
33 #ifdef HAVE_SYS_RESOURCE_H
34 #include <sys/resource.h>
72 #if ! defined (CLOCKS_PER_SEC)
74 #define CLOCKS_PER_SEC CLK_TCK
76 #error "no definition for CLOCKS_PER_SEC!"
80 #if ! defined (HAVE_HYPOTF) && defined (HAVE__HYPOTF)
81 #define hypotf _hypotf
85 #define ANY_ALL(FCN) \
87 octave_value retval; \
89 int nargin = args.length (); \
91 if (nargin == 1 || nargin == 2) \
93 int dim = (nargin == 1 ? -1 : args(1).int_value (true) - 1); \
98 retval = args(0).FCN (dim); \
100 error (#FCN ": invalid dimension argument = %d", dim + 1); \
103 error (#FCN ": expecting dimension argument to be an integer"); \
112 @deftypefn {Built-in Function} {} all (@var{x})\n\
113 @deftypefnx {Built-in Function} {} all (@var{x}, @var{dim})\n\
114 For a vector argument, return true (logical 1) if all elements of the vector\n\
117 For a matrix argument, return a row vector of logical ones and\n\
118 zeros with each element indicating whether all of the elements of the\n\
119 corresponding column of the matrix are nonzero. For example:\n\
123 all ([2, 3; 1, 0])\n\
124 @result{} [ 1, 0 ]\n\
128 If the optional argument @var{dim} is supplied, work along dimension\n\
159 @deftypefn {Built-in Function} {} any (@var{x})\n\
160 @deftypefnx {Built-in Function} {} any (@var{x}, @var{dim})\n\
161 For a vector argument, return true (logical 1) if any element of the vector\n\
164 For a matrix argument, return a row vector of logical ones and\n\
165 zeros with each element indicating whether any of the elements of the\n\
166 corresponding column of the matrix are nonzero. For example:\n\
171 @result{} [ 1, 1, 0, 0 ]\n\
175 If the optional argument @var{dim} is supplied, work along dimension\n\
176 @var{dim}. For example:\n\
180 any (eye (2, 4), 2)\n\
181 @result{} [ 1; 1 ]\n\
215 @deftypefn {Mapping Function} {} atan2 (@var{y}, @var{x})\n\
216 Compute atan (@var{y} / @var{x}) for corresponding elements of @var{y}\n\
217 and @var{x}. Signal an error if @var{y} and @var{x} do not match in size\n\
219 @seealso{tan, tand, tanh, atanh}\n\
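## Illustrative checks of the documented atan2 behavior (added here, not from
## the original test suite; assumes standard Octave/IEEE semantics).
%!assert (atan2 (1, 1), pi/4, eps)
%!assert (atan2 (-1, -1), -3*pi/4, 4*eps)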
224 int nargin = args.length ();
228 if (! args(0).is_numeric_type ())
230 else if (! args(1).is_numeric_type ())
232 else if (args(0).is_complex_type () || args(1).is_complex_type ())
233 error ("atan2: not defined for complex numbers");
234 else if (args(0).is_single_type () || args(1).is_single_type ())
236 if (args(0).is_scalar_type () && args(1).is_scalar_type ())
237 retval = atan2f (args(0).float_value (), args(1).float_value ());
242 retval = binmap<float> (a0, a1, ::atan2f, "atan2");
248 bool a1_scalar = args(1).is_scalar_type ();
249 if (a0_scalar && a1_scalar)
250 retval = atan2 (args(0).scalar_value (), args(1).scalar_value ());
251 else if ((a0_scalar || args(0).is_sparse_type ())
252 && (a1_scalar || args(1).is_sparse_type ()))
256 retval = binmap<double> (m0, m1, ::atan2, "atan2");
260 NDArray a0 = args(0).array_value ();
261 NDArray a1 = args(1).array_value ();
262 retval = binmap<double> (a0, a1, ::atan2, "atan2");
308 else if (! arg1.is_numeric_type ())
314 if (arg1.is_complex_type ())
320 retval = hypotf (arg0.float_value (), arg1.float_value ());
325 retval = binmap<float> (a0, a1, ::hypotf, "hypot");
331 bool a1_scalar = arg1.is_scalar_type ();
332 if (a0_scalar && a1_scalar)
333 retval = hypot (arg0.scalar_value (), arg1.scalar_value ());
335 && (a1_scalar || arg1.is_sparse_type ()))
339 retval = binmap<double> (m0, m1, ::hypot, "hypot");
344 NDArray a1 = arg1.array_value ();
345 retval = binmap<double> (a0, a1, ::hypot, "hypot");
353 DEFUN (hypot, args, ,
355 @deftypefn {Built-in Function} {} hypot (@var{x}, @var{y})\n\
356 @deftypefnx {Built-in Function} {} hypot (@var{x}, @var{y}, @var{z}, @dots{})\n\
357 Compute the element-by-element square root of the sum of the squares of\n\
358 @var{x} and @var{y}. This is equivalent to\n\
359 @code{sqrt (@var{x}.^2 + @var{y}.^2)}, but calculated in a manner that\n\
360 avoids overflows for large values of @var{x} or @var{y}.\n\
361 @code{hypot} can also be called with more than 2 arguments; in this case,\n\
362 the arguments are accumulated from left to right:\n\
366 hypot (hypot (@var{x}, @var{y}), @var{z})\n\
367 hypot (hypot (hypot (@var{x}, @var{y}), @var{z}), @var{w}), etc.\n\
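## Illustrative checks of the documented hypot behavior, including left-to-right
## accumulation for more than two arguments (added here, not from the original
## test suite; assumes standard Octave semantics).
%!assert (hypot (3, 4), 5, eps)
%!assert (hypot (3, 4, 12), 13, 16*eps)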
374 int nargin = args.length ();
378 retval = do_hypot (args(0), args(1));
380 else if (nargin >= 3)
384 retval = do_hypot (retval, args(i));
402 template <typename T, typename ET>
416 DEFUN (log2, args, nargout,
418 @deftypefn {Mapping Function} {} log2 (@var{x})\n\
419 @deftypefnx {Mapping Function} {[@var{f}, @var{e}] =} log2 (@var{x})\n\
420 Compute the base-2 logarithm of each element of @var{x}.\n\
422 If called with two output arguments, split @var{x} into\n\
423 binary mantissa and exponent so that\n\
425 ${1 \\over 2} \\le \\left| f \\right| < 1$\n\
428 @code{1/2 <= abs(f) < 1}\n\
430 and @var{e} is an integer. If\n\
432 $x = 0$, $f = e = 0$.\n\
435 @code{x = 0}, @code{f = e = 0}.\n\
437 @seealso{pow2, log, log10, exp}\n\
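## Illustrative check of the two-output form described above, i.e. x = f .* 2.^e
## with 1/2 <= abs (f) < 1 (added here, not from the original test suite).
%!test
%! [f, e] = log2 ([4, 0.25]);
%! assert (f, [0.5, 0.5]);
%! assert (e, [3, -1]);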
442 if (args.length () == 1)
445 retval(0) = args(0).log2 ();
446 else if (args(0).is_single_type ())
448 if (args(0).is_real_type ())
458 else if (args(0).is_complex_type ())
469 else if (args(0).is_real_type ())
479 else if (args(0).is_complex_type ())
518 @deftypefn {Mapping Function} {} rem (@var{x}, @var{y})\n\
519 @deftypefnx {Mapping Function} {} fmod (@var{x}, @var{y})\n\
520 Return the remainder of the division @code{@var{x} / @var{y}}, computed\n\
521 using the expression\n\
524 x - y .* fix (x ./ y)\n\
527 An error message is printed if the dimensions of the arguments do not\n\
528 agree, or if either of the arguments is complex.\n\
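## Illustrative checks of the x - y .* fix (x ./ y) definition above, contrasting
## rem with mod for negative arguments (added here, not from the original test suite).
%!assert (rem (5, 3), 2)
%!assert (rem (-1, 3), -1)
%!assert (rem ([5, -5], [3, 3]), [2, -2])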
534 int nargin = args.length ();
538 if (! args(0).is_numeric_type ())
540 else if (! args(1).is_numeric_type ())
542 else if (args(0).is_complex_type () || args(1).is_complex_type ())
543 error ("rem: not defined for complex numbers");
544 else if (args(0).is_integer_type () || args(1).is_integer_type ())
557 #define MAKE_INT_BRANCH(X) \
560 X##NDArray a0 = args(0).X##_array_value (); \
561 X##NDArray a1 = args(1).X##_array_value (); \
562 retval = binmap<octave_##X,octave_##X,octave_##X> (a0, a1, rem, "rem"); \
573 #undef MAKE_INT_BRANCH
579 error ("rem: cannot combine %s and %s",
580 args(0).class_name ().c_str (),
581 args(1).class_name ().c_str ());
583 else if (args(0).is_single_type () || args(1).is_single_type ())
585 if (args(0).is_scalar_type () && args(1).is_scalar_type ())
586 retval = xrem (args(0).float_value (), args(1).float_value ());
591 retval = binmap<float> (a0, a1, xrem<float>, "rem");
597 bool a1_scalar = args(1).is_scalar_type ();
598 if (a0_scalar && a1_scalar)
599 retval = xrem (args(0).scalar_value (), args(1).scalar_value ());
600 else if ((a0_scalar || args(0).is_sparse_type ())
601 && (a1_scalar || args(1).is_sparse_type ()))
605 retval = binmap<double> (m0, m1, xrem<double>, "rem");
609 NDArray a0 = args(0).array_value ();
610 NDArray a1 = args(1).array_value ();
611 retval = binmap<double> (a0, a1, xrem<double>, "rem");
648 @deftypefn {Mapping Function} {} mod (@var{x}, @var{y})\n\
649 Compute the modulo of @var{x} and @var{y}. Conceptually this is given by\n\
652 x - y .* floor (x ./ y)\n\
656 and is written such that the correct modulus is returned for\n\
657 integer types. This function handles negative values correctly. That\n\
658 is, @code{mod (-1, 3)} is 2, not -1, as @code{rem (-1, 3)} returns.\n\
659 @code{mod (@var{x}, 0)} returns @var{x}.\n\
661 An error results if the dimensions of the arguments do not agree, or if\n\
662 either of the arguments is complex.\n\
668 int nargin = args.length ();
672 if (! args(0).is_numeric_type ())
673 gripe_wrong_type_arg ("mod", args(0));
674 else if (! args(1).is_numeric_type ())
675 gripe_wrong_type_arg ("mod", args(1));
676 else if (args(0).is_complex_type () || args(1).is_complex_type ())
677 error ("mod: not defined
for complex numbers
");
678 else if (args(0).is_integer_type () || args(1).is_integer_type ())
680 builtin_type_t btyp0 = args(0).builtin_type ();
681 builtin_type_t btyp1 = args(1).builtin_type ();
682 if (btyp0 == btyp_double || btyp0 == btyp_float)
684 if (btyp1 == btyp_double || btyp1 == btyp_float)
691 #define MAKE_INT_BRANCH(X) \
694 X##NDArray a0 = args(0).X##_array_value (); \
695 X##NDArray a1 = args(1).X##_array_value (); \
696 retval = binmap<octave_##X,octave_##X,octave_##X> (a0, a1, mod, "mod"); \
699 MAKE_INT_BRANCH (int8);
700 MAKE_INT_BRANCH (int16);
701 MAKE_INT_BRANCH (int32);
702 MAKE_INT_BRANCH (int64);
703 MAKE_INT_BRANCH (uint8);
704 MAKE_INT_BRANCH (uint16);
705 MAKE_INT_BRANCH (uint32);
706 MAKE_INT_BRANCH (uint64);
707 #undef MAKE_INT_BRANCH
713 error ("mod: cannot combine %s and %
d",
714 args(0).class_name ().c_str (),
715 args(1).class_name ().c_str ());
717 else if (args(0).is_single_type () || args(1).is_single_type ())
719 if (args(0).is_scalar_type () && args(1).is_scalar_type ())
720 retval = xmod (args(0).float_value (), args(1).float_value ());
723 FloatNDArray a0 = args(0).float_array_value ();
724 FloatNDArray a1 = args(1).float_array_value ();
725 retval = binmap<float> (a0, a1, xmod<float>, "mod");
730 bool a0_scalar = args(0).is_scalar_type ();
731 bool a1_scalar = args(1).is_scalar_type ();
732 if (a0_scalar && a1_scalar)
733 retval = xmod (args(0).scalar_value (), args(1).scalar_value ());
734 else if ((a0_scalar || args(0).is_sparse_type ())
735 && (a1_scalar || args(1).is_sparse_type ()))
737 SparseMatrix m0 = args(0).sparse_matrix_value ();
738 SparseMatrix m1 = args(1).sparse_matrix_value ();
739 retval = binmap<double> (m0, m1, xmod<double>, "mod");
743 NDArray a0 = args(0).array_value ();
744 NDArray a1 = args(1).array_value ();
745 retval = binmap<double> (a0, a1, xmod<double>, "mod");
757 %!assert (isempty (mod ([], [])))
759 ## x mod y, y != 0 tests
760 %!assert (mod (5, 3), 2)
761 %!assert (mod (-5, 3), 1)
762 %!assert (mod (0, 3), 0)
763 %!assert (mod ([-5, 5, 0], [3, 3, 3]), [1, 2, 0])
764 %!assert (mod ([-5; 5; 0], [3; 3; 3]), [1; 2; 0])
765 %!assert (mod ([-5, 5; 0, 3], [3, 3 ; 3, 1]), [1, 2 ; 0, 0])
768 %!assert (mod (5, 0), 5)
769 %!assert (mod (-5, 0), -5)
770 %!assert (mod ([-5, 5, 0], [3, 0, 3]), [1, 5, 0])
771 %!assert (mod ([-5; 5; 0], [3; 0; 3]), [1; 5; 0])
772 %!assert (mod ([-5, 5; 0, 3], [3, 0 ; 3, 1]), [1, 5 ; 0, 0])
773 %!assert (mod ([-5, 5; 0, 3], [0, 0 ; 0, 0]), [-5, 5; 0, 3])
775 ## mixed scalar/matrix tests
776 %!assert (mod ([-5, 5; 0, 3], 0), [-5, 5; 0, 3])
777 %!assert (mod ([-5, 5; 0, 3], 3), [1, 2; 0, 0])
778 %!assert (mod (-5, [0,0; 0,0]), [-5, -5; -5, -5])
779 %!assert (mod (-5, [3,0; 3,1]), [1, -5; 1, 0])
780 %!assert (mod (-5, [3,2; 3,1]), [1, 1; 1, 0])
783 %!assert (mod (uint8 (5), uint8 (4)), uint8 (1))
784 %!assert (mod (uint8 ([1:5]), uint8 (4)), uint8 ([1,2,3,0,1]))
785 %!assert (mod (uint8 ([1:5]), uint8 (0)), uint8 ([1:5]))
786 %!error (mod (uint8 (5), int8 (4)))
788 ## mixed integer/real types
789 %!assert (mod (uint8 (5), 4), uint8 (1))
790 %!assert (mod (5, uint8 (4)), uint8 (1))
791 %!assert (mod (uint8 ([1:5]), 4), uint8 ([1,2,3,0,1]))
793 ## non-integer real numbers
794 %!assert (mod (2.1, 0.1), 0)
795 %!assert (mod (2.1, 0.2), 0.1, eps)
798 // FIXME: Need to convert reduction functions of this file for single precision
800 #define NATIVE_REDUCTION_1(FCN, TYPE, DIM) \
801 (arg.is_ ## TYPE ## _type ()) \
803 TYPE ## NDArray tmp = arg. TYPE ##_array_value (); \
807 retval = tmp.FCN (DIM); \
811 #define NATIVE_REDUCTION(FCN, BOOL_FCN) \
813 octave_value retval; \
815 int nargin = args.length (); \
817 bool isnative = false; \
818 bool isdouble = false; \
820 if (nargin > 1 && args(nargin - 1).is_string ()) \
822 std::string str = args(nargin - 1).string_value (); \
826 if (str == "native
") \
828 else if (str == "double") \
831 error ("sum: unrecognized
string argument
"); \
836 if (nargin == 1 || nargin == 2) \
838 octave_value arg = args(0); \
840 int dim = (nargin == 1 ? -1 : args(1).int_value (true) - 1); \
846 if (arg.is_sparse_type ()) \
848 if (arg.is_real_type ()) \
850 SparseMatrix tmp = arg.sparse_matrix_value (); \
853 retval = tmp.FCN (dim); \
857 SparseComplexMatrix tmp \
858 = arg.sparse_complex_matrix_value (); \
861 retval = tmp.FCN (dim); \
868 if NATIVE_REDUCTION_1 (FCN, uint8, dim) \
869 else if NATIVE_REDUCTION_1 (FCN, uint16, dim) \
870 else if NATIVE_REDUCTION_1 (FCN, uint32, dim) \
871 else if NATIVE_REDUCTION_1 (FCN, uint64, dim) \
872 else if NATIVE_REDUCTION_1 (FCN, int8, dim) \
873 else if NATIVE_REDUCTION_1 (FCN, int16, dim) \
874 else if NATIVE_REDUCTION_1 (FCN, int32, dim) \
875 else if NATIVE_REDUCTION_1 (FCN, int64, dim) \
876 else if (arg.is_bool_type ()) \
878 boolNDArray tmp = arg.bool_array_value (); \
880 retval = boolNDArray (tmp.BOOL_FCN (dim)); \
882 else if (arg.is_char_matrix ()) \
884 error (#FCN, ": invalid
char type"); \
886 else if (!isdouble && arg.is_single_type ()) \
888 if (arg.is_complex_type ()) \
890 FloatComplexNDArray tmp = \
891 arg.float_complex_array_value (); \
894 retval = tmp.FCN (dim); \
896 else if (arg.is_real_type ()) \
898 FloatNDArray tmp = arg.float_array_value (); \
901 retval = tmp.FCN (dim); \
904 else if (arg.is_complex_type ()) \
906 ComplexNDArray tmp = arg.complex_array_value (); \
909 retval = tmp.FCN (dim); \
911 else if (arg.is_real_type ()) \
913 NDArray tmp = arg.array_value (); \
916 retval = tmp.FCN (dim); \
920 gripe_wrong_type_arg (#FCN, arg); \
924 else if (arg.is_bool_type ()) \
926 boolNDArray tmp = arg.bool_array_value (); \
928 retval = tmp.FCN (dim); \
930 else if (!isdouble && arg.is_single_type ()) \
932 if (arg.is_real_type ()) \
934 FloatNDArray tmp = arg.float_array_value (); \
937 retval = tmp.FCN (dim); \
939 else if (arg.is_complex_type ()) \
941 FloatComplexNDArray tmp = \
942 arg.float_complex_array_value (); \
945 retval = tmp.FCN (dim); \
948 else if (arg.is_real_type ()) \
950 NDArray tmp = arg.array_value (); \
953 retval = tmp.FCN (dim); \
955 else if (arg.is_complex_type ()) \
957 ComplexNDArray tmp = arg.complex_array_value (); \
960 retval = tmp.FCN (dim); \
964 gripe_wrong_type_arg (#FCN, arg); \
970 error (#FCN ": invalid dimension argument = %
d", dim + 1); \
979 #define DATA_REDUCTION(FCN) \
981 octave_value retval; \
983 int nargin = args.length (); \
985 if (nargin == 1 || nargin == 2) \
987 octave_value arg = args(0); \
989 int dim = (nargin == 1 ? -1 : args(1).int_value (true) - 1); \
995 if (arg.is_real_type ()) \
997 if (arg.is_sparse_type ()) \
999 SparseMatrix tmp = arg.sparse_matrix_value (); \
1001 if (! error_state) \
1002 retval = tmp.FCN (dim); \
1004 else if (arg.is_single_type ()) \
1006 FloatNDArray tmp = arg.float_array_value (); \
1008 if (! error_state) \
1009 retval = tmp.FCN (dim); \
1013 NDArray tmp = arg.array_value (); \
1015 if (! error_state) \
1016 retval = tmp.FCN (dim); \
1019 else if (arg.is_complex_type ()) \
1021 if (arg.is_sparse_type ()) \
1023 SparseComplexMatrix tmp = arg.sparse_complex_matrix_value (); \
1025 if (! error_state) \
1026 retval = tmp.FCN (dim); \
1028 else if (arg.is_single_type ()) \
1030 FloatComplexNDArray tmp \
1031 = arg.float_complex_array_value (); \
1033 if (! error_state) \
1034 retval = tmp.FCN (dim); \
1038 ComplexNDArray tmp = arg.complex_array_value (); \
1040 if (! error_state) \
1041 retval = tmp.FCN (dim); \
1046 gripe_wrong_type_arg (#FCN, arg); \
1051 error (#FCN ": invalid dimension argument = %d", dim + 1); \
1059 DEFUN (cumprod, args, ,
1061 @deftypefn {Built-in Function} {} cumprod (@var{x})\n\
1062 @deftypefnx {Built-in Function} {} cumprod (@var{x}, @var{dim})\n\
1063 Cumulative product of elements along dimension @var{dim}. If\n\
1064 @var{dim} is omitted, it defaults to the first non-singleton dimension.\n\
1066 @seealso{prod, cumsum}\n\
1069 DATA_REDUCTION (cumprod);
1073 %!assert (cumprod ([1, 2, 3]), [1, 2, 6])
1074 %!assert (cumprod ([-1; -2; -3]), [-1; 2; -6])
1075 %!assert (cumprod ([i, 2+i, -3+2i, 4]), [i, -1+2i, -1-8i, -4-32i])
1076 %!assert (cumprod ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i]), [1, 2, 3; i, 4i, 9i; -1+i, -8+8i, -27+27i])
1078 %!assert (cumprod (single ([1, 2, 3])), single ([1, 2, 6]))
1079 %!assert (cumprod (single ([-1; -2; -3])), single ([-1; 2; -6]))
1080 %!assert (cumprod (single ([i, 2+i, -3+2i, 4])), single ([i, -1+2i, -1-8i, -4-32i]))
1081 %!assert (cumprod (single ([1, 2, 3; i, 2i, 3i; 1+i, 2+2i, 3+3i])), single ([1, 2, 3; i, 4i, 9i; -1+i, -8+8i, -27+27i]))
1083 %!assert (cumprod ([2, 3; 4, 5], 1), [2, 3; 8, 15])
1084 %!assert (cumprod ([2, 3; 4, 5], 2), [2, 6; 4, 20])
1086 %!assert (cumprod (single ([2, 3; 4, 5]), 1), single ([2, 3; 8, 15]))
1087 %!assert (cumprod (single ([2, 3; 4, 5]), 2), single ([2, 6; 4, 20]))
1092 DEFUN (cumsum, args, ,
1094 @deftypefn {Built-in Function} {} cumsum (@var{x})\n\
1095 @deftypefnx {Built-in Function} {} cumsum (@var{x}, @var{dim})\n\
1096 @deftypefnx {Built-in Function} {} cumsum (@dots{}, \"native\")\n\
1097 @deftypefnx {Built-in Function} {} cumsum (@dots{}, \"double\")\n\
1098 @deftypefnx {Built-in Function} {} cumsum (@dots{}, \"extra\")\n\
1099 Cumulative sum of elements along dimension @var{dim}. If @var{dim}\n\
1100 is omitted, it defaults to the first non-singleton dimension.\n\
1102 See @code{sum} for an explanation of the optional parameters\n\
1103 @qcode{\"native\"}, @qcode{\"double\"}, and @qcode{\"extra\"}.\n\
1104 @seealso{sum, cumprod}\n\
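## Illustrative checks of cumulative summation along the default and an explicit
## dimension (added here, not from the original test suite).
%!assert (cumsum ([1, 2, 3]), [1, 3, 6])
%!assert (cumsum ([1, 2; 3, 4], 2), [1, 3; 3, 7])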
1109 int nargin = args.length ();
1111 bool isnative = false;
1112 bool isdouble = false;
1114 if (nargin > 1 && args(nargin - 1).is_string ())
1116 std::string str = args(nargin - 1).string_value ();
1120 if (str == "native")
1122 else if (str == "double")
1125 error ("cumsum: unrecognized string argument");
1133 if (nargin == 1 || nargin == 2)
1140 dim = args(1).int_value () - 1;
1142 error ("cumsum: invalid dimension argument = %d", dim + 1);
1174 #define MAKE_INT_BRANCH(X) \
1177 retval = arg.X ## _array_value ().cumsum (dim); \
1179 retval = arg.array_value ().cumsum (dim); \
1189 #undef MAKE_INT_BRANCH
1241 DEFUN (diag, args, ,
1243 @deftypefn {Built-in Function} {@var{M} =} diag (@var{v})\n\
1244 @deftypefnx {Built-in Function} {@var{M} =} diag (@var{v}, @var{k})\n\
1245 @deftypefnx {Built-in Function} {@var{M} =} diag (@var{v}, @var{m}, @var{n})\n\
1246 @deftypefnx {Built-in Function} {@var{v} =} diag (@var{M})\n\
1247 @deftypefnx {Built-in Function} {@var{v} =} diag (@var{M}, @var{k})\n\
1248 Return a diagonal matrix with vector @var{v} on diagonal @var{k}. The\n\
1249 second argument is optional. If it is positive, the vector is placed on\n\
1250 the @var{k}-th super-diagonal. If it is negative, it is placed on the\n\
1251 @var{-k}-th sub-diagonal. The default value of @var{k} is 0, and the\n\
1252 vector is placed on the main diagonal. For example:\n\
1256 diag ([1, 2, 3], 1)\n\
1257 @result{} 0 1 0 0\n\
1265 The 3-input form returns a diagonal matrix with vector @var{v} on the main\n\
1266 diagonal and the resulting matrix being of size @var{m} rows x @var{n}\n\
1269 Given a matrix argument, instead of a vector, @code{diag} extracts the\n\
1270 @var{k}-th diagonal of the matrix.\n\
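## Illustrative checks of the vector and matrix forms described above (added
## here, not from the original test suite).
%!assert (diag ([1, 2, 3]), [1, 0, 0; 0, 2, 0; 0, 0, 3])
%!assert (diag ([1, 2], 1), [0, 1, 0; 0, 0, 2; 0, 0, 0])
%!assert (diag ([1, 2; 3, 4]), [1; 4])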
1275 int nargin = args.length ();
1277 if (nargin == 1 && args(0).is_defined ())
1278 retval = args(0).diag ();
1279 else if (nargin == 2 && args(0).is_defined () && args(1).is_defined ())
1284 error ("diag: invalid argument K");
1286 retval = args(0).diag (k);
1288 else if (nargin == 3)
1298 retval = arg0.diag (m, n);
1300 error ("diag: invalid dimensions");
1303 error ("diag: V must be a vector");
1372 DEFUN (prod, args, ,
1374 @deftypefn {Built-in Function} {} prod (@var{x})\n\
1375 @deftypefnx {Built-in Function} {} prod (@var{x}, @var{dim})\n\
1376 Product of elements along dimension @var{dim}. If @var{dim} is\n\
1377 omitted, it defaults to the first non-singleton dimension.\n\
1378 @seealso{cumprod, sum}\n\
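## Illustrative checks of prod along the default and an explicit dimension
## (added here, not from the original test suite).
%!assert (prod ([1, 2, 3]), 6)
%!assert (prod ([2, 3; 4, 5], 2), [6; 20])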
1431 int n_args = args.length ();
1432 for (int i = 0; i < n_args; i++)
1433 if (args(i).numel () != 1)
1439 template <class TYPE, class T>
1445 int n_args = args.length ();
1452 if (dim == -1 || dim == -2)
1464 result(j) = octave_value_extract<T> (args(j));
1475 array_list[j] = octave_value_extract<TYPE> (args(j));
1483 template <class TYPE, class T>
1489 int n_args = args.length ();
1496 sparse_list[j] = octave_value_extract<TYPE> (args(j));
1504 template <class TYPE>
1510 single_type_concat<TYPE, typename TYPE::element_type> (result, args, dim);
1521 int n_args = args.length ();
1528 map_list[j] = octave_value_extract<MAP> (args(j));
1541 single_type_concat_map<octave_scalar_map> (result, args, dim);
1543 single_type_concat_map<octave_map> (result, args, dim);
1569 error ("conversion from %s to %s failed", dtype.c_str (),
1587 error ("%s constructor failed for %s argument", dtype.c_str (),
1591 error ("no constructor for %s!", dtype.c_str ());
1621 error ("%s/%s method did not return a value",
1622 dtype.c_str (), cattype.c_str ());
1644 if (t1_type == dtype)
1659 std::string cname = tmp(0).class_name ();
1660 std::list<std::string> parents = tmp(0).parent_class_name_list ();
1679 int n_args = args.length ();
1683 else if (n_args == 1)
1685 else if (n_args > 1)
1687 std::string result_type;
1689 bool all_sq_strings_p = true;
1690 bool all_dq_strings_p = true;
1691 bool all_real_p = true;
1692 bool all_cmplx_p = true;
1693 bool any_sparse_p = false;
1694 bool any_cell_p = false;
1695 bool any_class_p = false;
1697 bool first_elem_is_struct = false;
1699 for (int i = 0; i < n_args; i++)
1703 result_type = args(i).class_name ();
1705 first_elem_is_struct = args(i).is_map ();
1710 if (all_sq_strings_p && ! args(i).is_sq_string ())
1711 all_sq_strings_p = false;
1712 if (all_dq_strings_p && ! args(i).is_dq_string ())
1713 all_dq_strings_p = false;
1714 if (all_real_p && ! args(i).is_real_type ())
1716 if (all_cmplx_p && ! (args(i).is_complex_type ()
1717 || args(i).is_real_type ()))
1718 all_cmplx_p = false;
1719 if (!any_sparse_p && args(i).is_sparse_type ())
1720 any_sparse_p = true;
1721 if (!any_cell_p && args(i).is_cell ())
1723 if (!any_class_p && args(i).is_object ())
1727 if (any_cell_p && ! any_class_p && ! first_elem_is_struct)
1729 for (int i = 0; i < n_args; i++)
1731 if (! args(i).is_cell ())
1732 args(i) = Cell (args(i));
1740 else if (result_type == "double")
1745 retval = do_single_type_concat<SparseMatrix> (args, dim);
1747 retval = do_single_type_concat<SparseComplexMatrix> (args, dim);
1752 retval = do_single_type_concat<NDArray> (args, dim);
1754 retval = do_single_type_concat<ComplexNDArray> (args, dim);
1757 else if (result_type == "single")
1760 retval = do_single_type_concat<FloatNDArray> (args, dim);
1762 retval = do_single_type_concat<FloatComplexNDArray> (args, dim);
1764 else if (result_type == "char")
1766 char type = all_dq_strings_p ? '"' : '\'';
1770 charNDArray result = do_single_type_concat<charNDArray> (args, dim);
1774 else if (result_type == "logical")
1777 retval = do_single_type_concat<SparseBoolMatrix> (args, dim);
1779 retval = do_single_type_concat<boolNDArray> (args, dim);
1781 else if (result_type == "int8")
1782 retval = do_single_type_concat<int8NDArray> (args, dim);
1783 else if (result_type == "int16")
1784 retval = do_single_type_concat<int16NDArray> (args, dim);
1785 else if (result_type == "int32")
1786 retval = do_single_type_concat<int32NDArray> (args, dim);
1787 else if (result_type == "int64")
1788 retval = do_single_type_concat<int64NDArray> (args, dim);
1789 else if (result_type == "uint8")
1790 retval = do_single_type_concat<uint8NDArray> (args, dim);
1791 else if (result_type == "uint16")
1792 retval = do_single_type_concat<uint16NDArray> (args, dim);
1793 else if (result_type == "uint32")
1794 retval = do_single_type_concat<uint32NDArray> (args, dim);
1795 else if (result_type == "uint64")
1796 retval = do_single_type_concat<uint64NDArray> (args, dim);
1797 else if (result_type == "cell")
1798 retval = do_single_type_concat<Cell> (args, dim);
1799 else if (result_type == "struct")
1809 if (dim == -1 || dim == -2)
1815 for (int i = 1; i < args.length (); i++)
1817 if (! (dv.*concat_rule) (args(i).dims (), dim))
1820 error ("cat: dimension mismatch");
1845 int dv_len = dv.length ();
1848 for (int j = 0; j < n_args; j++)
1853 tmp = do_cat_op (tmp, args (j), ra_idx);
1863 error ("%s: indexing error", fname.c_str ());
1867 ra_idx (dim) += (dim < dv_tmp.length () ?
1879 DEFUN (horzcat, args, ,
1881 @deftypefn {Built-in Function} {} horzcat (@var{array1}, @var{array2}, @dots{}, @var{arrayN})\n\
1882 Return the horizontal concatenation of N-D array objects, @var{array1},\n\
1883 @var{array2}, @dots{}, @var{arrayN} along dimension 2.\n\
1885 Arrays may also be concatenated horizontally using the syntax for creating\n\
1886 new matrices. For example:\n\
1889 @var{hcat} = [ @var{array1}, @var{array2}, @dots{} ]\n\
1891 @seealso{cat, vertcat}\n\
1894 return do_cat (args, -2, "horzcat");
2089 DEFUN (vertcat, args, ,
2091 @deftypefn {Built-in Function} {} vertcat (@var{array1}, @var{array2}, @dots{}, @var{arrayN})\n\
2092 Return the vertical concatenation of N-D array objects, @var{array1},\n\
2093 @var{array2}, @dots{}, @var{arrayN} along dimension 1.\n\
2095 Arrays may also be concatenated vertically using the syntax for creating\n\
2096 new matrices. For example:\n\
2099 @var{vcat} = [ @var{array1}; @var{array2}; @dots{} ]\n\
2101 @seealso{cat, horzcat}\n\
2104 return do_cat (args, -1, "vertcat");
2115 @deftypefn {Built-in Function} {} cat (@var{dim}, @var{array1}, @var{array2}, @dots{}, @var{arrayN})\n\
2116 Return the concatenation of N-D array objects, @var{array1},\n\
2117 @var{array2}, @dots{}, @var{arrayN} along dimension @var{dim}.\n\
2122 B = zeros (2, 2);\n\
2124 @result{} 1 1 0 0\n\
2129 Alternatively, we can concatenate @var{A} and @var{B} along the\n\
2130 second dimension in the following way:\n\
2138 @var{dim} can be larger than the dimensions of the N-D array objects\n\
2139 and the result will thus have @var{dim} dimensions as the\n\
2140 following example shows:\n\
2144 cat (4, ones (2, 2), zeros (2, 2))\n\
2145 @result{} ans(:,:,1,1) =\n\
2156 @seealso{horzcat, vertcat}\n\
2161 if (args.length () > 0)
2168 retval = do_cat (args.slice (1, args.length () - 1), dim, "cat");
2170 error ("cat: DIM must be a valid dimension");
2173 error ("cat: DIM must be an integer");
2329 if (args.length () == 2 && args(1).length () >= args(1).ndims ())
2331 Array<int> vec = args(1).int_vector_value ();
2338 for (int i = 0; i < n; i++)
2352 DEFUN (permute, args, ,
2354 @deftypefn {Built-in Function} {} permute (@var{A}, @var{perm})\n\
2355 Return the generalized transpose for an N-D array object @var{A}.\n\
2356 The permutation vector @var{perm} must contain the elements\n\
2357 @code{1:ndims (A)} (in any order, but each element must appear only once).\n\
2358 @seealso{ipermute}\n\
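## Illustrative checks that permute rearranges dimensions according to the
## permutation vector and that ipermute undoes it (added here, not from the
## original test suite).
%!assert (size (permute (ones (2, 3, 4), [3, 1, 2])), [4, 2, 3])
%!assert (ipermute (permute ([1, 2; 3, 4], [2, 1]), [2, 1]), [1, 2; 3, 4])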
2364 DEFUN (ipermute, args, ,
2366 @deftypefn {Built-in Function} {} ipermute (@var{A}, @var{iperm})\n\
2367 The inverse of the @code{permute} function. The expression\n\
2370 ipermute (permute (A, perm), perm)\n\
2374 returns the original array @var{A}.\n\
2375 @seealso{permute}\n\
2383 @deftypefn {Built-in Function} {} length (@var{a})\n\
2384 Return the length of the object @var{a}.\n\
2386 The length is 0 for empty objects, 1 for scalars, and the number of elements\n\
2387 for vectors. For matrix objects, the length is the number of rows or\n\
2388 columns, whichever is greater (this odd definition is used for compatibility\n\
2389 with @sc{matlab}).\n\
2390 @seealso{numel, size}\n\
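## Illustrative checks of the max (rows, columns) definition noted above
## (added here, not from the original test suite).
%!assert (length (zeros (3, 5)), 5)
%!assert (length (ones (4, 1)), 4)
%!assert (length ([]), 0)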
2395 if (args.length () == 1)
2396 retval = args(0).length ();
2403 DEFUN (ndims, args, ,
2405 @deftypefn {Built-in Function} {} ndims (@var{a})\n\
2406 Return the number of dimensions of @var{a}.\n\
2407 For any array, the result will always be larger than or equal to 2.\n\
2408 Trailing singleton dimensions are not counted.\n\
2412 ndims (ones (4, 1, 2, 1))\n\
2421 if (args.length () == 1)
2422 retval = args(0).ndims ();
2429 DEFUN (numel, args, ,
2431 @deftypefn {Built-in Function} {} numel (@var{a})\n\
2432 @deftypefnx {Built-in Function} {} numel (@var{a}, @var{idx1}, @var{idx2}, @dots{})\n\
2433 Return the number of elements in the object @var{a}.\n\
2434 Optionally, if indices @var{idx1}, @var{idx2}, @dots{} are supplied,\n\
2435 return the number of elements that would result from the indexing\n\
2438 @var{a}(@var{idx1}, @var{idx2}, @dots{})\n\
2441 Note that the indices do not have to be numerical. For example,\n\
2446 @var{b} = ones (2, 3);\n\
2447 numel (@var{a}, @var{b})\n\
2452 will return 6, as this is the number of ways to index with @var{b}.\n\
2454 This method is also called when an object appears as lvalue with cs-list\n\
2455 indexing, i.e., @code{object@{@dots{}@}} or @code{object(@dots{}).field}.\n\
2463 retval = args(0).numel ();
2464 else if (nargin > 1)
2468 retval = dims_to_numel (args(0).dims (), args.slice (1, nargin-1));
2478 @deftypefn {Built-in Function} {} size (@var{a})\n\
2479 @deftypefnx {Built-in Function} {} size (@var{a}, @var{dim})\n\
2480 Return the number of rows and columns of @var{a}.\n\
2482 With one input argument and one output argument, the result is returned\n\
2483 in a row vector. If there are multiple output arguments, the number of\n\
2484 rows is assigned to the first, and the number of columns to the second,\n\
2485 etc. For example:\n\
2489 size ([1, 2; 3, 4; 5, 6])\n\
2490 @result{} [ 3, 2 ]\n\
2492 [nr, nc] = size ([1, 2; 3, 4; 5, 6])\n\
2498 If given a second argument, @code{size} will return the size of the\n\
2499 corresponding dimension. For example,\n\
2503 size ([1, 2; 3, 4; 5, 6], 2)\n\
2509 returns the number of columns in the given matrix.\n\
2510 @seealso{numel, ndims, length, rows, columns}\n\
2515 int nargin = args.length ();
2519 const dim_vector dimensions = args(0).dims ();
2525 for (int i = 0; i < nargout; i++)
2526 retval(i) = rdims(i);
2530 int ndims = dimensions.length ();
2534 for (int i = 0; i < ndims; i++)
2535 m(i) = dimensions(i);
2540 else if (nargin == 2 && nargout < 2)
2545 error ("size: DIM must be a scalar");
2553 retval(0) = dv(nd-1);
2558 error ("size: requested dimension DIM (= %d) out of range", nd);
2567 DEFUN (size_equal, args, ,
2569 @deftypefn {Built-in Function} {} size_equal (@var{a}, @var{b}, @dots{})\n\
2570 Return true if the dimensions of all arguments agree.\n\
2571 Trailing singleton dimensions are ignored.\n\
2572 Called with a single or no argument, size_equal returns true.\n\
2573 @seealso{size, numel, ndims}\n\
2578 int nargin = args.length ();
2586 for (int i = 1; i < nargin; ++i)
2590 if (a_dims != b_dims)
2603 @deftypefn {Built-in Function} {@var{scalar} =} nnz (@var{a})\n\
2604 Return the number of nonzero elements in @var{a}.\n\
2605 @seealso{sparse, nzmax}\n\
2610 if (args.length () == 1)
2611 retval = args(0).nnz ();
2618 DEFUN (nzmax, args, ,
2620 @deftypefn {Built-in Function} {@var{scalar} =} nzmax (@var{SM})\n\
2621 Return the amount of storage allocated to the sparse matrix @var{SM}.\n\
2622 Note that Octave tends to crop unused memory at the first opportunity\n\
2623 for sparse objects. There are some cases of user created sparse objects\n\
2624 where the value returned by @dfn{nzmax} will not be the same as @dfn{nnz},\n\
2625 but in general they will give the same result.\n\
2626 @seealso{nnz, spalloc, sparse}\n\
2631 if (args.length () == 1)
2632 retval = args(0).nzmax ();
2639 DEFUN (rows, args, ,
2641 @deftypefn {Built-in Function} {} rows (@var{a})\n\
2642 Return the number of rows of @var{a}.\n\
2643 @seealso{columns, size, length, numel, isscalar, isvector, ismatrix}\n\
2648 if (args.length () == 1)
2649 retval = args(0).rows ();
2656 DEFUN (columns, args, ,
2658 @deftypefn {Built-in Function} {} columns (@var{a})\n\
2659 Return the number of columns of @var{a}.\n\
2660 @seealso{rows, size, length, numel, isscalar, isvector, ismatrix}\n\
2665 if (args.length () == 1)
2675 @deftypefn {Built-in Function} {} sum (@var{x})\n\
2676 @deftypefnx {Built-in Function} {} sum (@var{x}, @var{dim})\n\
2677 @deftypefnx {Built-in Function} {} sum (@dots{}, \"native\")\n\
2678 @deftypefnx {Built-in Function} {} sum (@dots{}, \"double\")\n\
2679 @deftypefnx {Built-in Function} {} sum (@dots{}, \"extra\")\n\
2680 Sum of elements along dimension @var{dim}. If @var{dim} is\n\
2681 omitted, it defaults to the first non-singleton dimension.\n\
2683 If the optional argument @qcode{\"native\"} is given, then the sum is\n\
2684 performed in the same type as the original argument, rather than in the\n\
2685 default double type. For example:\n\
2689 sum ([true, true])\n\
2691 sum ([true, true], \"native\")\n\
2696 On the contrary, if @qcode{\"double\"} is given, the sum is performed in\n\
2697 double precision even for single precision inputs.\n\
2699 For double precision inputs, @qcode{\"extra\"} indicates that a more accurate\n\
2700 algorithm than straightforward summation is to be used. For single precision\n\
2701 inputs, @qcode{\"extra\"} is the same as @qcode{\"double\"}. Otherwise,\n\
2702 @qcode{\"extra\"} has no effect.\n\
2703 @seealso{cumsum, sumsq, prod}\n\
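## Illustrative checks of the "native" and "double" options, based on the
## docstring above (added here, not from the original test suite).
%!assert (sum ([true, true]), 2)
%!assert (sum ([true, true], "native"), true)
%!assert (class (sum (int8 ([1, 2, 3]), "native")), "int8")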
2708 int nargin = args.length ();
2710 bool isnative = false;
2711 bool isdouble = false;
2712 bool isextra = false;
2714 if (nargin > 1 && args(nargin - 1).is_string ())
2716 std::string str = args(nargin - 1).string_value ();
2720 if (str == "native")
2722 else if (str == "double")
2724 else if (str == "extra")
2727 error ("sum: unrecognized string argument");
2735 if (nargin == 1 || nargin == 2)
2742 dim = args(1).int_value () - 1;
2744 error ("sum: invalid dimension DIM = %d", dim + 1);
2755 warning ("sum: 'extra' not yet implemented for sparse matrices");
2767 warning ("sum: 'extra' not yet implemented for sparse matrices");
2776 if (isdouble || isextra)
2782 if (isdouble || isextra)
2788 #define MAKE_INT_BRANCH(X) \
2791 retval = arg.X ## _array_value ().sum (dim); \
2793 retval = arg.X ## _array_value ().dsum (dim); \
2803 #undef MAKE_INT_BRANCH
2898 DEFUN (sumsq, args, ,
2900 @deftypefn {Built-in Function} {} sumsq (@var{x})\n\
2901 @deftypefnx {Built-in Function} {} sumsq (@var{x}, @var{dim})\n\
2902 Sum of squares of elements along dimension @var{dim}. If @var{dim}\n\
2903 is omitted, it defaults to the first non-singleton dimension.\n\
2905 This function is conceptually equivalent to computing\n\
2908 sum (x .* conj (x), dim)\n\
2912 but it uses less memory and avoids calling @code{conj} if @var{x} is real.\n\
2913 @seealso{sum, prod}\n\
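## Illustrative checks of the sum-of-squares definition above (added here,
## not from the original test suite).
%!assert (sumsq ([1, 2, 3]), 14)
%!assert (sumsq ([1, 2; 3, 4], 2), [5; 25])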
2937 DEFUN (islogical, args, ,
2939 @deftypefn {Built-in Function} {} islogical (@var{x})\n\
2940 @deftypefnx {Built-in Function} {} isbool (@var{x})\n\
2941 Return true if @var{x} is a logical object.\n\
2942 @seealso{isfloat, isinteger, ischar, isnumeric, isa}\n\
2947 if (args.length () == 1)
2971 DEFUN (isinteger, args, ,
2973 @deftypefn {Built-in Function} {} isinteger (@var{x})\n\
2974 Return true if @var{x} is an integer object (int8, uint8, int16, etc.).\n\
2975 Note that @w{@code{isinteger (14)}} is false because numeric constants in\n\
2976 Octave are double precision floating point values.\n\
2977 @seealso{isfloat, ischar, islogical, isnumeric, isa}\n\
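## Illustrative checks that only integer-typed objects qualify, per the
## docstring above (added here, not from the original test suite).
%!assert (isinteger (int8 (5)), true)
%!assert (isinteger (14), false)
%!assert (isinteger (single (14)), false)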
2982 if (args.length () == 1)
2990 DEFUN (iscomplex, args, ,
2992 @deftypefn {Built-in Function} {} iscomplex (@var{x})\n\
2993 Return true if @var{x} is a complex-valued numeric object.\n\
2994 @seealso{isreal, isnumeric, islogical, ischar, isfloat, isa}\n\
2999 if (args.length () == 1)
3007 DEFUN (isfloat, args, ,
3009 @deftypefn {Built-in Function} {} isfloat (@var{x})\n\
3010 Return true if @var{x} is a floating-point numeric object.\n\
3011 Objects of class double or single are floating-point objects.\n\
3012 @seealso{isinteger, ischar, islogical, isnumeric, isa}\n\
3017 if (args.length () == 1)
3028 DEFUN (complex, args, ,
3030 @deftypefn {Built-in Function} {} complex (@var{x})\n\
3031 @deftypefnx {Built-in Function} {} complex (@var{re}, @var{im})\n\
3032 Return a complex result from real arguments. With 1 real argument @var{x},\n\
3033 return the complex result @code{@var{x} + 0i}. With 2 real arguments,\n\
3034 return the complex result @code{@var{re} + @var{im}i}. @code{complex} can\n\
3035 often be more convenient than expressions such as @code{a + i*b}.\n\
3040 complex ([1, 2], [3, 4])\n\
3041 @result{} [ 1 + 3i 2 + 4i ]\n\
3044 @seealso{real, imag, iscomplex, abs, arg}\n\
3049 int nargin = args.length ();
3068 if (arg.numel () == 1)
3085 if (arg.numel () == 1)
3102 error ("complex: invalid conversion");
3105 else if (nargin == 2)
3117 if (re.numel () == 1)
3120 if (re_val.nnz () == 0)
3132 i < im_val.cidx (j + 1); i++)
3133 result.data (im_val.ridx (i) + off) =
3134 result.data (im_val.ridx (i) + off) +
3140 else if (im.numel () == 1)
3143 if (im_val.nnz () == 0)
3157 i < re_val.cidx (j + 1); i++)
3158 result.data (re_val.ridx (i) + off) =
3159 result.data (re_val.ridx (i) + off) +
3167 if (re_val.dims () == im_val.dims ())
3176 error ("complex: dimension mismatch");
3182 if (re.numel () == 1)
3186 if (im.numel () == 1)
3216 if (im.numel () == 1)
3238 if (re_val.dims () == im_val.dims ())
3251 error ("complex: dimension mismatch");
3256 else if (re.numel () == 1)
3260 if (im.numel () == 1)
3287 if (im.numel () == 1)
3307 if (re_val.dims () == im_val.dims ())
3318 error ("complex: dimension mismatch");
3324 error ("complex: invalid conversion");
3332 DEFUN (isreal, args, ,
3334 @deftypefn {Built-in Function} {} isreal (@var{x})\n\
3335 Return true if @var{x} is a non-complex matrix or scalar.\n\
3336 For compatibility with @sc{matlab}, this includes logical and character\n\
3338 @seealso{iscomplex, isnumeric, isa}\n\
3343 if (args.length () == 1)
3351 DEFUN (isempty, args, ,
3353 @deftypefn {Built-in Function} {} isempty (@var{a})\n\
3354 Return true if @var{a} is an empty matrix (any one of its dimensions is\n\
3355 zero). Otherwise, return false.\n\
3356 @seealso{isnull, isa}\n\
3361 if (args.length () == 1)
3374 DEFUN (isnumeric, args, ,
3376 @deftypefn {Built-in Function} {} isnumeric (@var{x})\n\
3377 Return true if @var{x} is a numeric object, i.e., an integer, real, or\n\
3378 complex array. Logical and character arrays are not considered to be\n\
3380 @seealso{isinteger, isfloat, isreal, iscomplex, islogical, ischar, iscell, isstruct, isa}\n\
3385 if (args.length () == 1)
3409 DEFUN (ismatrix, args, ,
3411 @deftypefn {Built-in Function} {} ismatrix (@var{a})\n\
3412 Return true if @var{a} is a numeric, logical, or character matrix.\n\
3413 Scalars (1x1 matrices) and vectors (@nospell{1xN} or @nospell{Nx1} matrices)\n\
3414 are subsets of the more general N-dimensional matrix and @code{ismatrix}\n\
3415 will return true for these objects as well.\n\
3416 @seealso{isscalar, isvector, iscell, isstruct, issparse, isa}\n\
3421 if (args.length () == 1)
3464 int nargin = args.length ();
3470 if (nargin > 0 && args(nargin-1).is_string ())
3472 std::string nm = args(nargin-1).string_value ();
3494 for (int i = 0; i < nargin; i++)
3496 dims(i) = args(i).is_empty () ? 0 : args(i).idx_type_value ();
3500 error ("%s: expecting scalar integer arguments", fcn);
3563 if (val == 1 && dims.length () == 2 && dims (0) == 1)
3564 retval = Range (1.0, 0.0, dims (1));
3575 error ("%s: invalid class name", fcn);
3590 int nargin = args.length ();
3596 if (nargin > 0 && args(nargin-1).is_string ())
3598 std::string nm = args(nargin-1).string_value ();
3620 for (int i = 0; i < nargin; i++)
3622 dims(i) = args(i).is_empty () ? 0 : args(i).idx_type_value ();
3626 error ("%s: expecting scalar integer arguments", fcn);
3656 error ("%s: invalid class name", fcn);
3670 int nargin = args.length ();
3676 if (nargin > 0 && args(nargin-1).is_string ())
3678 std::string nm = args(nargin-1).string_value ();
3700 for (int i = 0; i < nargin; i++)
3702 dims(i) = args(i).is_empty () ? 0 : args(i).idx_type_value ();
3706 error ("%s: expecting scalar integer arguments", fcn);
3728 retval = FloatNDArray (dims, static_cast <float> (val));
3736 error ("%s: invalid class name", fcn);
3751 int nargin = args.length ();
3757 if (nargin > 0 && args(nargin-1).is_string ())
3759 std::string nm = args(nargin-1).string_value ();
3781 for (int i = 0; i < nargin; i++)
3783 dims(i) = args(i).is_empty () ? 0 : args(i).idx_type_value ();
3787 error ("%s: expecting scalar integer arguments", fcn);
3810 static_cast<FloatComplex> (val));
3818 error ("%s: invalid class name", fcn);
3832 int nargin = args.length ();
3847 dims.resize (nargin);
3849 for (int i = 0; i < nargin; i++)
3851 dims(i) = args(i).is_empty () ? 0 : args(i).idx_type_value ();
3855 error ("%s: expecting scalar integer arguments", fcn);
3865 dims.chop_trailing_singletons ();
3879 DEFUN (ones, args, ,
3881 @deftypefn {Built-in Function} {} ones (@var{n})\n\
3882 @deftypefnx {Built-in Function} {} ones (@var{m}, @var{n})\n\
3883 @deftypefnx {Built-in Function} {} ones (@var{m}, @var{n}, @var{k}, @dots{})\n\
3884 @deftypefnx {Built-in Function} {} ones ([@var{m} @var{n} @dots{}])\n\
3885 @deftypefnx {Built-in Function} {} ones (@dots{}, @var{class})\n\
3886 Return a matrix or N-dimensional array whose elements are all 1.\n\
3887 If invoked with a single scalar integer argument @var{n}, return a square\n\
3888 @nospell{NxN} matrix. If invoked with two or more scalar\n\
3889 integer arguments, or a vector of integer values, return an array with\n\
3890 the given dimensions.\n\
3892 If you need to create a matrix whose values are all the same, you should\n\
3893 use an expression like\n\
3896 val_matrix = val * ones (m, n)\n\
3899 The optional argument @var{class} specifies the class of the return array\n\
3900 and defaults to double. For example:\n\
3903 val = ones (m,n, \"uint8\")\n\
3928 DEFUN (zeros, args, ,
3930 @deftypefn {Built-in Function} {} zeros (@var{n})\n\
3931 @deftypefnx {Built-in Function} {} zeros (@var{m}, @var{n})\n\
3932 @deftypefnx {Built-in Function} {} zeros (@var{m}, @var{n}, @var{k}, @dots{})\n\
3933 @deftypefnx {Built-in Function} {} zeros ([@var{m} @var{n} @dots{}])\n\
3934 @deftypefnx {Built-in Function} {} zeros (@dots{}, @var{class})\n\
3935 Return a matrix or N-dimensional array whose elements are all 0.\n\
3936 If invoked with a single scalar integer argument, return a square\n\
3937 @nospell{NxN} matrix. If invoked with two or more scalar\n\
3938 integer arguments, or a vector of integer values, return an array with\n\
3939 the given dimensions.\n\
3941 The optional argument @var{class} specifies the class of the return array\n\
3942 and defaults to double. For example:\n\
3945 val = zeros (m,n, \"uint8\")\n\
3972 @c List other form of function in documentation index\n\
3975 @deftypefn {Built-in Function} {} Inf\n\
3976 @deftypefnx {Built-in Function} {} Inf (@var{n})\n\
3977 @deftypefnx {Built-in Function} {} Inf (@var{n}, @var{m})\n\
3978 @deftypefnx {Built-in Function} {} Inf (@var{n}, @var{m}, @var{k}, @dots{})\n\
3979 @deftypefnx {Built-in Function} {} Inf (@dots{}, @var{class})\n\
3980 Return a scalar, matrix or N-dimensional array whose elements are all equal\n\
3981 to the IEEE representation for positive infinity.\n\
3983 Infinity is produced when results are too large to be represented using the\n\
3984 IEEE floating point format for numbers. Two common examples which\n\
3985 produce infinity are division by zero and overflow.\n\
3990 @result{} Inf Inf\n\
3994 When called with no arguments, return a scalar with the value @samp{Inf}.\n\
3995 When called with a single argument, return a square matrix with the dimension\n\
3996 specified. When called with more than one scalar argument the first two\n\
3997 arguments are taken as the number of rows and columns and any further\n\
3998 arguments specify additional matrix dimensions.\n\
3999 The optional argument @var{class} specifies the return type and may be\n\
4000 either @qcode{\"double\"} or @qcode{\"single\"}.\n\
4001 @seealso{isinf, NaN}\n\
4029 @c List other form of function in documentation index\n\
4032 @deftypefn {Built-in Function} {} NaN\n\
4033 @deftypefnx {Built-in Function} {} NaN (@var{n})\n\
4034 @deftypefnx {Built-in Function} {} NaN (@var{n}, @var{m})\n\
4035 @deftypefnx {Built-in Function} {} NaN (@var{n}, @var{m}, @var{k}, @dots{})\n\
4036 @deftypefnx {Built-in Function} {} NaN (@dots{}, @var{class})\n\
4037 Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
4038 to the IEEE symbol NaN (Not a Number).\n\
4039 NaN is the result of operations which do not produce a well defined numerical\n\
4040 result. Common operations which produce a NaN are arithmetic with infinity\n\
4042 ($\\infty - \\infty$), zero divided by zero ($0/0$),\n\
4045 (Inf - Inf), zero divided by zero (0/0),\n\
4047 and any operation involving another NaN value (5 + NaN).\n\
4049 Note that NaN always compares not equal to NaN (NaN != NaN). This behavior\n\
4050 is specified by the IEEE standard for floating point arithmetic. To\n\
4051 find NaN values, use the @code{isnan} function.\n\
4053 When called with no arguments, return a scalar with the value @samp{NaN}.\n\
4054 When called with a single argument, return a square matrix with the dimension\n\
4055 specified. When called with more than one scalar argument the first two\n\
4056 arguments are taken as the number of rows and columns and any further\n\
4057 arguments specify additional matrix dimensions.\n\
4058 The optional argument @var{class} specifies the return type and may be\n\
4059 either @qcode{\"double\"} or @qcode{\"single\"}.\n\
4060 @seealso{isnan, Inf}\n\
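## Illustrative checks of the NaN comparison rule and of isnan, as described
## above (added here, not from the original test suite).
%!assert (NaN == NaN, false)
%!assert (isnan ([1, NaN, Inf]), [false, true, false])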
4088 @deftypefn {Built-in Function} {} e\n\
4089 @deftypefnx {Built-in Function} {} e (@var{n})\n\
4090 @deftypefnx {Built-in Function} {} e (@var{n}, @var{m})\n\
4091 @deftypefnx {Built-in Function} {} e (@var{n}, @var{m}, @var{k}, @dots{})\n\
4092 @deftypefnx {Built-in Function} {} e (@dots{}, @var{class})\n\
4093 Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
4094 to the base of natural logarithms. The constant\n\
4096 $e$ satisfies the equation $\\log (e) = 1$.\n\
4099 @samp{e} satisfies the equation @code{log} (e) = 1.\n\
4102 When called with no arguments, return a scalar with the value @math{e}. When\n\
4103 called with a single argument, return a square matrix with the dimension\n\
4104 specified. When called with more than one scalar argument the first two\n\
4105 arguments are taken as the number of rows and columns and any further\n\
4106 arguments specify additional matrix dimensions.\n\
4107 The optional argument @var{class} specifies the return type and may be\n\
4108 either @qcode{\"double\"} or @qcode{\"single\"}.\n\
4109 @seealso{log, exp, pi, I}\n\
4115 double e_val = exp (1.0);
4123 @deftypefn {Built-in Function} {} eps\n\
4124 @deftypefnx {Built-in Function} {} eps (@var{x})\n\
4125 @deftypefnx {Built-in Function} {} eps (@var{n}, @var{m})\n\
4126 @deftypefnx {Built-in Function} {} eps (@var{n}, @var{m}, @var{k}, @dots{})\n\
4127 @deftypefnx {Built-in Function} {} eps (@dots{}, @var{class})\n\
4128 Return a scalar, matrix or N-dimensional array whose elements are all eps,\n\
4129 the machine precision. More precisely, @code{eps} is the relative spacing\n\
4130 between any two adjacent numbers in the machine's floating point system.\n\
4131 This number is obviously system dependent. On machines that support IEEE\n\
4132 floating point arithmetic, @code{eps} is approximately\n\
4134 $2.2204\\times10^{-16}$ for double precision and $1.1921\\times10^{-7}$\n\
4137 2.2204e-16 for double precision and 1.1921e-07\n\
4139 for single precision.\n\
4141 When called with no arguments, return a scalar with the value\n\
4142 @code{eps (1.0)}.\n\
4143 Given a single argument @var{x}, return the distance between @var{x} and\n\
4144 the next largest value.\n\
4145 When called with more than one argument the first two arguments are taken as\n\
4146 the number of rows and columns and any further\n\
4147 arguments specify additional matrix dimensions.\n\
4148 The optional argument @var{class} specifies the return type and may be\n\
4149 either @qcode{\"double\"} or @qcode{\"single\"}.\n\
4150 @seealso{realmax, realmin, intmax, bitmax}\n\
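## Illustrative checks of the documented eps values, assuming IEEE double
## (2^-52) and single (2^-23) spacing at 1.0 (added here, not from the
## original test suite).
%!assert (eps, 2^-52)
%!assert (eps (1), 2^-52)
%!assert (eps (single (1)), single (2^-23))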
4153 int nargin = args.length ();
4156 if (nargin == 1 && ! args(0).is_string ())
4158 if (args(0).is_single_type ())
4168 float val = ::fabsf (x(i));
4172 epsval(i) = powf (2.0, -149e0);
4176 gnulib::frexpf (val, &expon);
4177 epsval(i) = std::pow (static_cast <float> (2.0),
4178 static_cast <float> (expon - 24));
4194 double val = ::fabs (x(i));
4198 epsval(i) = pow (2.0, -1074e0);
4202 gnulib::frexp (val, &expon);
4203 epsval(i) = std::pow (static_cast <double> (2.0),
4204 static_cast <double> (expon - 53));
4212 retval = fill_matrix (args, std::numeric_limits<double>::epsilon (),
4213 std::numeric_limits<float>::epsilon (), "eps");
4246 @deftypefn {Built-in Function} {} pi\n\
4247 @deftypefnx {Built-in Function} {} pi (@var{n})\n\
4248 @deftypefnx {Built-in Function} {} pi (@var{n}, @var{m})\n\
4249 @deftypefnx {Built-in Function} {} pi (@var{n}, @var{m}, @var{k}, @dots{})\n\
4250 @deftypefnx {Built-in Function} {} pi (@dots{}, @var{class})\n\
4251 Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
4252 to the ratio of the circumference of a circle to its\n\
4254 diameter ($\pi$).\n\
4259 Internally, @code{pi} is computed as @samp{4.0 * atan (1.0)}.\n\
4261 When called with no arguments, return a scalar with the value of\n\
4268 When called with a single argument, return a square matrix with the dimension\n\
4269 specified. When called with more than one scalar argument the first two\n\
4270 arguments are taken as the number of rows and columns and any further\n\
4271 arguments specify additional matrix dimensions.\n\
4272 The optional argument @var{class} specifies the return type and may be\n\
4273 either @qcode{\"double\"} or @qcode{\"single\"}.\n\
4278 double pi_val = M_PI;
4280 double pi_val = 4.0 * atan (1.0);
4286 DEFUN (realmax, args, ,
4288 @deftypefn {Built-in Function} {} realmax\n\
4289 @deftypefnx {Built-in Function} {} realmax (@var{n})\n\
4290 @deftypefnx {Built-in Function} {} realmax (@var{n}, @var{m})\n\
4291 @deftypefnx {Built-in Function} {} realmax (@var{n}, @var{m}, @var{k}, @dots{})\n\
4292 @deftypefnx {Built-in Function} {} realmax (@dots{}, @var{class})\n\
4293 Return a scalar, matrix or N-dimensional array whose elements are all equal\n\
4294 to the largest floating point number that is representable. The actual\n\
4295 value is system dependent. On machines that support IEEE\n\
4296 floating point arithmetic, @code{realmax} is approximately\n\
4298 $1.7977\\times10^{308}$ for double precision and $3.4028\\times10^{38}$\n\
4301 1.7977e+308 for double precision and 3.4028e+38\n\
4303 for single precision.\n\
4305 When called with no arguments, return a scalar with the value\n\
4306 @code{realmax (@qcode{\"double\"})}.\n\
4307 When called with a single argument, return a square matrix with the dimension\n\
4308 specified. When called with more than one scalar argument the first two\n\
4309 arguments are taken as the number of rows and columns and any further\n\
4310 arguments specify additional matrix dimensions.\n\
4311 The optional argument @var{class} specifies the return type and may be\n\
4312 either @qcode{\"double\"} or @qcode{\"single\"}.\n\
4313 @seealso{realmin, intmax, bitmax, eps}\n\
4320 DEFUN (realmin, args, ,
4322 @deftypefn {Built-in Function} {} realmin\n\
4323 @deftypefnx {Built-in Function} {} realmin (@var{n})\n\
4324 @deftypefnx {Built-in Function} {} realmin (@var{n}, @var{m})\n\
4325 @deftypefnx {Built-in Function} {} realmin (@var{n}, @var{m}, @var{k}, @dots{})\n\
4326 @deftypefnx {Built-in Function} {} realmin (@dots{}, @var{class})\n\
4327 Return a scalar, matrix or N-dimensional array whose elements are all equal\n\
4328 to the smallest normalized floating point number that is representable.\n\
4329 The actual value is system dependent. On machines that support\n\
4330 IEEE floating point arithmetic, @code{realmin} is approximately\n\
4332 $2.2251\\times10^{-308}$ for double precision and $1.1755\\times10^{-38}$\n\
4335 2.2251e-308 for double precision and 1.1755e-38\n\
4337 for single precision.\n\
4339 When called with no arguments, return a scalar with the value\n\
4340 @code{realmin (@qcode{\"double\"})}.\n\
4341 When called with a single argument, return a square matrix with the dimension\n\
4342 specified. When called with more than one scalar argument the first two\n\
4343 arguments are taken as the number of rows and columns and any further\n\
4344 arguments specify additional matrix dimensions.\n\
4345 The optional argument @var{class} specifies the return type and may be\n\
4346 either @qcode{\"double\"} or @qcode{\"single\"}.\n\
4347 @seealso{realmax, intmin, eps}\n\
4356 @c List other forms of function in documentation index\n\
4361 @deftypefn {Built-in Function} {} I\n\
4362 @deftypefnx {Built-in Function} {} I (@var{n})\n\
4363 @deftypefnx {Built-in Function} {} I (@var{n}, @var{m})\n\
4364 @deftypefnx {Built-in Function} {} I (@var{n}, @var{m}, @var{k}, @dots{})\n\
4365 @deftypefnx {Built-in Function} {} I (@dots{}, @var{class})\n\
4366 Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
4367 to the pure imaginary unit, defined as\n\
4372 @code{sqrt (-1)}.\n\
4375 I, and its equivalents i, j, and J, are functions so any of the names may\n\
4376 be reused for other purposes (such as i for a counter variable).\n\
4378 When called with no arguments, return a scalar with the value @math{i}. When\n\
4379 called with a single argument, return a square matrix with the dimension\n\
4380 specified. When called with more than one scalar argument the first two\n\
4381 arguments are taken as the number of rows and columns and any further\n\
4382 arguments specify additional matrix dimensions.\n\
4383 The optional argument @var{class} specifies the return type and may be\n\
4384 either @qcode{\"double\"} or @qcode{\"single\"}.\n\
4385 @seealso{e, pi, log, exp}\n\
4397 @deftypefn {Built-in Function} {} NA\n\
4398 @deftypefnx {Built-in Function} {} NA (@var{n})\n\
4399 @deftypefnx {Built-in Function} {} NA (@var{n}, @var{m})\n\
4400 @deftypefnx {Built-in Function} {} NA (@var{n}, @var{m}, @var{k}, @dots{})\n\
4401 @deftypefnx {Built-in Function} {} NA (@dots{}, @var{class})\n\
4402 Return a scalar, matrix, or N-dimensional array whose elements are all equal\n\
4403 to the special constant used to designate missing values.\n\
4405 Note that NA always compares not equal to NA (NA != NA).\n\
4406 To find NA values, use the @code{isna} function.\n\
4408 When called with no arguments, return a scalar with the value @samp{NA}.\n\
4409 When called with a single argument, return a square matrix with the dimension\n\
4410 specified. When called with more than one scalar argument the first two\n\
4411 arguments are taken as the number of rows and columns and any further\n\
4412 arguments specify additional matrix dimensions.\n\
4413 The optional argument @var{class} specifies the return type and may be\n\
4414 either @qcode{\"double\"} or @qcode{\"single\"}.\n\
4427 DEFUN (false, args, ,
4429 @deftypefn {Built-in Function} {} false (@var{x})\n\
4430 @deftypefnx {Built-in Function} {} false (@var{n}, @var{m})\n\
4431 @deftypefnx {Built-in Function} {} false (@var{n}, @var{m}, @var{k}, @dots{})\n\
4432 Return a matrix or N-dimensional array whose elements are all logical 0.\n\
4433 If invoked with a single scalar integer argument, return a square\n\
4434 matrix of the specified size. If invoked with two or more scalar\n\
4435 integer arguments, or a vector of integer values, return an array with\n\
4436 given dimensions.\n\
4443 DEFUN (true, args, ,
4445 @deftypefn {Built-in Function} {} true (@var{x})\n\
4446 @deftypefnx {Built-in Function} {} true (@var{n}, @var{m})\n\
4447 @deftypefnx {Built-in Function} {} true (@var{n}, @var{m}, @var{k}, @dots{})\n\
4448 Return a matrix or N-dimensional array whose elements are all logical 1.\n\
4449 If invoked with a single scalar integer argument, return a square\n\
4450 matrix of the specified size. If invoked with two or more scalar\n\
4451 integer arguments, or a vector of integer values, return an array with\n\
4452 given dimensions.\n\
4465 typename MT::element_type one (1);
4467 if (nr == 1 && nc == 1)
4473 typename MT::element_type zero (0);
4477 if (nr > 0 && nc > 0)
4481 for (int i = 0; i < n; i++)
4491 #define INSTANTIATE_EYE(T) \
4492 template octave_value identity_matrix<T> (int, int)
4564 error ("eye: invalid class name");
4572 #undef INT_EYE_MATRIX
4576 @deftypefn {Built-in Function} {} eye (@var{n})\n\
4577 @deftypefnx {Built-in Function} {} eye (@var{m}, @var{n})\n\
4578 @deftypefnx {Built-in Function} {} eye ([@var{m} @var{n}])\n\
4579 @deftypefnx {Built-in Function} {} eye (@dots{}, @var{class})\n\
4580 Return an identity matrix. If invoked with a single scalar argument @var{n},\n\
4581 return a square @nospell{NxN} identity matrix. If\n\
4582 supplied two scalar arguments (@var{m}, @var{n}), @code{eye} takes them to be\n\
4583 the number of rows and columns. If given a vector with two elements,\n\
4584 @code{eye} uses the values of the elements as the number of rows and columns,\n\
4585 respectively. For example:\n\
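@example\n\
@group\n\
eye (3)\n\
  @result{} [ 1, 0, 0; 0, 1, 0; 0, 0, 1 ]\n\
@end group\n\
@end example\n\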
4596 The following expressions all produce the same result:\n\
4604 eye (size ([1, 2; 3, 4]))\n\
4608 The optional argument @var{class} allows @code{eye} to return an array of\n\
4609 the specified type, for example:\n\
4612 val = eye (n, m, \"uint8\")\n\
4615 Calling @code{eye} with no arguments is equivalent to calling it\n\
4616 with an argument of 1. Any negative dimensions are treated as zero. \n\
4617 These odd definitions are for compatibility with @sc{matlab}.\n\
4618 @seealso{speye, ones, zeros}\n\
4623 int nargin = args.length ();
4629 if (nargin > 0 && args(nargin-1).is_string ())
4631 std::string nm = args(nargin-1).string_value ();
4692 typedef typename MT::column_vector_type CVT;
4693 typedef typename MT::element_type T;
4699 T bs = octave_value_extract<T> (base);
4702 T ls = octave_value_extract<T> (limit);
4707 CVT lv = octave_value_extract<CVT> (limit);
4708 CVT bv (lv.length (), bs);
4714 CVT bv = octave_value_extract<CVT> (base);
4717 T ls = octave_value_extract<T> (limit);
4718 CVT lv (bv.length (), ls);
4723 CVT lv = octave_value_extract<CVT> (limit);
4733 @deftypefn {Built-in Function} {} linspace (@var{base}, @var{limit})\n\
4734 @deftypefnx {Built-in Function} {} linspace (@var{base}, @var{limit}, @var{n})\n\
4735 Return a row vector with @var{n} linearly spaced elements between\n\
4736 @var{base} and @var{limit}. If the number of elements is greater than one,\n\
4737 then the endpoints @var{base} and @var{limit} are always included in\n\
4738 the range. If @var{base} is greater than @var{limit}, the elements are\n\
4739 stored in decreasing order. If the number of points is not specified, a\n\
4740 value of 100 is used.\n\
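For example:\n\
\n\
@example\n\
@group\n\
linspace (1, 2, 5)\n\
  @result{} [ 1, 1.25, 1.5, 1.75, 2 ]\n\
@end group\n\
@end example\n\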
4742 The @code{linspace} function always returns a row vector if both\n\
4743 @var{base} and @var{limit} are scalars. If one, or both, of them are column\n\
4744 vectors, @code{linspace} returns a matrix.\n\
4746 For compatibility with @sc{matlab}, return the second argument (@var{limit})\n\
4747 if fewer than two values are requested.\n\
4748 @seealso{logspace}\n\
4753 int nargin = args.length ();
4757 if (nargin != 2 && nargin != 3)
4784 retval = do_linspace<FloatComplexMatrix> (arg_1, arg_2, npoints);
4786 retval = do_linspace<FloatMatrix> (arg_1, arg_2, npoints);
4792 retval = do_linspace<ComplexMatrix> (arg_1, arg_2, npoints);
4794 retval = do_linspace<Matrix> (arg_1, arg_2, npoints);
4798 error ("linspace: N must be an integer");
4824 DEFUN (resize, args, ,
4826 @deftypefn {Built-in Function} {} resize (@var{x}, @var{m})\n\
4827 @deftypefnx {Built-in Function} {} resize (@var{x}, @var{m}, @var{n}, @dots{})\n\
4828 @deftypefnx {Built-in Function} {} resize (@var{x}, [@var{m} @var{n} @dots{}])\n\
4829 Resize @var{x} cutting off elements as necessary.\n\
4831 In the result, an element is equal to the corresponding element of @var{x}\n\
4832 if its indices are within the bounds of @var{x};\n\
4833 otherwise, the element is set to zero.\n\
4835 In other words, the statement\n\
4838 y = resize (x, dv)\n\
4842 is equivalent to the following code:\n\
4846 y = zeros (dv, class (x));\n\
4847 sz = min (dv, size (x));\n\
4848 for i = 1:length (sz)\n\
4849 idx@{i@} = 1:sz(i);\n\
4851 y(idx@{:@}) = x(idx@{:@});\n\
4856 but is performed more efficiently.\n\
4858 If only @var{m} is supplied, and it is a scalar, the dimension of the\n\
4859 result is @var{m}-by-@var{m}.\n\
4860 If @var{m}, @var{n}, @dots{} are all scalars, then the dimensions of\n\
4861 the result are @var{m}-by-@var{n}-by-@dots{}.\n\
4862 If given a vector as input, then the\n\
4863 dimensions of the result are given by the elements of that vector.\n\
4865 An object can be resized to more dimensions than it has;\n\
4866 in which case the missing dimensions are assumed to be 1.\n\
4867 Resizing an object to fewer dimensions is not possible.\n\
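For example:\n\
\n\
@example\n\
@group\n\
resize ([1, 2; 3, 4], 3)\n\
  @result{} [ 1, 2, 0; 3, 4, 0; 0, 0, 0 ]\n\
@end group\n\
@end example\n\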
4868 @seealso{reshape, postpad, prepad, cat}\n\
4872 int nargin = args.length ();
4877 int ndim = vec.length ();
4888 for (int i = 0; i < ndim; i++)
4891 retval = retval.resize (dv, true);
4894 else if (nargin > 2)
4903 retval = retval.resize (dv, true);
4914 DEFUN (reshape, args, ,
4916 @deftypefn {Built-in Function} {} reshape (@var{A}, @var{m}, @var{n}, @dots{})\n\
4917 @deftypefnx {Built-in Function} {} reshape (@var{A}, [@var{m} @var{n} @dots{}])\n\
4918 @deftypefnx {Built-in Function} {} reshape (@var{A}, @dots{}, [], @dots{})\n\
4919 @deftypefnx {Built-in Function} {} reshape (@var{A}, @var{size})\n\
4920 Return a matrix with the specified dimensions (@var{m}, @var{n}, @dots{})\n\
4921 whose elements are taken from the matrix @var{A}. The elements of the\n\
4922 matrix are accessed in column-major order (like Fortran arrays are stored).\n\
4924 The following code demonstrates reshaping a 1x4 row vector into a 2x2 square matrix:\n\
4929 reshape ([1, 2, 3, 4], 2, 2)\n\
  @result{} [ 1, 3; 2, 4 ]\n\
4936 Note that the total number of elements in the original\n\
4937 matrix (@code{prod (size (@var{A}))}) must match the total number of elements\n\
4938 in the new matrix (@code{prod ([@var{m} @var{n} @dots{}])}).\n\
4940 A single dimension of the return matrix may be left unspecified and Octave\n\
4941 will determine its size automatically. An empty matrix ([]) is used to flag\n\
4942 the unspecified dimension.\n\
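For example:\n\
\n\
@example\n\
@group\n\
reshape (1:6, 2, [])\n\
  @result{} [ 1, 3, 5; 2, 4, 6 ]\n\
@end group\n\
@end example\n\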
4943 @seealso{resize, vec, postpad, cat, squeeze}\n\
4948 int nargin = args.length ();
4956 if (new_size.length () < 2)
4958 error ("reshape: SIZE must have 2 or more dimensions");
4966 if (new_size(i) < 0)
4968 error ("reshape: SIZE must be non-negative");
4972 new_dims(i) = new_size(i);
4975 else if (nargin > 2)
4980 for (int i = 1; i < nargin; i++)
4982 if (args(i).is_empty ())
4986 error ("reshape: only a single dimension can be unknown");
4997 new_dims(i-1) = args(i).idx_type_value ();
5001 else if (new_dims(i-1) < 0)
5003 error ("reshape: SIZE must be non-negative");
5014 new_dims(empty_dim-1) = 0;
5020 if (a_nel != size_empty_dim * nel)
5021 error ("reshape: SIZE is not divisible by the product of known dimensions (= %d)",
5024 new_dims(empty_dim-1) = size_empty_dim;
5035 retval = args(0).reshape (new_dims);
5066 @deftypefn {Built-in Function} {@var{v} =} vec (@var{x})\n\
5067 @deftypefnx {Built-in Function} {@var{v} =} vec (@var{x}, @var{dim})\n\
5068 Return the vector obtained by stacking the columns of the matrix @var{x}\n\
5069 one above the other. Without @var{dim} this is equivalent to\n\
5070 @code{@var{x}(:)}. If @var{dim} is supplied, the dimensions of @var{v}\n\
5071 are set to @var{dim} with all elements along the last dimension.\n\
5072 This is equivalent to @code{shiftdim (@var{x}(:), 1-@var{dim})}.\n\
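For example:\n\
\n\
@example\n\
@group\n\
vec ([1, 2; 3, 4])\n\
  @result{} [ 1; 3; 2; 4 ]\n\
@end group\n\
@end example\n\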
5073 @seealso{vech, resize, cat}\n\
5079 int nargin = args.length ();
5081 if (nargin < 1 || nargin > 2)
5086 dim = args(1).idx_type_value ();
5089 error ("vec: DIM must be greater than zero");
5103 for (int i = 0; i < dim-1; i++)
5106 new_dims(dim-1) = retval.numel ();
5108 retval = retval.reshape (new_dims);
5130 DEFUN (squeeze, args, ,
5132 @deftypefn {Built-in Function} {} squeeze (@var{x})\n\
5133 Remove singleton dimensions from @var{x} and return the result.\n\
5134 Note that for compatibility with @sc{matlab}, all objects have\n\
5135 a minimum of two dimensions and row vectors are left unchanged.\n\
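For example:\n\
\n\
@example\n\
@group\n\
size (squeeze (ones (1, 3, 1, 2)))\n\
  @result{} [ 3, 2 ]\n\
@end group\n\
@end example\n\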
5136 @seealso{reshape}\n\
5142 retval = args(0).squeeze ();
5149 DEFUN (full, args, ,
5151 @deftypefn {Built-in Function} {@var{FM} =} full (@var{SM})\n\
5152 Return a full storage matrix from a sparse, diagonal, or permutation matrix, or a range.\n\
5160 retval = args(0).full_value ();
5171 @deftypefn {Built-in Function} {} norm (@var{A})\n\
5172 @deftypefnx {Built-in Function} {} norm (@var{A}, @var{p})\n\
5173 @deftypefnx {Built-in Function} {} norm (@var{A}, @var{p}, @var{opt})\n\
5174 Compute the p-norm of the matrix @var{A}. If the second argument is\n\
5175 missing, @code{p = 2} is assumed.\n\
5177 If @var{A} is a matrix (or sparse matrix):\n\
5180 @item @var{p} = @code{1}\n\
5181 1-norm, the largest column sum of the absolute values of @var{A}.\n\
5183 @item @var{p} = @code{2}\n\
5184 Largest singular value of @var{A}.\n\
5186 @item @var{p} = @code{Inf} or @qcode{\"inf\"}\n\
5187 @cindex infinity norm\n\
5188 Infinity norm, the largest row sum of the absolute values of @var{A}.\n\
5190 @item @var{p} = @qcode{\"fro\"}\n\
5191 @cindex Frobenius norm\n\
5192 Frobenius norm of @var{A}, @code{sqrt (sum (diag (@var{A}' * @var{A})))}.\n\
5194 @item other @var{p}, @code{@var{p} > 1}\n\
5195 @cindex general p-norm\n\
5196 maximum @code{norm (A*x, p)} such that @code{norm (x, p) == 1}\n\
5199 If @var{A} is a vector or a scalar:\n\
5202 @item @var{p} = @code{Inf} or @qcode{\"inf\"}\n\
5203 @code{max (abs (@var{A}))}.\n\
5205 @item @var{p} = @code{-Inf}\n\
5206 @code{min (abs (@var{A}))}.\n\
5208 @item @var{p} = @qcode{\"fro\"}\n\
5209 Frobenius norm of @var{A}, @code{sqrt (sumsq (abs (A)))}.\n\
5211 @item @var{p} = 0\n\
5212 Hamming norm - the number of nonzero elements.\n\
5214 @item other @var{p}, @code{@var{p} > 1}\n\
5215 p-norm of @var{A}, @code{(sum (abs (@var{A}) .^ @var{p})) ^ (1/@var{p})}.\n\
5217 @item other @var{p}, @code{@var{p} < 1}\n\
5218 the p-pseudonorm defined as above.\n\
5221 If @var{opt} is the value @qcode{\"rows\"}, treat each row as a vector and\n\
5222 compute its norm. The result is returned as a column vector.\n\
5223 Similarly, if @var{opt} is @qcode{\"columns\"} or @qcode{\"cols\"} then\n\
5224 compute the norms of each column and return a row vector.\n\
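For example:\n\
\n\
@example\n\
@group\n\
norm ([3, 4])\n\
  @result{} 5\n\
norm ([3, 4], Inf)\n\
  @result{} 4\n\
@end group\n\
@end example\n\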
5225 @seealso{cond, svd}\n\
5230 int nargin = args.length ();
5232 if (nargin >= 1 && nargin <= 3)
5236 if (x_arg.ndims () == 2)
5238 enum { sfmatrix, sfcols, sfrows, sffrob, sfinf } strflag = sfmatrix;
5239 if (nargin > 1 && args(nargin-1).is_string ())
5241 std::string str = args(nargin-1).string_value ();
5242 if (str == "cols" || str == "columns")
5244 else if (str == "rows")
5246 else if (str == "fro")
5248 else if (str == "inf")
5251 error ("norm: unrecognized option: %s", str.c_str ());
5265 if ((strflag == sfcols || strflag == sfrows))
5267 if (str == "cols" || str == "columns" || str == "rows")
5268 error ("norm: invalid combination of options");
5269 else if (str == "fro")
5271 else if (str == "inf")
5274 error ("norm: unrecognized option: %s", str.c_str ());
5277 error ("norm: invalid combination of options");
5287 retval(0) = xnorm (x_arg, p_arg);
5306 error ("norm: only valid for 2-D objects");
5399 @deftypefn {Built-in Function} {} not (@var{x})\n\
5400 Return the logical NOT of @var{x}. This function is equivalent to @w{@code{! x}}.\n\
5402 @seealso{and, or, xor}\n\
5408 DEFUN (uplus, args, ,
5410 @deftypefn {Built-in Function} {} uplus (@var{x})\n\
5411 This function and @w{@tcode{+ x}} are equivalent.\n\
5412 @seealso{uminus, plus, minus}\n\
5418 DEFUN (uminus, args, ,
5420 @deftypefn {Built-in Function} {} uminus (@var{x})\n\
5421 This function and @w{@tcode{- x}} are equivalent.\n\
5422 @seealso{uplus, minus}\n\
5430 @deftypefn {Built-in Function} {} transpose (@var{x})\n\
5431 Return the transpose of @var{x}.\n\
5432 This function and @tcode{x.'} are equivalent.\n\
5433 @seealso{ctranspose}\n\
5459 DEFUN (ctranspose, args, ,
5461 @deftypefn {Built-in Function} {} ctranspose (@var{x})\n\
5462 Return the complex conjugate transpose of @var{x}.\n\
5463 This function and @tcode{x'} are equivalent.\n\
5464 @seealso{transpose}\n\
5510 int nargin = args.length ();
5525 for (int i = 2; i < nargin; i++)
5526 retval.assign (aop, args(i));
5533 DEFUN (plus, args, ,
5535 @deftypefn {Built-in Function} {} plus (@var{x}, @var{y})\n\
5536 @deftypefnx {Built-in Function} {} plus (@var{x1}, @var{x2}, @dots{})\n\
5537 This function and @w{@tcode{x + y}} are equivalent.\n\
5538 If more arguments are given, the summation is applied\n\
5539 cumulatively from left to right:\n\
5542 (@dots{}((x1 + x2) + x3) + @dots{})\n\
5545 At least one argument is required.\n\
5546 @seealso{minus, uplus}\n\
5553 DEFUN (minus, args, ,
5555 @deftypefn {Built-in Function} {} minus (@var{x}, @var{y})\n\
5556 This function and @w{@tcode{x - y}} are equivalent.\n\
5557 @seealso{plus, uminus}\n\
5563 DEFUN (mtimes, args, ,
5565 @deftypefn {Built-in Function} {} mtimes (@var{x}, @var{y})\n\
5566 @deftypefnx {Built-in Function} {} mtimes (@var{x1}, @var{x2}, @dots{})\n\
5567 Return the matrix multiplication product of inputs.\n\
5568 This function and @w{@tcode{x * y}} are equivalent.\n\
5569 If more arguments are given, the multiplication is applied\n\
5570 cumulatively from left to right:\n\
5573 (@dots{}((x1 * x2) * x3) * @dots{})\n\
5576 At least one argument is required.\n\
5577 @seealso{times, plus, minus, rdivide, mrdivide, mldivide, mpower}\n\
5584 DEFUN (mrdivide, args, ,
5586 @deftypefn {Built-in Function} {} mrdivide (@var{x}, @var{y})\n\
5587 Return the matrix right division of @var{x} and @var{y}.\n\
5588 This function and @w{@tcode{x / y}} are equivalent.\n\
5589 @seealso{mldivide, rdivide, plus, minus}\n\
5595 DEFUN (mpower, args, ,
5597 @deftypefn {Built-in Function} {} mpower (@var{x}, @var{y})\n\
5598 Return the matrix power operation of @var{x} raised to the @var{y} power.\n\
5599 This function and @w{@tcode{x ^ y}} are equivalent.\n\
5600 @seealso{power, mtimes, plus, minus}\n\
5606 DEFUN (mldivide, args, ,
5608 @deftypefn {Built-in Function} {} mldivide (@var{x}, @var{y})\n\
5609 Return the matrix left division of @var{x} and @var{y}.\n\
5610 This function and @w{@tcode{x @xbackslashchar{} y}} are equivalent.\n\
5611 @seealso{mrdivide, ldivide, rdivide}\n\
5619 @deftypefn {Built-in Function} {} lt (@var{x}, @var{y})\n\
5620 This function is equivalent to @w{@code{x < y}}.\n\
5621 @seealso{le, eq, ge, gt, ne}\n\
5629 @deftypefn {Built-in Function} {} le (@var{x}, @var{y})\n\
5630 This function is equivalent to @w{@code{x <= y}}.\n\
5631 @seealso{eq, ge, gt, ne, lt}\n\
5639 @deftypefn {Built-in Function} {} eq (@var{x}, @var{y})\n\
5640 Return true if the two inputs are equal.\n\
5641 This function is equivalent to @w{@code{x == y}}.\n\
5642 @seealso{ne, isequal, le, ge, gt, ne, lt}\n\
5650 @deftypefn {Built-in Function} {} ge (@var{x}, @var{y})\n\
5651 This function is equivalent to @w{@code{x >= y}}.\n\
5652 @seealso{le, eq, gt, ne, lt}\n\
5660 @deftypefn {Built-in Function} {} gt (@var{x}, @var{y})\n\
5661 This function is equivalent to @w{@code{x > y}}.\n\
5662 @seealso{le, eq, ge, ne, lt}\n\
5670 @deftypefn {Built-in Function} {} ne (@var{x}, @var{y})\n\
5671 Return true if the two inputs are not equal.\n\
5672 This function is equivalent to @w{@code{x != y}}.\n\
5673 @seealso{eq, isequal, le, ge, lt}\n\
5679 DEFUN (times, args, ,
5681 @deftypefn {Built-in Function} {} times (@var{x}, @var{y})\n\
5682 @deftypefnx {Built-in Function} {} times (@var{x1}, @var{x2}, @dots{})\n\
5683 Return the element-by-element multiplication product of inputs.\n\
5684 This function and @w{@tcode{x .* y}} are equivalent.\n\
5685 If more arguments are given, the multiplication is applied\n\
5686 cumulatively from left to right:\n\
5689 (@dots{}((x1 .* x2) .* x3) .* @dots{})\n\
5692 At least one argument is required.\n\
5693 @seealso{mtimes, rdivide}\n\
5700 DEFUN (rdivide, args, ,
5702 @deftypefn {Built-in Function} {} rdivide (@var{x}, @var{y})\n\
5703 Return the element-by-element right division of @var{x} and @var{y}.\n\
5704 This function and @w{@tcode{x ./ y}} are equivalent.\n\
5705 @seealso{ldivide, mrdivide, times, plus}\n\
5711 DEFUN (power, args, ,
5713 @deftypefn {Built-in Function} {} power (@var{x}, @var{y})\n\
5714 Return the element-by-element operation of @var{x} raised to the\n\
5715 @var{y} power. If several complex results are possible,\n\
5716 returns the one with smallest non-negative argument (angle). Use\n\
5717 @code{realpow}, @code{realsqrt}, @code{cbrt}, or @code{nthroot} if a\n\
5718 real result is preferred.\n\
5720 This function and @w{@tcode{x .^ y}} are equivalent.\n\
5721 @seealso{mpower, realpow, realsqrt, cbrt, nthroot}\n\
5727 DEFUN (ldivide, args, ,
5729 @deftypefn {Built-in Function} {} ldivide (@var{x}, @var{y})\n\
5730 Return the element-by-element left division of @var{x} and @var{y}.\n\
5731 This function and @w{@tcode{x .@xbackslashchar{} y}} are equivalent.\n\
5732 @seealso{rdivide, mldivide, times, plus}\n\
5740 @deftypefn {Built-in Function} {} and (@var{x}, @var{y})\n\
5741 @deftypefnx {Built-in Function} {} and (@var{x1}, @var{x2}, @dots{})\n\
5742 Return the logical AND of @var{x} and @var{y}.\n\
5743 This function is equivalent to @w{@code{x & y}}.\n\
5744 If more arguments are given, the logical and is applied\n\
5745 cumulatively from left to right:\n\
5748 (@dots{}((x1 & x2) & x3) & @dots{})\n\
5751 At least one argument is required.\n\
5752 @seealso{or, not, xor}\n\
5761 @deftypefn {Built-in Function} {} or (@var{x}, @var{y})\n\
5762 @deftypefnx {Built-in Function} {} or (@var{x1}, @var{x2}, @dots{})\n\
5763 Return the logical OR of @var{x} and @var{y}.\n\
5764 This function is equivalent to @w{@code{x | y}}.\n\
5765 If more arguments are given, the logical or is applied\n\
5766 cumulatively from left to right:\n\
5769 (@dots{}((x1 | x2) | x3) | @dots{})\n\
5772 At least one argument is required.\n\
5773 @seealso{and, not, xor}\n\
5782 DEFUN (tic, args, nargout,
5784 @deftypefn {Built-in Function} {} tic ()\n\
5785 @deftypefnx {Built-in Function} {@var{id} =} tic ()\n\
5786 @deftypefnx {Built-in Function} {} toc ()\n\
5787 @deftypefnx {Built-in Function} {} toc (@var{id})\n\
5788 @deftypefnx {Built-in Function} {@var{val} =} toc (@dots{})\n\
5789 Set or check a wall-clock timer. Calling @code{tic} without an\n\
5790 output argument sets the internal timer state. Subsequent calls\n\
5791 to @code{toc} return the number of seconds since the timer was set.\n\
5797 # many computations later@dots{}\n\
5798 elapsed_time = toc ();\n\
5803 will set the variable @code{elapsed_time} to the number of seconds since\n\
5804 the most recent call to the function @code{tic}.\n\
5806 If called with one output argument, @code{tic} returns a scalar\n\
5807 of type @code{uint64} that may be later passed to @code{toc}.\n\
5811 id = tic; sleep (5); toc (id)\n\
5816 Calling @code{tic} and @code{toc} this way allows nested timing calls.\n\
5818 If you are more interested in the CPU time that your process used, you\n\
5819 should use the @code{cputime} function instead. The @code{tic} and\n\
5820 @code{toc} functions report the actual wall clock time that elapsed\n\
5821 between the calls. This may include time spent processing other jobs or\n\
5822 doing nothing at all.\n\
5823 @seealso{toc, cputime}\n\
5828 int nargin = args.length ();
5831 warning ("tic: ignoring extra arguments");
5840 double frac = modf (tmp, &ip);
5841 uint64_t microsecs = static_cast<uint64_t> (CLOCKS_PER_SEC * frac);
5842 microsecs += CLOCKS_PER_SEC * static_cast<uint64_t> (ip);
5846 tic_toc_timestamp = tmp;
5851 DEFUN (toc, args, nargout,
5853 @deftypefn {Built-in Function} {} toc ()\n\
5854 @deftypefnx {Built-in Function} {} toc (@var{id})\n\
5855 @deftypefnx {Built-in Function} {@var{val} =} toc (@dots{})\n\
5856 @seealso{tic, cputime}\n\
5861 int nargin = args.length ();
5875 uint64_t val = id.value ();
5878 = (static_cast<double> (val / CLOCKS_PER_SEC)
5879    + static_cast<double> (val % CLOCKS_PER_SEC)
5886 error ("toc: invalid ID");
5892 error ("toc called before timer set");
5902 octave_stdout << "Elapsed time is " << tmp << " seconds.\n";
5917 DEFUN (cputime, args, ,
5919 @deftypefn {Built-in Function} {[@var{total}, @var{user}, @var{system}] =} cputime ();\n\
5920 Return the CPU time used by your Octave session. The first output is\n\
5921 the total time spent executing your process and is equal to the sum of\n\
5922 second and third outputs, which are the number of CPU seconds spent\n\
5923 executing in user mode and the number of CPU seconds spent executing in\n\
5924 system mode, respectively. If your system does not have a way to report\n\
5925 CPU time usage, @code{cputime} returns 0 for each of its output values.\n\
5926 Note that because Octave used some CPU time to start, it is reasonable\n\
5927 to check whether @code{cputime} works by verifying that the total\n\
5928 CPU time used is nonzero.\n\
5929 @seealso{tic, toc}\n\
5933 int nargin = args.length ();
5938 warning ("cputime: ignoring extra arguments");
5940 #if defined (HAVE_GETRUSAGE)
5946 usr = static_cast<double> (ru.ru_utime.tv_sec) +
5947       static_cast<double> (ru.ru_utime.tv_usec) * 1e-6;
5949 sys = static_cast<double> (ru.ru_stime.tv_sec) +
5950       static_cast<double> (ru.ru_stime.tv_usec) * 1e-6;
5958 unsigned long ticks;
5959 unsigned long seconds;
5960 unsigned long fraction;
5962 ticks = t.tms_utime + t.tms_cutime;
5963 fraction = ticks % CLOCKS_PER_SEC;
5964 seconds = ticks / CLOCKS_PER_SEC;
5966 usr = static_cast<double> (seconds) + static_cast<double> (fraction) /
5967       static_cast<double> (CLOCKS_PER_SEC);
5969 ticks = t.tms_stime + t.tms_cstime;
5970 fraction = ticks % CLOCKS_PER_SEC;
5971 seconds = ticks / CLOCKS_PER_SEC;
5973 sys = static_cast<double> (seconds) + static_cast<double> (fraction) /
5974       static_cast<double> (CLOCKS_PER_SEC);
5980 retval(0) = sys + usr;
5985 DEFUN (sort, args, nargout,
5987 @deftypefn {Built-in Function} {[@var{s}, @var{i}] =} sort (@var{x})\n\
5988 @deftypefnx {Built-in Function} {[@var{s}, @var{i}] =} sort (@var{x}, @var{dim})\n\
5989 @deftypefnx {Built-in Function} {[@var{s}, @var{i}] =} sort (@var{x}, @var{mode})\n\
5990 @deftypefnx {Built-in Function} {[@var{s}, @var{i}] =} sort (@var{x}, @var{dim}, @var{mode})\n\
5991 Return a copy of @var{x} with the elements arranged in increasing\n\
5992 order. For matrices, @code{sort} orders the elements within columns\n\
5998 sort ([1, 2; 2, 3; 3, 1])\n\
  @result{} [ 1, 1; 2, 2; 3, 3 ]\n\
6005 If the optional argument @var{dim} is given, then the matrix is sorted\n\
6006 along the dimension defined by @var{dim}. The optional argument @code{mode}\n\
6007 defines the order in which the values will be sorted. Valid values of\n\
6008 @code{mode} are @qcode{\"ascend\"} or @qcode{\"descend\"}.\n\
6010 The @code{sort} function may also be used to produce a matrix\n\
6011 containing the original row indices of the elements in the sorted\n\
6012 matrix. For example:\n\
6016 [s, i] = sort ([1, 2; 2, 3; 3, 1])\n\
6017 @result{} s = [ 1, 1; 2, 2; 3, 3 ]\n\
6020 @result{} i = [ 1, 3; 2, 1; 3, 2 ]\n\
6026 For equal elements, the indices are such that equal elements are listed\n\
6027 in the order in which they appeared in the original list.\n\
6029 Sorting of complex entries is done first by magnitude (@code{abs (@var{z})})\n\
6030 and for any ties by phase angle (@code{angle (@var{z})}). For example:\n\
6034 sort ([1+i; 1; 1-i])\n\
  @result{} [ 1 + 0i; 1 - 1i; 1 + 1i ]\n\
6041 NaN values are treated as being greater than any other value and are sorted\n\
6042 to the end of the list.\n\
6044 The @code{sort} function may also be used to sort strings and cell arrays\n\
6045 of strings, in which case ASCII dictionary order (uppercase 'A' precedes\n\
6046 lowercase 'a') of the strings is used.\n\
6048 The algorithm used in @code{sort} is optimized for the sorting of partially ordered lists.\n\
6050 @seealso{sortrows, issorted}\n\
6055 int nargin = args.length ();
6058 if (nargin < 1 || nargin > 3)
6064 bool return_idx = nargout > 1;
6071 if (args(1).is_string ())
6073 std::string mode = args(1).string_value ();
6074 if (mode == "ascend")
6076 else if (mode == "descend")
6080 error ("sort: MODE must be either \"ascend\" or \"descend\"");
6085 dim = args(1).nint_value () - 1;
6090 if (args(1).is_string ())
6096 if (! args(2).is_string ())
6098 error ("sort: MODE must be a string");
6101 std::string mode = args(2).string_value ();
6102 if (mode == "ascend")
6104 else if (mode == "descend")
6108 error ("sort: MODE must be either \"ascend\" or \"descend\"");
6114 if (nargin == 1 || args(1).is_string ())
6123 error ("sort: DIM must be a valid dimension");
6134 retval(0) = arg.sort (sidx, dim, smode);
6138 retval(0) = arg.sort (dim, smode);
6324 DEFUN (__sort_rows_idx__, args, ,
6326 @deftypefn {Built-in Function} {} __sort_rows_idx__ (@var{a}, @var{mode})\n\
6327 Undocumented internal function.\n\
6332 int nargin = args.length ();
6335 if (nargin < 1 || nargin > 2 || (nargin == 2 && ! args(1).is_string ()))
6343 std::string mode = args(1).string_value ();
6344 if (mode == "ascend")
6346 else if (mode == "descend")
6350 error ("__sort_rows_idx__: MODE must be either \"ascend\" or \"descend\"");
6358 error ("__sort_rows_idx__: sparse matrices not yet supported");
6359 if (arg.ndims () == 2)
6366 error ("__sort_rows_idx__: needs a 2-dimensional object");
6382 error ("issorted: expecting %s argument to be a character string", argn);
6383 else if (mode == "ascending")
6385 else if (mode == "descending")
6387 else if (mode == "either")
6390 error ("issorted: MODE must be \"ascending\", \"descending\", or \"either\"");
6395 DEFUN (issorted, args, ,
6397 @deftypefn {Built-in Function} {} issorted (@var{a})\n\
6398 @deftypefnx {Built-in Function} {} issorted (@var{a}, @var{mode})\n\
6399 @deftypefnx {Built-in Function} {} issorted (@var{a}, \"rows\", @var{mode})\n\
6400 Return true if the array is sorted according to @var{mode}, which\n\
6401 may be either @qcode{\"ascending\"}, @qcode{\"descending\"}, or\n\
6402 @qcode{\"either\"}. By default, @var{mode} is @qcode{\"ascending\"}. NaNs\n\
6403 are treated in the same manner as @code{sort}.\n\
6405 If the optional argument @qcode{\"rows\"} is supplied, check whether\n\
6406 the array is sorted by rows as output by the function @code{sortrows}\n\
6407 (with no options).\n\
6409 This function does not support sparse matrices.\n\
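For example:\n\
\n\
@example\n\
@group\n\
issorted ([1, 2, 4, 3])\n\
  @result{} 0\n\
issorted ([4, 3, 2, 1], \"descending\")\n\
  @result{} 1\n\
@end group\n\
@end example\n\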
6410 @seealso{sort, sortrows}\n\
6415 int nargin = args.length ();
6417 if (nargin < 1 || nargin > 3)
6423 bool by_rows = false;
6434 std::string tmp = args(1).string_value ();
6444 error ("expecting second argument to be character string");
6455 error ("issorted: sparse matrices not yet supported");
6456 if (arg.ndims () == 2)
6459 error ("issorted: A must be a 2-dimensional object");
6464 retval = args(0).is_sorted (smode) != UNSORTED;
6466 error ("issorted: needs a vector");
6506 DEFUN (nth_element, args, ,
6508 @deftypefn {Built-in Function} {} nth_element (@var{x}, @var{n})\n\
6509 @deftypefnx {Built-in Function} {} nth_element (@var{x}, @var{n}, @var{dim})\n\
6510 Select the n-th smallest element of a vector, using the ordering defined by\n\
6511 @code{sort}. In other words, the result is equivalent to\n\
6512 @code{sort(@var{x})(@var{n})}.\n\
6513 @var{n} can also be a contiguous range, either ascending @code{l:u}\n\
6514 or descending @code{u:-1:l}, in which case a range of elements is returned.\n\
6515 If @var{x} is an array, @code{nth_element} operates along the dimension\n\
6516 defined by @var{dim}, or the first non-singleton dimension if @var{dim} is\n\
6519 nth_element encapsulates the C++ standard library algorithms nth_element and\n\
6520 partial_sort. On average, the complexity of the operation is O(M*log(K)),\n\
6521 where @w{@code{M = size (@var{x}, @var{dim})}} and\n\
6522 @w{@code{K = length (@var{n})}}.\n\
6523 This function is intended for cases where the ratio K/M is small; otherwise,\n\
6524 it may be better to use @code{sort}.\n\
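For example:\n\
\n\
@example\n\
@group\n\
nth_element ([4, 1, 3, 2], 2)\n\
  @result{} 2\n\
@end group\n\
@end example\n\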
6525 @seealso{sort, min, max}\n\
6529 int nargin = args.length ();
6531 if (nargin == 2 || nargin == 3)
6538 dim = args(2).int_value (true) - 1;
6540 error ("nth_element: DIM must be a valid dimension");
6564 #define MAKE_INT_BRANCH(X) \
6566 retval = argx.X ## _array_value ().nth_element (n, dim); \
6577 #undef MAKE_INT_BRANCH
6591 template <class NDT>
6596 typedef typename NDT::element_type T;
6599 else if (idx.extent (n) > n)
6600 error ("accumarray: index out of range");
6604 if (vals.numel () == 1)
6605 retval.idx_add (idx, vals (0));
6606 else if (vals.numel () == idx.length (n))
6607 retval.idx_add (idx, vals);
6609 error ("accumarray: dimensions mismatch");
6614 DEFUN (__accumarray_sum__, args, ,
6616 @deftypefn {Built-in Function} {} __accumarray_sum__ (@var{idx}, @var{vals}, @var{n})\n\
6617 Undocumented internal function.\n\
6621 int nargin = args.length ();
6622 if (nargin >= 2 && nargin <= 3 && args(0).is_numeric_type ())
6627 n = args(2).idx_type_value (true);
6667 template <class NDT>
6671 const typename NDT::element_type& zero_val)
6673 typedef typename NDT::element_type T;
6676 else if (idx.extent (n) > n)
6677 error ("accumarray: index out of range");
6686 if (vals.numel () == 1)
6687 (retval.*op) (idx, NDT (dim_vector (l, 1), vals(0)));
6688 else if (vals.numel () == l)
6689 (retval.*op) (idx, vals);
6691 error ("accumarray: dimensions mismatch");
6701 int nargin = args.length ();
6702 if (nargin >= 3 && nargin <= 4 && args(0).is_numeric_type ())
6707 n = args(3).idx_type_value (true);
6717 zero.double_value ());
6721 ismin, zero.float_value ());
6725 n, ismin, zero.complex_value ());
6731 zero.float_complex_value ());
6733 #define MAKE_INT_BRANCH(X) \
6735 retval = do_accumarray_minmax (idx, vals.X ## _array_value (), \
6737 zero.X ## _scalar_value ()); \
6748 #undef MAKE_INT_BRANCH
6751 zero.bool_value ());
6764 DEFUN (__accumarray_min__, args, ,
6766 @deftypefn {Built-in Function} {} __accumarray_min__ (@var{idx}, @var{vals}, @var{zero}, @var{n})\n\
6767 Undocumented internal function.\n\
6773 DEFUN (__accumarray_max__, args, ,
6775 @deftypefn {Built-in Function} {} __accumarray_max__ (@var{idx}, @var{vals}, @var{zero}, @var{n})\n\
6776 Undocumented internal function.\n\
6782 template <class NDT>
6787 typedef typename NDT::element_type T;
6790 else if (idx.extent (n) > n)
6791 error ("accumdim: index out of range");
6793 dim_vector vals_dim = vals.dims (), rdv = vals_dim;
6797 else if (dim >= rdv.length ())
6798 rdv.resize (dim+1, 1);
6802 NDT retval (rdv, T ());
6804 if (idx.length () != vals_dim(dim))
6805 error ("accumdim: dimension mismatch");
6807 retval.idx_add_nd (idx, vals, dim);
6812 DEFUN (__accumdim_sum__, args, ,
6814 @deftypefn {Built-in Function} {} __accumdim_sum__ (@var{idx}, @var{vals}, @var{dim}, @var{n})\n\
6815 Undocumented internal function.\n\
6819 int nargin = args.length ();
6820 if (nargin >= 2 && nargin <= 4 && args(0).is_numeric_type ())
6825 dim = args(2).int_value () - 1;
6829 n = args(3).idx_type_value (true);
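// Helper for merge/ifelse below: build the result element by element,
// taking entries from TVAL where MASK is true and from FVAL otherwise;
// a scalar TVAL or FVAL is broadcast across the whole mask.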
6863 template <class NDT>
6866 const NDT& tval, const NDT& fval)
6868 typedef typename NDT::element_type T;
6872 bool tscl = tval.numel () == 1, fscl = fval.numel () == 1;
6874 if ((! tscl && tval.dims () != dv)
6875     || (! fscl && fval.dims () != dv))
6876 error ("merge: MASK, TVAL, and FVAL dimensions must match");
6879 T *rv = retval.fortran_vec ();
6882 const T *tv = tval.data (), *fv = fval.data ();
6883 const bool *mv = mask.data ();
6889 T ts = tv[0], fs = fv[0];
6891 rv[i] = mv[i] ? ts : fs;
6897 rv[i] = mv[i] ? ts : fv[i];
6906 rv[i] = mv[i] ? tv[i] : fs;
6911 rv[i] = mv[i] ? tv[i] : fv[i];
6919 #define MAKE_INT_BRANCH(INTX) \
6920 else if (tval.is_ ## INTX ## _type () && fval.is_ ## INTX ## _type ()) \
6922 retval = do_merge (mask, \
6923 tval.INTX ## _array_value (), \
6924 fval.INTX ## _array_value ()); \
6927 DEFUN (merge, args, ,
6929 @deftypefn {Built-in Function} {} merge (@var{mask}, @var{tval}, @var{fval})\n\
6930 @deftypefnx {Built-in Function} {} ifelse (@var{mask}, @var{tval}, @var{fval})\n\
6931 Merge elements of @var{tval} and @var{fval}, depending on the\n\
6932 value of @var{mask}. If @var{mask} is a logical scalar, the other two\n\
6933 arguments can be arbitrary values. Otherwise, @var{mask} must be a logical\n\
6934 array, and @var{tval}, @var{fval} should be arrays of matching class, or\n\
6935 cell arrays. In the scalar mask case, @var{tval} is returned if @var{mask}\n\
6936 is true, otherwise @var{fval} is returned.\n\
6938 In the array mask case, both @var{tval} and @var{fval} must be either\n\
6939 scalars or arrays with dimensions equal to @var{mask}. The result is\n\
6940 constructed as follows:\n\
6944 result(mask) = tval(mask);\n\
6945 result(! mask) = fval(! mask);\n\
6949 @var{mask} can also be an arbitrary numeric type, in which case\n\
6950 it is first converted to logical.\n\
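For example:\n\
\n\
@example\n\
@group\n\
merge ([true, false], [1, 2], [3, 4])\n\
  @result{} [ 1, 4 ]\n\
@end group\n\
@end example\n\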
6951 @seealso{logical, diff}\n\
6954 int nargin = args.length ();
6957 if (nargin == 3 && (args(0).is_bool_type () || args(0).is_numeric_type ()))
6962 retval = mask_val.is_true () ? args(1) : args(2);
6972 fval.complex_array_value ());
6976 fval.array_value ());
6983 fval.float_complex_array_value ());
6987 fval.float_array_value ());
6989 else if (tval.is_string () && fval.is_string ())
6991 bool sq_string = tval.is_sq_string () || fval.is_sq_string ();
6994 fval.char_array_value ()),
6995 sq_string ? '\'' : '"');
6997 else if (tval.is_cell () && fval.is_cell ())
7001 fval.cell_value ());
7014 error ("merge: cannot merge %s with %s with array mask",
7016 fval.class_name ().c_str ());
7027 #undef MAKE_INT_BRANCH
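// Helper for diff below: compute ORDER-th differences of a sparse matrix
// by repeatedly subtracting adjacent slices along the chosen dimension,
// shrinking that dimension by one on each pass.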
7029 template <class SparseT>
7034 SparseT retval = array;
7038 while (order > 0 && k > 0)
7040 idx_vector col1 (':'), col2 (':'), sl1 (1, k), sl2 (0, k-1);
7041 retval = SparseT (retval.index (col1, sl1))
7042          - SparseT (retval.index (col2, sl2));
7043 assert (retval.columns () == k-1);
7051 while (order > 0 && k > 0)
7053 idx_vector col1 (':'), col2 (':'), sl1 (1, k), sl2 (0, k-1);
7054 retval = SparseT (retval.index (sl1, col1))
7055          - SparseT (retval.index (sl2, col2));
7056 assert (retval.rows () == k-1);
7077 if (dv(dim) <= order)
7088 retval = do_diff (array, order, dim - 1);
7091 else if (dv(dim) == 1)
7095 retval = do_diff (array, dv(dim) - 1, dim);
7096 order -= dv(dim) - 1;
7153 DEFUN (diff, args, ,
7155 @deftypefn {Built-in Function} {} diff (@var{x})\n\
7156 @deftypefnx {Built-in Function} {} diff (@var{x}, @var{k})\n\
7157 @deftypefnx {Built-in Function} {} diff (@var{x}, @var{k}, @var{dim})\n\
7158 If @var{x} is a vector of length @math{n}, @code{diff (@var{x})} is the\n\
7159 vector of first differences\n\
7161 $x_2 - x_1, \\ldots{}, x_n - x_{n-1}$.\n\
7164 @var{x}(2) - @var{x}(1), @dots{}, @var{x}(n) - @var{x}(n-1).\n\
7167 If @var{x} is a matrix, @code{diff (@var{x})} is the matrix of column\n\
7168 differences along the first non-singleton dimension.\n\
7170 The second argument is optional. If supplied, @code{diff (@var{x},\n\
7171 @var{k})}, where @var{k} is a non-negative integer, returns the\n\
7172 @var{k}-th differences. It is possible that @var{k} is larger than\n\
7173 the first non-singleton dimension of the matrix. In this case,\n\
7174 @code{diff} continues to take the differences along the next\n\
7175 non-singleton dimension.\n\
7177 The dimension along which to take the difference can be explicitly\n\
7178 stated with the optional variable @var{dim}. In this case the\n\
7179 @var{k}-th order differences are calculated along this dimension.\n\
7180 In the case where @var{k} exceeds @code{size (@var{x}, @var{dim})}\n\
7181 an empty matrix is returned.\n\
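For example:\n\
\n\
@example\n\
@group\n\
diff ([1, 4, 9, 16])\n\
  @result{} [ 3, 5, 7 ]\n\
diff ([1, 4, 9, 16], 2)\n\
  @result{} [ 2, 2 ]\n\
@end group\n\
@end example\n\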
7182 @seealso{sort, merge}\n\
7185 int nargin = args.length ();
7188 if (nargin < 1 || nargin > 3)
7190 else if (! (args(0).is_numeric_type () || args(0).is_bool_type ()))
7191 error ("diff: X must be numeric or logical");
7199 if (args(1).is_scalar_type ())
7200 order = args(1).idx_type_value (true, false);
7201 else if (! args(1).is_zero_by_zero ())
7202 error ("order K must be a scalar or []");
7204 error ("order K must be non-negative");
7209 dim = args(2).int_value (true, false);
7210 if (! error_state && (dim < 1 || dim > args(0).ndims ()))
7211 error ("DIM must be a valid dimension");
7217 retval = do_diff (args(0), order, dim);
7242 assert (rep.ndims () == 2 && rep.rows () == 2);
7250 error ("repelems: second row must contain non-negative numbers");
7257 retval.clear (1, l);
7263 std::fill_n (dest, k, src.checkelem (rep(0, i) - 1));
7270 DEFUN (repelems, args, ,
7272 @deftypefn {Built-in Function} {} repelems (@var{x}, @var{r})\n\
7273 Construct a vector of repeated elements from @var{x}. @var{r}\n\
7274 is a 2x@var{N} integer matrix specifying which elements to repeat and\n\
7275 how often to repeat each element.\n\
7277 Entries in the first row, @var{r}(1,j), select an element to repeat.\n\
7278 The corresponding entry in the second row, @var{r}(2,j), specifies\n\
7279 the repeat count. If @var{x} is a matrix then the columns of @var{x} are\n\
7280 imagined to be stacked on top of each other for purposes of the selection\n\
7281 index. A row vector is always returned.\n\
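For example:\n\
\n\
@example\n\
@group\n\
repelems ([100, 200, 300], [1, 2, 3; 2, 1, 3])\n\
  @result{} [ 100, 100, 200, 300, 300, 300 ]\n\
@end group\n\
@end example\n\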
7283 Conceptually the result is calculated as follows:\n\
7288 for i = 1:columns (@var{r})\n\
7289 y = [y, @var{x}(@var{r}(1,i)*ones(1, @var{r}(2,i)))];\n\
7293 @seealso{repmat, cat}\n\
7302 const Matrix rm = args(1).matrix_value ();
7305 else if (rm.rows () != 2 || rm.ndims () != 2)
7307 error ("repelems: R must be a matrix with two rows");
7317 if (static_cast<double> (rx) != rm(i))
7319 error ("repelems: R must be a matrix of integers");
7328 #define BTYP_BRANCH(X, EX) \
7330 retval = do_repelems (x.EX ## _value (), r); \
7364 DEFUN (base64_encode, args, ,
7366 @deftypefn {Built-in Function} {@var{s} =} base64_encode (@var{x})\n\
7367 Encode a double matrix or array @var{x} into the base64 format string @var{s}.\n\
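For example, the two bytes of a @code{uint8} vector encode as follows:\n\
\n\
@example\n\
@group\n\
base64_encode (uint8 ([1, 2]))\n\
  @result{} \"AQI=\"\n\
@end group\n\
@end example\n\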
7370 @seealso{base64_decode}\n\
7374 int nargin = args.length ();
7380 if (! args(0).is_numeric_type ())
7381 error ("base64_encode: encoding is supported only for numeric arrays");
7382 else if (args(0).is_complex_type ()
7383          || args(0).is_sparse_type ())
7384 error ("base64_encode: encoding complex or sparse data is not supported");
7385 else if (args(0).is_integer_type ())
7387 #define MAKE_INT_BRANCH(X) \
7388 if (args(0).is_ ## X ## _type ()) \
7390 const X##NDArray in = args(0). X## _array_value (); \
7392 in.numel () * sizeof (X## _t) / sizeof (char); \
7394 reinterpret_cast<const char*> (in.data ()); \
7397 && octave_base64_encode (inc, inlen, &out)) \
7399 retval(0) = octave_value (out); \
7412 #undef MAKE_INT_BRANCH
7417 else if (args(0).is_single_type ())
7421 inlen = in.numel () * sizeof (float) / sizeof (char);
7423 inc = reinterpret_cast<const char*> (in.data ());
7436 inlen = in.numel () * sizeof (double) / sizeof (char);
7438 inc = reinterpret_cast<const char*> (in.data ());
7466 DEFUN (base64_decode, args, ,
7468 @deftypefn {Built-in Function} {@var{x} =} base64_decode (@var{s})\n\
7469 @deftypefnx {Built-in Function} {@var{x} =} base64_decode (@var{s}, @var{dims})\n\
7470 Decode the double matrix or array @var{x} from the base64 encoded string\n\
7471 @var{s}. The optional input parameter @var{dims} should be a vector\n\
7472 containing the dimensions of the decoded array.\n\
7473 @seealso{base64_encode}\n\
7478 int nargin = args.length ();
7480 if (nargin < 1 || nargin > 2)
7489 args(1).octave_idx_type_vector_value ();
7499 const std::string str = args(0).string_value ();