GNU Octave 3.8.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab.
liboctave/operators/Sparse-op-defs.h
/*

Copyright (C) 2004-2013 David Bateman
Copyright (C) 1998-2004 Andy Adler
Copyright (C) 2008 Jaroslav Hajek

This file is part of Octave.

Octave is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

Octave is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with Octave; see the file COPYING. If not, see
<http://www.gnu.org/licenses/>.

*/

#if !defined (octave_Sparse_op_defs_h)
#define octave_Sparse_op_defs_h 1

#include "Array-util.h"
#include "mx-ops.h"
#include "oct-locbuf.h"
#include "mx-inlines.cc"

#define SPARSE_BIN_OP_DECL(R, OP, X, Y, API) \
extern API R OP (const X&, const Y&)

#define SPARSE_CMP_OP_DECL(OP, X, Y, API) \
extern API SparseBoolMatrix OP (const X&, const Y&)

#define SPARSE_BOOL_OP_DECL(OP, X, Y, API) \
extern API SparseBoolMatrix OP (const X&, const Y&)

// matrix by scalar operations.

#define SPARSE_SMS_BIN_OP_DECLS(R1, R2, M, S, API) \
SPARSE_BIN_OP_DECL (R1, operator +, M, S, API); \
SPARSE_BIN_OP_DECL (R1, operator -, M, S, API); \
SPARSE_BIN_OP_DECL (R2, operator *, M, S, API); \
SPARSE_BIN_OP_DECL (R2, operator /, M, S, API);

#define SPARSE_SMS_BIN_OP_1(R, F, OP, M, S) \
51
R \
52
F (const M& m, const S& s) \
53
{ \
54
octave_idx_type nr = m.rows (); \
55
octave_idx_type nc = m.cols (); \
56
\
57
R r (nr, nc, (0.0 OP s)); \
58
\
59
for (octave_idx_type j = 0; j < nc; j++) \
60
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
61
r.xelem (m.ridx (i), j) = m.data (i) OP s; \
62
return r; \
63
}
64
65
#define SPARSE_SMS_BIN_OP_2(R, F, OP, M, S) \
66
R \
67
F (const M& m, const S& s) \
68
{ \
69
octave_idx_type nr = m.rows (); \
70
octave_idx_type nc = m.cols (); \
71
octave_idx_type nz = m.nnz (); \
72
\
73
R r (nr, nc, nz); \
74
\
75
for (octave_idx_type i = 0; i < nz; i++) \
76
{ \
77
r.xdata (i) = m.data (i) OP s; \
78
r.xridx (i) = m.ridx (i); \
79
} \
80
for (octave_idx_type i = 0; i < nc + 1; i++) \
81
r.xcidx (i) = m.cidx (i); \
82
\
83
r.maybe_compress (true); \
84
return r; \
85
}
86
87
#define SPARSE_SMS_BIN_OPS(R1, R2, M, S) \
88
SPARSE_SMS_BIN_OP_1 (R1, operator +, +, M, S) \
89
SPARSE_SMS_BIN_OP_1 (R1, operator -, -, M, S) \
90
SPARSE_SMS_BIN_OP_2 (R2, operator *, *, M, S) \
91
SPARSE_SMS_BIN_OP_2 (R2, operator /, /, M, S)
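// Illustrative sketch (added, not part of the original header): one
// hypothetical instantiation of the matrix-by-scalar arithmetic family.
// The concrete type names below are assumptions chosen for the example;
// the real instantiations live in the per-type operator source files.
#if 0
// Defines operator +, -, * and / for SparseMatrix OP double.  The + and -
// variants go through SPARSE_SMS_BIN_OP_1 (the result is filled, since
// combining a nonzero scalar with the implicit zeros touches every
// element), while * and / go through SPARSE_SMS_BIN_OP_2 and keep the
// sparsity pattern of m.
SPARSE_SMS_BIN_OPS (SparseMatrix, SparseMatrix, SparseMatrix, double)
#endif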
92
93
#define SPARSE_SMS_CMP_OP_DECLS(M, S, API) \
94
SPARSE_CMP_OP_DECL (mx_el_lt, M, S, API); \
95
SPARSE_CMP_OP_DECL (mx_el_le, M, S, API); \
96
SPARSE_CMP_OP_DECL (mx_el_ge, M, S, API); \
97
SPARSE_CMP_OP_DECL (mx_el_gt, M, S, API); \
98
SPARSE_CMP_OP_DECL (mx_el_eq, M, S, API); \
99
SPARSE_CMP_OP_DECL (mx_el_ne, M, S, API);
100
101
#define SPARSE_SMS_EQNE_OP_DECLS(M, S, API) \
102
SPARSE_CMP_OP_DECL (mx_el_eq, M, S, API); \
103
SPARSE_CMP_OP_DECL (mx_el_ne, M, S, API);
104
105
#define SPARSE_SMS_CMP_OP(F, OP, M, MZ, MC, S, SZ, SC) \
106
SparseBoolMatrix \
107
F (const M& m, const S& s) \
108
{ \
109
octave_idx_type nr = m.rows (); \
110
octave_idx_type nc = m.cols (); \
111
SparseBoolMatrix r; \
112
\
113
if (MC (MZ) OP SC (s)) \
114
{ \
115
r = SparseBoolMatrix (nr, nc, true); \
116
for (octave_idx_type j = 0; j < nc; j++) \
117
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
118
if (! (MC (m.data (i)) OP SC (s))) \
119
r.data (m.ridx (i) + j * nr) = false; \
120
r.maybe_compress (true); \
121
} \
122
else \
123
{ \
124
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
125
r.cidx (0) = static_cast<octave_idx_type> (0); \
126
octave_idx_type nel = 0; \
127
for (octave_idx_type j = 0; j < nc; j++) \
128
{ \
129
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
130
if (MC (m.data (i)) OP SC (s)) \
131
{ \
132
r.ridx (nel) = m.ridx (i); \
133
r.data (nel++) = true; \
134
} \
135
r.cidx (j + 1) = nel; \
136
} \
137
r.maybe_compress (false); \
138
} \
139
return r; \
140
}
141
142
#define SPARSE_SMS_CMP_OPS(M, MZ, CM, S, SZ, CS) \
143
SPARSE_SMS_CMP_OP (mx_el_lt, <, M, MZ, , S, SZ, ) \
144
SPARSE_SMS_CMP_OP (mx_el_le, <=, M, MZ, , S, SZ, ) \
145
SPARSE_SMS_CMP_OP (mx_el_ge, >=, M, MZ, , S, SZ, ) \
146
SPARSE_SMS_CMP_OP (mx_el_gt, >, M, MZ, , S, SZ, ) \
147
SPARSE_SMS_CMP_OP (mx_el_eq, ==, M, MZ, , S, SZ, ) \
148
SPARSE_SMS_CMP_OP (mx_el_ne, !=, M, MZ, , S, SZ, )
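// Illustrative sketch (added, not part of the original header): the
// comparison family above expands to six functions, mx_el_lt, mx_el_le,
// mx_el_ge, mx_el_gt, mx_el_eq and mx_el_ne, each returning a
// SparseBoolMatrix.  The instantiation and call below are hypothetical and
// only meant to show the shape of the generated API.
#if 0
SPARSE_SMS_CMP_OPS (SparseMatrix, 0.0, , double, 0.0, )
// ... after which, for a SparseMatrix sm,
//   SparseBoolMatrix b = mx_el_lt (sm, 1.0);
// marks the elements of sm that compare less than 1.0, using a sparse
// result when the comparison against the zero value is false.
#endif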
149
150
#define SPARSE_SMS_EQNE_OPS(M, MZ, CM, S, SZ, CS) \
151
SPARSE_SMS_CMP_OP (mx_el_eq, ==, M, MZ, , S, SZ, ) \
152
SPARSE_SMS_CMP_OP (mx_el_ne, !=, M, MZ, , S, SZ, )
153
154
#define SPARSE_SMS_BOOL_OP_DECLS(M, S, API) \
155
SPARSE_BOOL_OP_DECL (mx_el_and, M, S, API); \
156
SPARSE_BOOL_OP_DECL (mx_el_or, M, S, API);
157
158
#define SPARSE_SMS_BOOL_OP(F, OP, M, S, LHS_ZERO, RHS_ZERO) \
159
SparseBoolMatrix \
160
F (const M& m, const S& s) \
161
{ \
162
octave_idx_type nr = m.rows (); \
163
octave_idx_type nc = m.cols (); \
164
SparseBoolMatrix r; \
165
\
166
if (nr > 0 && nc > 0) \
167
{ \
168
if (LHS_ZERO OP (s != RHS_ZERO)) \
169
{ \
170
r = SparseBoolMatrix (nr, nc, true); \
171
for (octave_idx_type j = 0; j < nc; j++) \
172
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
173
if (! ((m.data (i) != LHS_ZERO) OP (s != RHS_ZERO))) \
174
r.data (m.ridx (i) + j * nr) = false; \
175
r.maybe_compress (true); \
176
} \
177
else \
178
{ \
179
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
180
r.cidx (0) = static_cast<octave_idx_type> (0); \
181
octave_idx_type nel = 0; \
182
for (octave_idx_type j = 0; j < nc; j++) \
183
{ \
184
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
185
if ((m.data (i) != LHS_ZERO) OP (s != RHS_ZERO)) \
186
{ \
187
r.ridx (nel) = m.ridx (i); \
188
r.data (nel++) = true; \
189
} \
190
r.cidx (j + 1) = nel; \
191
} \
192
r.maybe_compress (false); \
193
} \
194
} \
195
return r; \
196
}
197
198
#define SPARSE_SMS_BOOL_OPS2(M, S, LHS_ZERO, RHS_ZERO) \
199
SPARSE_SMS_BOOL_OP (mx_el_and, &&, M, S, LHS_ZERO, RHS_ZERO) \
200
SPARSE_SMS_BOOL_OP (mx_el_or, ||, M, S, LHS_ZERO, RHS_ZERO)
201
202
#define SPARSE_SMS_BOOL_OPS(M, S, ZERO) \
203
SPARSE_SMS_BOOL_OPS2(M, S, ZERO, ZERO)
204
205
#define SPARSE_SMS_OP_DECLS(R1, R2, M, S, API) \
206
SPARSE_SMS_BIN_OP_DECLS (R1, R2, M, S, API) \
207
SPARSE_SMS_CMP_OP_DECLS (M, S, API) \
208
SPARSE_SMS_BOOL_OP_DECLS (M, S, API)
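// Note (added comment): throughout this file the *_DECLS macros expand to
// extern declarations of the arithmetic, comparison and boolean operators
// for a given type combination, while the corresponding *_OPS macros expand
// to the definitions; a header would normally use the DECLS form and the
// matching source file the OPS form.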
209
210
// scalar by matrix operations.
211
212
#define SPARSE_SSM_BIN_OP_DECLS(R1, R2, S, M, API) \
213
SPARSE_BIN_OP_DECL (R1, operator +, S, M, API); \
214
SPARSE_BIN_OP_DECL (R1, operator -, S, M, API); \
215
SPARSE_BIN_OP_DECL (R2, operator *, S, M, API); \
216
SPARSE_BIN_OP_DECL (R2, operator /, S, M, API);
217
218
#define SPARSE_SSM_BIN_OP_1(R, F, OP, S, M) \
219
R \
220
F (const S& s, const M& m) \
221
{ \
222
octave_idx_type nr = m.rows (); \
223
octave_idx_type nc = m.cols (); \
224
\
225
R r (nr, nc, (s OP 0.0)); \
226
\
227
for (octave_idx_type j = 0; j < nc; j++) \
228
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
229
r.xelem (m.ridx (i), j) = s OP m.data (i); \
230
\
231
return r; \
232
}
233
234
#define SPARSE_SSM_BIN_OP_2(R, F, OP, S, M) \
235
R \
236
F (const S& s, const M& m) \
237
{ \
238
octave_idx_type nr = m.rows (); \
239
octave_idx_type nc = m.cols (); \
240
octave_idx_type nz = m.nnz (); \
241
\
242
R r (nr, nc, nz); \
243
\
244
for (octave_idx_type i = 0; i < nz; i++) \
245
{ \
246
r.xdata (i) = s OP m.data (i); \
247
r.xridx (i) = m.ridx (i); \
248
} \
249
for (octave_idx_type i = 0; i < nc + 1; i++) \
250
r.xcidx (i) = m.cidx (i); \
251
\
252
r.maybe_compress(true); \
253
return r; \
254
}
255
256
#define SPARSE_SSM_BIN_OPS(R1, R2, S, M) \
257
SPARSE_SSM_BIN_OP_1 (R1, operator +, +, S, M) \
258
SPARSE_SSM_BIN_OP_1 (R1, operator -, -, S, M) \
259
SPARSE_SSM_BIN_OP_2 (R2, operator *, *, S, M) \
260
SPARSE_SSM_BIN_OP_2 (R2, operator /, /, S, M)
261
262
#define SPARSE_SSM_CMP_OP_DECLS(S, M, API) \
263
SPARSE_CMP_OP_DECL (mx_el_lt, S, M, API); \
264
SPARSE_CMP_OP_DECL (mx_el_le, S, M, API); \
265
SPARSE_CMP_OP_DECL (mx_el_ge, S, M, API); \
266
SPARSE_CMP_OP_DECL (mx_el_gt, S, M, API); \
267
SPARSE_CMP_OP_DECL (mx_el_eq, S, M, API); \
268
SPARSE_CMP_OP_DECL (mx_el_ne, S, M, API);
269
270
#define SPARSE_SSM_EQNE_OP_DECLS(S, M, API) \
271
SPARSE_CMP_OP_DECL (mx_el_eq, S, M, API); \
272
SPARSE_CMP_OP_DECL (mx_el_ne, S, M, API);
273
274
#define SPARSE_SSM_CMP_OP(F, OP, S, SZ, SC, M, MZ, MC) \
275
SparseBoolMatrix \
276
F (const S& s, const M& m) \
277
{ \
278
octave_idx_type nr = m.rows (); \
279
octave_idx_type nc = m.cols (); \
280
SparseBoolMatrix r; \
281
\
282
if (SC (s) OP SC (MZ)) \
283
{ \
284
r = SparseBoolMatrix (nr, nc, true); \
285
for (octave_idx_type j = 0; j < nc; j++) \
286
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
287
if (! (SC (s) OP MC (m.data (i)))) \
288
r.data (m.ridx (i) + j * nr) = false; \
289
r.maybe_compress (true); \
290
} \
291
else \
292
{ \
293
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
294
r.cidx (0) = static_cast<octave_idx_type> (0); \
295
octave_idx_type nel = 0; \
296
for (octave_idx_type j = 0; j < nc; j++) \
297
{ \
298
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
299
if (SC (s) OP MC (m.data (i))) \
300
{ \
301
r.ridx (nel) = m.ridx (i); \
302
r.data (nel++) = true; \
303
} \
304
r.cidx (j + 1) = nel; \
305
} \
306
r.maybe_compress (false); \
307
} \
308
return r; \
309
}
310
311
#define SPARSE_SSM_CMP_OPS(S, SZ, SC, M, MZ, MC) \
312
SPARSE_SSM_CMP_OP (mx_el_lt, <, S, SZ, , M, MZ, ) \
313
SPARSE_SSM_CMP_OP (mx_el_le, <=, S, SZ, , M, MZ, ) \
314
SPARSE_SSM_CMP_OP (mx_el_ge, >=, S, SZ, , M, MZ, ) \
315
SPARSE_SSM_CMP_OP (mx_el_gt, >, S, SZ, , M, MZ, ) \
316
SPARSE_SSM_CMP_OP (mx_el_eq, ==, S, SZ, , M, MZ, ) \
317
SPARSE_SSM_CMP_OP (mx_el_ne, !=, S, SZ, , M, MZ, )
318
319
#define SPARSE_SSM_EQNE_OPS(S, SZ, SC, M, MZ, MC) \
320
SPARSE_SSM_CMP_OP (mx_el_eq, ==, S, SZ, , M, MZ, ) \
321
SPARSE_SSM_CMP_OP (mx_el_ne, !=, S, SZ, , M, MZ, )
322
323
#define SPARSE_SSM_BOOL_OP_DECLS(S, M, API) \
324
SPARSE_BOOL_OP_DECL (mx_el_and, S, M, API); \
325
SPARSE_BOOL_OP_DECL (mx_el_or, S, M, API); \
326
327
#define SPARSE_SSM_BOOL_OP(F, OP, S, M, LHS_ZERO, RHS_ZERO) \
328
SparseBoolMatrix \
329
F (const S& s, const M& m) \
330
{ \
331
octave_idx_type nr = m.rows (); \
332
octave_idx_type nc = m.cols (); \
333
SparseBoolMatrix r; \
334
\
335
if (nr > 0 && nc > 0) \
336
{ \
337
if ((s != LHS_ZERO) OP RHS_ZERO) \
338
{ \
339
r = SparseBoolMatrix (nr, nc, true); \
340
for (octave_idx_type j = 0; j < nc; j++) \
341
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
342
if (! ((s != LHS_ZERO) OP (m.data (i) != RHS_ZERO))) \
343
r.data (m.ridx (i) + j * nr) = false; \
344
r.maybe_compress (true); \
345
} \
346
else \
347
{ \
348
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
349
r.cidx (0) = static_cast<octave_idx_type> (0); \
350
octave_idx_type nel = 0; \
351
for (octave_idx_type j = 0; j < nc; j++) \
352
{ \
353
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
354
if ((s != LHS_ZERO) OP (m.data (i) != RHS_ZERO)) \
355
{ \
356
r.ridx (nel) = m.ridx (i); \
357
r.data (nel++) = true; \
358
} \
359
r.cidx (j + 1) = nel; \
360
} \
361
r.maybe_compress (false); \
362
} \
363
} \
364
return r; \
365
}
366
367
#define SPARSE_SSM_BOOL_OPS2(S, M, LHS_ZERO, RHS_ZERO) \
368
SPARSE_SSM_BOOL_OP (mx_el_and, &&, S, M, LHS_ZERO, RHS_ZERO) \
369
SPARSE_SSM_BOOL_OP (mx_el_or, ||, S, M, LHS_ZERO, RHS_ZERO)
370
371
#define SPARSE_SSM_BOOL_OPS(S, M, ZERO) \
372
SPARSE_SSM_BOOL_OPS2(S, M, ZERO, ZERO)
373
374
#define SPARSE_SSM_OP_DECLS(R1, R2, S, M, API) \
375
SPARSE_SSM_BIN_OP_DECLS (R1, R2, S, M, API) \
376
SPARSE_SSM_CMP_OP_DECLS (S, M, API) \
377
SPARSE_SSM_BOOL_OP_DECLS (S, M, API) \
378
379
// sparse matrix by sparse matrix operations.
380
381
#define SPARSE_SMSM_BIN_OP_DECLS(R1, R2, M1, M2, API) \
382
SPARSE_BIN_OP_DECL (R1, operator +, M1, M2, API); \
383
SPARSE_BIN_OP_DECL (R1, operator -, M1, M2, API); \
384
SPARSE_BIN_OP_DECL (R2, product, M1, M2, API); \
385
SPARSE_BIN_OP_DECL (R2, quotient, M1, M2, API);
386
387
#define SPARSE_SMSM_BIN_OP_1(R, F, OP, M1, M2) \
388
R \
389
F (const M1& m1, const M2& m2) \
390
{ \
391
R r; \
392
\
393
octave_idx_type m1_nr = m1.rows (); \
394
octave_idx_type m1_nc = m1.cols (); \
395
\
396
octave_idx_type m2_nr = m2.rows (); \
397
octave_idx_type m2_nc = m2.cols (); \
398
\
399
if (m1_nr == 1 && m1_nc == 1) \
400
{ \
401
if (m1.elem (0,0) == 0.) \
402
r = OP R (m2); \
403
else \
404
{ \
405
r = R (m2_nr, m2_nc, m1.data (0) OP 0.); \
406
\
407
for (octave_idx_type j = 0 ; j < m2_nc ; j++) \
408
{ \
409
octave_quit (); \
410
octave_idx_type idxj = j * m2_nr; \
411
for (octave_idx_type i = m2.cidx (j) ; i < m2.cidx (j+1) ; i++) \
412
{ \
413
octave_quit (); \
414
r.data (idxj + m2.ridx (i)) = m1.data (0) OP m2.data (i); \
415
} \
416
} \
417
r.maybe_compress (); \
418
} \
419
} \
420
else if (m2_nr == 1 && m2_nc == 1) \
421
{ \
422
if (m2.elem (0,0) == 0.) \
423
r = R (m1); \
424
else \
425
{ \
426
r = R (m1_nr, m1_nc, 0. OP m2.data (0)); \
427
\
428
for (octave_idx_type j = 0 ; j < m1_nc ; j++) \
429
{ \
430
octave_quit (); \
431
octave_idx_type idxj = j * m1_nr; \
432
for (octave_idx_type i = m1.cidx (j) ; i < m1.cidx (j+1) ; i++) \
433
{ \
434
octave_quit (); \
435
r.data (idxj + m1.ridx (i)) = m1.data (i) OP m2.data (0); \
436
} \
437
} \
438
r.maybe_compress (); \
439
} \
440
} \
441
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
442
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
443
else \
444
{ \
445
r = R (m1_nr, m1_nc, (m1.nnz () + m2.nnz ())); \
446
\
447
octave_idx_type jx = 0; \
448
r.cidx (0) = 0; \
449
for (octave_idx_type i = 0 ; i < m1_nc ; i++) \
450
{ \
451
octave_idx_type ja = m1.cidx (i); \
452
octave_idx_type ja_max = m1.cidx (i+1); \
453
bool ja_lt_max= ja < ja_max; \
454
\
455
octave_idx_type jb = m2.cidx (i); \
456
octave_idx_type jb_max = m2.cidx (i+1); \
457
bool jb_lt_max = jb < jb_max; \
458
\
459
while (ja_lt_max || jb_lt_max ) \
460
{ \
461
octave_quit (); \
462
if ((! jb_lt_max) || \
463
(ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \
464
{ \
465
r.ridx (jx) = m1.ridx (ja); \
466
r.data (jx) = m1.data (ja) OP 0.; \
467
jx++; \
468
ja++; \
469
ja_lt_max= ja < ja_max; \
470
} \
471
else if (( !ja_lt_max ) || \
472
(jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)) ) ) \
473
{ \
474
r.ridx (jx) = m2.ridx (jb); \
475
r.data (jx) = 0. OP m2.data (jb); \
476
jx++; \
477
jb++; \
478
jb_lt_max= jb < jb_max; \
479
} \
480
else \
481
{ \
482
if ((m1.data (ja) OP m2.data (jb)) != 0.) \
483
{ \
484
r.data (jx) = m1.data (ja) OP m2.data (jb); \
485
r.ridx (jx) = m1.ridx (ja); \
486
jx++; \
487
} \
488
ja++; \
489
ja_lt_max= ja < ja_max; \
490
jb++; \
491
jb_lt_max= jb < jb_max; \
492
} \
493
} \
494
r.cidx (i+1) = jx; \
495
} \
496
\
497
r.maybe_compress (); \
498
} \
499
\
500
return r; \
501
}
502
503
#define SPARSE_SMSM_BIN_OP_2(R, F, OP, M1, M2) \
504
R \
505
F (const M1& m1, const M2& m2) \
506
{ \
507
R r; \
508
\
509
octave_idx_type m1_nr = m1.rows (); \
510
octave_idx_type m1_nc = m1.cols (); \
511
\
512
octave_idx_type m2_nr = m2.rows (); \
513
octave_idx_type m2_nc = m2.cols (); \
514
\
515
if (m1_nr == 1 && m1_nc == 1) \
516
{ \
517
if (m1.elem (0,0) == 0.) \
518
r = R (m2_nr, m2_nc); \
519
else \
520
{ \
521
r = R (m2); \
522
octave_idx_type m2_nnz = m2.nnz (); \
523
\
524
for (octave_idx_type i = 0 ; i < m2_nnz ; i++) \
525
{ \
526
octave_quit (); \
527
r.data (i) = m1.data (0) OP r.data (i); \
528
} \
529
r.maybe_compress (); \
530
} \
531
} \
532
else if (m2_nr == 1 && m2_nc == 1) \
533
{ \
534
if (m2.elem (0,0) == 0.) \
535
r = R (m1_nr, m1_nc); \
536
else \
537
{ \
538
r = R (m1); \
539
octave_idx_type m1_nnz = m1.nnz (); \
540
\
541
for (octave_idx_type i = 0 ; i < m1_nnz ; i++) \
542
{ \
543
octave_quit (); \
544
r.data (i) = r.data (i) OP m2.data (0); \
545
} \
546
r.maybe_compress (); \
547
} \
548
} \
549
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
550
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
551
else \
552
{ \
553
r = R (m1_nr, m1_nc, (m1.nnz () > m2.nnz () ? m1.nnz () : m2.nnz ())); \
554
\
555
octave_idx_type jx = 0; \
556
r.cidx (0) = 0; \
557
for (octave_idx_type i = 0 ; i < m1_nc ; i++) \
558
{ \
559
octave_idx_type ja = m1.cidx (i); \
560
octave_idx_type ja_max = m1.cidx (i+1); \
561
bool ja_lt_max= ja < ja_max; \
562
\
563
octave_idx_type jb = m2.cidx (i); \
564
octave_idx_type jb_max = m2.cidx (i+1); \
565
bool jb_lt_max = jb < jb_max; \
566
\
567
while (ja_lt_max || jb_lt_max ) \
568
{ \
569
octave_quit (); \
570
if ((! jb_lt_max) || \
571
(ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \
572
{ \
573
ja++; ja_lt_max= ja < ja_max; \
574
} \
575
else if (( !ja_lt_max ) || \
576
(jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)) ) ) \
577
{ \
578
jb++; jb_lt_max= jb < jb_max; \
579
} \
580
else \
581
{ \
582
if ((m1.data (ja) OP m2.data (jb)) != 0.) \
583
{ \
584
r.data (jx) = m1.data (ja) OP m2.data (jb); \
585
r.ridx (jx) = m1.ridx (ja); \
586
jx++; \
587
} \
588
ja++; ja_lt_max= ja < ja_max; \
589
jb++; jb_lt_max= jb < jb_max; \
590
} \
591
} \
592
r.cidx (i+1) = jx; \
593
} \
594
\
595
r.maybe_compress (); \
596
} \
597
\
598
return r; \
599
}
600
601
#define SPARSE_SMSM_BIN_OP_3(R, F, OP, M1, M2) \
602
R \
603
F (const M1& m1, const M2& m2) \
604
{ \
605
R r; \
606
\
607
octave_idx_type m1_nr = m1.rows (); \
608
octave_idx_type m1_nc = m1.cols (); \
609
\
610
octave_idx_type m2_nr = m2.rows (); \
611
octave_idx_type m2_nc = m2.cols (); \
612
\
613
if (m1_nr == 1 && m1_nc == 1) \
614
{ \
615
if ((m1.elem (0,0) OP Complex ()) == Complex ()) \
616
{ \
617
octave_idx_type m2_nnz = m2.nnz (); \
618
r = R (m2); \
619
for (octave_idx_type i = 0 ; i < m2_nnz ; i++) \
620
r.data (i) = m1.elem (0,0) OP r.data (i); \
621
r.maybe_compress (); \
622
} \
623
else \
624
{ \
625
r = R (m2_nr, m2_nc, m1.elem (0,0) OP Complex ()); \
626
for (octave_idx_type j = 0 ; j < m2_nc ; j++) \
627
{ \
628
octave_quit (); \
629
octave_idx_type idxj = j * m2_nr; \
630
for (octave_idx_type i = m2.cidx (j) ; i < m2.cidx (j+1) ; i++) \
631
{ \
632
octave_quit (); \
633
r.data (idxj + m2.ridx (i)) = m1.elem (0,0) OP m2.data (i); \
634
} \
635
} \
636
r.maybe_compress (); \
637
} \
638
} \
639
else if (m2_nr == 1 && m2_nc == 1) \
640
{ \
641
if ((Complex () OP m1.elem (0,0)) == Complex ()) \
642
{ \
643
octave_idx_type m1_nnz = m1.nnz (); \
644
r = R (m1); \
645
for (octave_idx_type i = 0 ; i < m1_nnz ; i++) \
646
r.data (i) = r.data (i) OP m2.elem (0,0); \
647
r.maybe_compress (); \
648
} \
649
else \
650
{ \
651
r = R (m1_nr, m1_nc, Complex () OP m2.elem (0,0)); \
652
for (octave_idx_type j = 0 ; j < m1_nc ; j++) \
653
{ \
654
octave_quit (); \
655
octave_idx_type idxj = j * m1_nr; \
656
for (octave_idx_type i = m1.cidx (j) ; i < m1.cidx (j+1) ; i++) \
657
{ \
658
octave_quit (); \
659
r.data (idxj + m1.ridx (i)) = m1.data (i) OP m2.elem (0,0); \
660
} \
661
} \
662
r.maybe_compress (); \
663
} \
664
} \
665
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
666
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
667
else \
668
{ \
669
\
670
/* FIXME: Kludge... Always double/Complex, so Complex () */ \
671
r = R (m1_nr, m1_nc, (Complex () OP Complex ())); \
672
\
673
for (octave_idx_type i = 0 ; i < m1_nc ; i++) \
674
{ \
675
octave_idx_type ja = m1.cidx (i); \
676
octave_idx_type ja_max = m1.cidx (i+1); \
677
bool ja_lt_max= ja < ja_max; \
678
\
679
octave_idx_type jb = m2.cidx (i); \
680
octave_idx_type jb_max = m2.cidx (i+1); \
681
bool jb_lt_max = jb < jb_max; \
682
\
683
while (ja_lt_max || jb_lt_max ) \
684
{ \
685
octave_quit (); \
686
if ((! jb_lt_max) || \
687
(ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \
688
{ \
689
/* keep those kludges coming */ \
690
r.elem (m1.ridx (ja),i) = m1.data (ja) OP Complex (); \
691
ja++; \
692
ja_lt_max= ja < ja_max; \
693
} \
694
else if (( !ja_lt_max ) || \
695
(jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)) ) ) \
696
{ \
697
/* keep those kludges coming */ \
698
r.elem (m2.ridx (jb),i) = Complex () OP m2.data (jb); \
699
jb++; \
700
jb_lt_max= jb < jb_max; \
701
} \
702
else \
703
{ \
704
r.elem (m1.ridx (ja),i) = m1.data (ja) OP m2.data (jb); \
705
ja++; \
706
ja_lt_max= ja < ja_max; \
707
jb++; \
708
jb_lt_max= jb < jb_max; \
709
} \
710
} \
711
} \
712
r.maybe_compress (true); \
713
} \
714
\
715
return r; \
716
}
717
718
// Note that SM ./ SM needs to take into account the NaN and Inf values
719
// implied by the division by zero.
720
// FIXME: Are the NaNs double(NaN) or Complex(NaN,NaN) in the complex case?
721
#define SPARSE_SMSM_BIN_OPS(R1, R2, M1, M2) \
722
SPARSE_SMSM_BIN_OP_1 (R1, operator +, +, M1, M2) \
723
SPARSE_SMSM_BIN_OP_1 (R1, operator -, -, M1, M2) \
724
SPARSE_SMSM_BIN_OP_2 (R2, product, *, M1, M2) \
725
SPARSE_SMSM_BIN_OP_3 (R2, quotient, /, M1, M2)
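// Worked example for the note above (added comment, not in the original
// source): elementwise division cannot preserve sparsity because the
// implicit zeros of the divisor produce Inf or NaN, e.g.
//
//   [1 0 0] ./ [0 2 0]  ==  [Inf 0 NaN]
//
// (1/0 == Inf, 0/2 == 0, 0/0 == NaN).  This is why SPARSE_SMSM_BIN_OP_3
// starts from a result filled with Complex () OP Complex () and then
// overwrites every position that appears in either operand's pattern.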
726
727
#define SPARSE_SMSM_CMP_OP_DECLS(M1, M2, API) \
728
SPARSE_CMP_OP_DECL (mx_el_lt, M1, M2, API); \
729
SPARSE_CMP_OP_DECL (mx_el_le, M1, M2, API); \
730
SPARSE_CMP_OP_DECL (mx_el_ge, M1, M2, API); \
731
SPARSE_CMP_OP_DECL (mx_el_gt, M1, M2, API); \
732
SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \
733
SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API);
734
735
#define SPARSE_SMSM_EQNE_OP_DECLS(M1, M2, API) \
736
SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \
737
SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API);
738
739
// FIXME: this macro duplicates the bodies of the template functions
740
// defined in the SPARSE_SSM_CMP_OP and SPARSE_SMS_CMP_OP macros.
741
742
#define SPARSE_SMSM_CMP_OP(F, OP, M1, Z1, C1, M2, Z2, C2) \
743
SparseBoolMatrix \
744
F (const M1& m1, const M2& m2) \
745
{ \
746
SparseBoolMatrix r; \
747
\
748
octave_idx_type m1_nr = m1.rows (); \
749
octave_idx_type m1_nc = m1.cols (); \
750
\
751
octave_idx_type m2_nr = m2.rows (); \
752
octave_idx_type m2_nc = m2.cols (); \
753
\
754
if (m1_nr == 1 && m1_nc == 1) \
755
{ \
756
if (C1 (m1.elem (0,0)) OP C2 (Z2)) \
757
{ \
758
r = SparseBoolMatrix (m2_nr, m2_nc, true); \
759
for (octave_idx_type j = 0; j < m2_nc; j++) \
760
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
761
if (! (C1 (m1.elem (0,0)) OP C2 (m2.data (i)))) \
762
r.data (m2.ridx (i) + j * m2_nr) = false; \
763
r.maybe_compress (true); \
764
} \
765
else \
766
{ \
767
r = SparseBoolMatrix (m2_nr, m2_nc, m2.nnz ()); \
768
r.cidx (0) = static_cast<octave_idx_type> (0); \
769
octave_idx_type nel = 0; \
770
for (octave_idx_type j = 0; j < m2_nc; j++) \
771
{ \
772
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
773
if (C1 (m1.elem (0,0)) OP C2 (m2.data (i))) \
774
{ \
775
r.ridx (nel) = m2.ridx (i); \
776
r.data (nel++) = true; \
777
} \
778
r.cidx (j + 1) = nel; \
779
} \
780
r.maybe_compress (false); \
781
} \
782
} \
783
else if (m2_nr == 1 && m2_nc == 1) \
784
{ \
785
if (C1 (Z1) OP C2 (m2.elem (0,0))) \
786
{ \
787
r = SparseBoolMatrix (m1_nr, m1_nc, true); \
788
for (octave_idx_type j = 0; j < m1_nc; j++) \
789
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
790
if (! (C1 (m1.data (i)) OP C2 (m2.elem (0,0)))) \
791
r.data (m1.ridx (i) + j * m1_nr) = false; \
792
r.maybe_compress (true); \
793
} \
794
else \
795
{ \
796
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz ()); \
797
r.cidx (0) = static_cast<octave_idx_type> (0); \
798
octave_idx_type nel = 0; \
799
for (octave_idx_type j = 0; j < m1_nc; j++) \
800
{ \
801
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
802
if (C1 (m1.data (i)) OP C2 (m2.elem (0,0))) \
803
{ \
804
r.ridx (nel) = m1.ridx (i); \
805
r.data (nel++) = true; \
806
} \
807
r.cidx (j + 1) = nel; \
808
} \
809
r.maybe_compress (false); \
810
} \
811
} \
812
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
813
{ \
814
if (m1_nr != 0 || m1_nc != 0) \
815
{ \
816
if (C1 (Z1) OP C2 (Z2)) \
817
{ \
818
r = SparseBoolMatrix (m1_nr, m1_nc, true); \
819
for (octave_idx_type j = 0; j < m1_nc; j++) \
820
{ \
821
octave_idx_type i1 = m1.cidx (j); \
822
octave_idx_type e1 = m1.cidx (j+1); \
823
octave_idx_type i2 = m2.cidx (j); \
824
octave_idx_type e2 = m2.cidx (j+1); \
825
while (i1 < e1 || i2 < e2) \
826
{ \
827
if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \
828
{ \
829
if (! (C1 (Z1) OP C2 (m2.data (i2)))) \
830
r.data (m2.ridx (i2) + j * m1_nr) = false; \
831
i2++; \
832
} \
833
else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \
834
{ \
835
if (! (C1 (m1.data (i1)) OP C2 (Z2))) \
836
r.data (m1.ridx (i1) + j * m1_nr) = false; \
837
i1++; \
838
} \
839
else \
840
{ \
841
if (! (C1 (m1.data (i1)) OP C2 (m2.data (i2)))) \
842
r.data (m1.ridx (i1) + j * m1_nr) = false; \
843
i1++; \
844
i2++; \
845
} \
846
} \
847
} \
848
r.maybe_compress (true); \
849
} \
850
else \
851
{ \
852
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz () + m2.nnz ()); \
853
r.cidx (0) = static_cast<octave_idx_type> (0); \
854
octave_idx_type nel = 0; \
855
for (octave_idx_type j = 0; j < m1_nc; j++) \
856
{ \
857
octave_idx_type i1 = m1.cidx (j); \
858
octave_idx_type e1 = m1.cidx (j+1); \
859
octave_idx_type i2 = m2.cidx (j); \
860
octave_idx_type e2 = m2.cidx (j+1); \
861
while (i1 < e1 || i2 < e2) \
862
{ \
863
if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \
864
{ \
865
if (C1 (Z1) OP C2 (m2.data (i2))) \
866
{ \
867
r.ridx (nel) = m2.ridx (i2); \
868
r.data (nel++) = true; \
869
} \
870
i2++; \
871
} \
872
else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \
873
{ \
874
if (C1 (m1.data (i1)) OP C2 (Z2)) \
875
{ \
876
r.ridx (nel) = m1.ridx (i1); \
877
r.data (nel++) = true; \
878
} \
879
i1++; \
880
} \
881
else \
882
{ \
883
if (C1 (m1.data (i1)) OP C2 (m2.data (i2))) \
884
{ \
885
r.ridx (nel) = m1.ridx (i1); \
886
r.data (nel++) = true; \
887
} \
888
i1++; \
889
i2++; \
890
} \
891
} \
892
r.cidx (j + 1) = nel; \
893
} \
894
r.maybe_compress (false); \
895
} \
896
} \
897
} \
898
else \
899
{ \
900
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
901
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
902
} \
903
return r; \
904
}
905
906
#define SPARSE_SMSM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \
907
SPARSE_SMSM_CMP_OP (mx_el_lt, <, M1, Z1, , M2, Z2, ) \
908
SPARSE_SMSM_CMP_OP (mx_el_le, <=, M1, Z1, , M2, Z2, ) \
909
SPARSE_SMSM_CMP_OP (mx_el_ge, >=, M1, Z1, , M2, Z2, ) \
910
SPARSE_SMSM_CMP_OP (mx_el_gt, >, M1, Z1, , M2, Z2, ) \
911
SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, Z1, , M2, Z2, ) \
912
SPARSE_SMSM_CMP_OP (mx_el_ne, !=, M1, Z1, , M2, Z2, )
913
914
#define SPARSE_SMSM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \
915
SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, Z1, , M2, Z2, ) \
916
SPARSE_SMSM_CMP_OP (mx_el_ne, !=, M1, Z1, , M2, Z2, )
917
918
#define SPARSE_SMSM_BOOL_OP_DECLS(M1, M2, API) \
919
SPARSE_BOOL_OP_DECL (mx_el_and, M1, M2, API); \
920
SPARSE_BOOL_OP_DECL (mx_el_or, M1, M2, API);
921
922
// FIXME: this macro duplicates the bodies of the template functions
923
// defined in the SPARSE_SSM_BOOL_OP and SPARSE_SMS_BOOL_OP macros.
924
925
#define SPARSE_SMSM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \
926
SparseBoolMatrix \
927
F (const M1& m1, const M2& m2) \
928
{ \
929
SparseBoolMatrix r; \
930
\
931
octave_idx_type m1_nr = m1.rows (); \
932
octave_idx_type m1_nc = m1.cols (); \
933
\
934
octave_idx_type m2_nr = m2.rows (); \
935
octave_idx_type m2_nc = m2.cols (); \
936
\
937
if (m1_nr == 1 && m1_nc == 1) \
938
{ \
939
if (m2_nr > 0 && m2_nc > 0) \
940
{ \
941
if ((m1.elem (0,0) != LHS_ZERO) OP RHS_ZERO) \
942
{ \
943
r = SparseBoolMatrix (m2_nr, m2_nc, true); \
944
for (octave_idx_type j = 0; j < m2_nc; j++) \
945
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
946
if (! ((m1.elem (0,0) != LHS_ZERO) OP (m2.data (i) != RHS_ZERO))) \
947
r.data (m2.ridx (i) + j * m2_nr) = false; \
948
r.maybe_compress (true); \
949
} \
950
else \
951
{ \
952
r = SparseBoolMatrix (m2_nr, m2_nc, m2.nnz ()); \
953
r.cidx (0) = static_cast<octave_idx_type> (0); \
954
octave_idx_type nel = 0; \
955
for (octave_idx_type j = 0; j < m2_nc; j++) \
956
{ \
957
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
958
if ((m1.elem (0,0) != LHS_ZERO) OP (m2.data (i) != RHS_ZERO)) \
959
{ \
960
r.ridx (nel) = m2.ridx (i); \
961
r.data (nel++) = true; \
962
} \
963
r.cidx (j + 1) = nel; \
964
} \
965
r.maybe_compress (false); \
966
} \
967
} \
968
} \
969
else if (m2_nr == 1 && m2_nc == 1) \
970
{ \
971
if (m1_nr > 0 && m1_nc > 0) \
972
{ \
973
if (LHS_ZERO OP (m2.elem (0,0) != RHS_ZERO)) \
974
{ \
975
r = SparseBoolMatrix (m1_nr, m1_nc, true); \
976
for (octave_idx_type j = 0; j < m1_nc; j++) \
977
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
978
if (! ((m1.data (i) != LHS_ZERO) OP (m2.elem (0,0) != RHS_ZERO))) \
979
r.data (m1.ridx (i) + j * m1_nr) = false; \
980
r.maybe_compress (true); \
981
} \
982
else \
983
{ \
984
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz ()); \
985
r.cidx (0) = static_cast<octave_idx_type> (0); \
986
octave_idx_type nel = 0; \
987
for (octave_idx_type j = 0; j < m1_nc; j++) \
988
{ \
989
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
990
if ((m1.data (i) != LHS_ZERO) OP (m2.elem (0,0) != RHS_ZERO)) \
991
{ \
992
r.ridx (nel) = m1.ridx (i); \
993
r.data (nel++) = true; \
994
} \
995
r.cidx (j + 1) = nel; \
996
} \
997
r.maybe_compress (false); \
998
} \
999
} \
1000
} \
1001
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1002
{ \
1003
if (m1_nr != 0 || m1_nc != 0) \
1004
{ \
1005
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz () + m2.nnz ()); \
1006
r.cidx (0) = static_cast<octave_idx_type> (0); \
1007
octave_idx_type nel = 0; \
1008
for (octave_idx_type j = 0; j < m1_nc; j++) \
1009
{ \
1010
octave_idx_type i1 = m1.cidx (j); \
1011
octave_idx_type e1 = m1.cidx (j+1); \
1012
octave_idx_type i2 = m2.cidx (j); \
1013
octave_idx_type e2 = m2.cidx (j+1); \
1014
while (i1 < e1 || i2 < e2) \
1015
{ \
1016
if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \
1017
{ \
1018
if (LHS_ZERO OP m2.data (i2) != RHS_ZERO) \
1019
{ \
1020
r.ridx (nel) = m2.ridx (i2); \
1021
r.data (nel++) = true; \
1022
} \
1023
i2++; \
1024
} \
1025
else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \
1026
{ \
1027
if (m1.data (i1) != LHS_ZERO OP RHS_ZERO) \
1028
{ \
1029
r.ridx (nel) = m1.ridx (i1); \
1030
r.data (nel++) = true; \
1031
} \
1032
i1++; \
1033
} \
1034
else \
1035
{ \
1036
if (m1.data (i1) != LHS_ZERO OP m2.data (i2) != RHS_ZERO) \
1037
{ \
1038
r.ridx (nel) = m1.ridx (i1); \
1039
r.data (nel++) = true; \
1040
} \
1041
i1++; \
1042
i2++; \
1043
} \
1044
} \
1045
r.cidx (j + 1) = nel; \
1046
} \
1047
r.maybe_compress (false); \
1048
} \
1049
} \
1050
else \
1051
{ \
1052
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1053
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1054
} \
1055
return r; \
1056
}
1057
1058
#define SPARSE_SMSM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \
1059
SPARSE_SMSM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \
1060
SPARSE_SMSM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO) \
1061
1062
#define SPARSE_SMSM_BOOL_OPS(M1, M2, ZERO) \
1063
SPARSE_SMSM_BOOL_OPS2(M1, M2, ZERO, ZERO)
1064
1065
#define SPARSE_SMSM_OP_DECLS(R1, R2, M1, M2, API) \
1066
SPARSE_SMSM_BIN_OP_DECLS (R1, R2, M1, M2, API) \
1067
SPARSE_SMSM_CMP_OP_DECLS (M1, M2, API) \
1068
SPARSE_SMSM_BOOL_OP_DECLS (M1, M2, API)
1069
1070
// full matrix by sparse matrix operations.
1071
1072
#define SPARSE_MSM_BIN_OP_DECLS(R1, R2, M1, M2, API) \
1073
SPARSE_BIN_OP_DECL (R1, operator +, M1, M2, API); \
1074
SPARSE_BIN_OP_DECL (R1, operator -, M1, M2, API); \
1075
SPARSE_BIN_OP_DECL (R2, product, M1, M2, API); \
1076
SPARSE_BIN_OP_DECL (R2, quotient, M1, M2, API);
1077
1078
#define SPARSE_MSM_BIN_OP_1(R, F, OP, M1, M2) \
1079
R \
1080
F (const M1& m1, const M2& m2) \
1081
{ \
1082
R r; \
1083
\
1084
octave_idx_type m1_nr = m1.rows (); \
1085
octave_idx_type m1_nc = m1.cols (); \
1086
\
1087
octave_idx_type m2_nr = m2.rows (); \
1088
octave_idx_type m2_nc = m2.cols (); \
1089
\
1090
if (m2_nr == 1 && m2_nc == 1) \
1091
r = R (m1 OP m2.elem (0,0)); \
1092
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
1093
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1094
else \
1095
{ \
1096
r = R (F (m1, m2.matrix_value ())); \
1097
} \
1098
return r; \
1099
}
1100
1101
#define SPARSE_MSM_BIN_OP_2(R, F, OP, M1, M2) \
1102
R \
1103
F (const M1& m1, const M2& m2) \
1104
{ \
1105
R r; \
1106
\
1107
octave_idx_type m1_nr = m1.rows (); \
1108
octave_idx_type m1_nc = m1.cols (); \
1109
\
1110
octave_idx_type m2_nr = m2.rows (); \
1111
octave_idx_type m2_nc = m2.cols (); \
1112
\
1113
if (m2_nr == 1 && m2_nc == 1) \
1114
r = R (m1 OP m2.elem (0,0)); \
1115
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
1116
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1117
else \
1118
{ \
1119
if (do_mx_check (m1, mx_inline_all_finite<M1::element_type>)) \
1120
{ \
1121
/* Sparsity pattern is preserved. */ \
1122
octave_idx_type m2_nz = m2.nnz (); \
1123
r = R (m2_nr, m2_nc, m2_nz); \
1124
for (octave_idx_type j = 0, k = 0; j < m2_nc; j++) \
1125
{ \
1126
octave_quit (); \
1127
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
1128
{ \
1129
octave_idx_type mri = m2.ridx (i); \
1130
R::element_type x = m1(mri, j) OP m2.data (i); \
1131
if (x != 0.0) \
1132
{ \
1133
r.xdata (k) = x; \
1134
r.xridx (k) = m2.ridx (i); \
1135
k++; \
1136
} \
1137
} \
1138
r.xcidx (j+1) = k; \
1139
} \
1140
r.maybe_compress (false); \
1141
return r; \
1142
} \
1143
else \
1144
r = R (F (m1, m2.matrix_value ())); \
1145
} \
1146
\
1147
return r; \
1148
}
1149
1150
// FIXME: Pass a specific ZERO value
1151
#define SPARSE_MSM_BIN_OPS(R1, R2, M1, M2) \
1152
SPARSE_MSM_BIN_OP_1 (R1, operator +, +, M1, M2) \
1153
SPARSE_MSM_BIN_OP_1 (R1, operator -, -, M1, M2) \
1154
SPARSE_MSM_BIN_OP_2 (R2, product, *, M1, M2) \
1155
SPARSE_MSM_BIN_OP_1 (R2, quotient, /, M1, M2)
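// Note (added comment, not in the original source): in the family above only
// the elementwise product uses the sparsity-preserving SPARSE_MSM_BIN_OP_2
// path.  The quotient stays with SPARSE_MSM_BIN_OP_1 and falls back to the
// full-matrix operation, because m ./ sm divides by the implicit zeros of
// the sparse operand and therefore yields Inf or NaN at those positions, so
// the result is not sparse in general.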
1156
1157
#define SPARSE_MSM_CMP_OP_DECLS(M1, M2, API) \
1158
SPARSE_CMP_OP_DECL (mx_el_lt, M1, M2, API); \
1159
SPARSE_CMP_OP_DECL (mx_el_le, M1, M2, API); \
1160
SPARSE_CMP_OP_DECL (mx_el_ge, M1, M2, API); \
1161
SPARSE_CMP_OP_DECL (mx_el_gt, M1, M2, API); \
1162
SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \
1163
SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API);
1164
1165
#define SPARSE_MSM_EQNE_OP_DECLS(M1, M2, API) \
1166
SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \
1167
SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API);
1168
1169
#define SPARSE_MSM_CMP_OP(F, OP, M1, C1, M2, C2) \
1170
SparseBoolMatrix \
1171
F (const M1& m1, const M2& m2) \
1172
{ \
1173
SparseBoolMatrix r; \
1174
\
1175
octave_idx_type m1_nr = m1.rows (); \
1176
octave_idx_type m1_nc = m1.cols (); \
1177
\
1178
octave_idx_type m2_nr = m2.rows (); \
1179
octave_idx_type m2_nc = m2.cols (); \
1180
\
1181
if (m2_nr == 1 && m2_nc == 1) \
1182
r = SparseBoolMatrix (F (m1, m2.elem (0,0))); \
1183
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1184
{ \
1185
if (m1_nr != 0 || m1_nc != 0) \
1186
{ \
1187
/* Count num of non-zero elements */ \
1188
octave_idx_type nel = 0; \
1189
for (octave_idx_type j = 0; j < m1_nc; j++) \
1190
for (octave_idx_type i = 0; i < m1_nr; i++) \
1191
if (C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j))) \
1192
nel++; \
1193
\
1194
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1195
\
1196
octave_idx_type ii = 0; \
1197
r.cidx (0) = 0; \
1198
for (octave_idx_type j = 0; j < m1_nc; j++) \
1199
{ \
1200
for (octave_idx_type i = 0; i < m1_nr; i++) \
1201
{ \
1202
bool el = C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j)); \
1203
if (el) \
1204
{ \
1205
r.data (ii) = el; \
1206
r.ridx (ii++) = i; \
1207
} \
1208
} \
1209
r.cidx (j+1) = ii; \
1210
} \
1211
} \
1212
} \
1213
else \
1214
{ \
1215
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1216
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1217
} \
1218
return r; \
1219
}
1220
1221
#define SPARSE_MSM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \
1222
SPARSE_MSM_CMP_OP (mx_el_lt, <, M1, , M2, ) \
1223
SPARSE_MSM_CMP_OP (mx_el_le, <=, M1, , M2, ) \
1224
SPARSE_MSM_CMP_OP (mx_el_ge, >=, M1, , M2, ) \
1225
SPARSE_MSM_CMP_OP (mx_el_gt, >, M1, , M2, ) \
1226
SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1227
SPARSE_MSM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1228
1229
#define SPARSE_MSM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \
1230
SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1231
SPARSE_MSM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1232
1233
#define SPARSE_MSM_BOOL_OP_DECLS(M1, M2, API) \
1234
SPARSE_BOOL_OP_DECL (mx_el_and, M1, M2, API); \
1235
SPARSE_BOOL_OP_DECL (mx_el_or, M1, M2, API);
1236
1237
#define SPARSE_MSM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \
1238
SparseBoolMatrix \
1239
F (const M1& m1, const M2& m2) \
1240
{ \
1241
SparseBoolMatrix r; \
1242
\
1243
octave_idx_type m1_nr = m1.rows (); \
1244
octave_idx_type m1_nc = m1.cols (); \
1245
\
1246
octave_idx_type m2_nr = m2.rows (); \
1247
octave_idx_type m2_nc = m2.cols (); \
1248
\
1249
if (m2_nr == 1 && m2_nc == 1) \
1250
r = SparseBoolMatrix (F (m1, m2.elem (0,0))); \
1251
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1252
{ \
1253
if (m1_nr != 0 || m1_nc != 0) \
1254
{ \
1255
/* Count num of non-zero elements */ \
1256
octave_idx_type nel = 0; \
1257
for (octave_idx_type j = 0; j < m1_nc; j++) \
1258
for (octave_idx_type i = 0; i < m1_nr; i++) \
1259
if ((m1.elem (i, j) != LHS_ZERO) \
1260
OP (m2.elem (i, j) != RHS_ZERO)) \
1261
nel++; \
1262
\
1263
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1264
\
1265
octave_idx_type ii = 0; \
1266
r.cidx (0) = 0; \
1267
for (octave_idx_type j = 0; j < m1_nc; j++) \
1268
{ \
1269
for (octave_idx_type i = 0; i < m1_nr; i++) \
1270
{ \
1271
bool el = (m1.elem (i, j) != LHS_ZERO) \
1272
OP (m2.elem (i, j) != RHS_ZERO); \
1273
if (el) \
1274
{ \
1275
r.data (ii) = el; \
1276
r.ridx (ii++) = i; \
1277
} \
1278
} \
1279
r.cidx (j+1) = ii; \
1280
} \
1281
} \
1282
} \
1283
else \
1284
{ \
1285
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1286
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1287
} \
1288
return r; \
1289
}
1290
1291
#define SPARSE_MSM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \
1292
SPARSE_MSM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \
1293
SPARSE_MSM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO) \
1294
1295
#define SPARSE_MSM_BOOL_OPS(M1, M2, ZERO) \
1296
SPARSE_MSM_BOOL_OPS2(M1, M2, ZERO, ZERO)
1297
1298
#define SPARSE_MSM_OP_DECLS(R1, R2, M1, M2, API) \
1299
SPARSE_MSM_BIN_OP_DECLS (R1, R2, M1, M2, API) \
1300
SPARSE_MSM_CMP_OP_DECLS (M1, M2, API) \
1301
SPARSE_MSM_BOOL_OP_DECLS (M1, M2, API)
1302
1303
// sparse matrix by full matrix operations.
1304
1305
#define SPARSE_SMM_BIN_OP_DECLS(R1, R2, M1, M2, API) \
1306
SPARSE_BIN_OP_DECL (R1, operator +, M1, M2, API); \
1307
SPARSE_BIN_OP_DECL (R1, operator -, M1, M2, API); \
1308
SPARSE_BIN_OP_DECL (R2, product, M1, M2, API); \
1309
SPARSE_BIN_OP_DECL (R2, quotient, M1, M2, API);
1310
1311
#define SPARSE_SMM_BIN_OP_1(R, F, OP, M1, M2) \
1312
R \
1313
F (const M1& m1, const M2& m2) \
1314
{ \
1315
R r; \
1316
\
1317
octave_idx_type m1_nr = m1.rows (); \
1318
octave_idx_type m1_nc = m1.cols (); \
1319
\
1320
octave_idx_type m2_nr = m2.rows (); \
1321
octave_idx_type m2_nc = m2.cols (); \
1322
\
1323
if (m1_nr == 1 && m1_nc == 1) \
1324
r = R (m1.elem (0,0) OP m2); \
1325
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
1326
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1327
else \
1328
{ \
1329
r = R (m1.matrix_value () OP m2); \
1330
} \
1331
return r; \
1332
}
1333
1334
// sm .* m preserves sparsity if m contains no Infs nor Nans.
1335
#define SPARSE_SMM_BIN_OP_2_CHECK_product(ET) \
1336
do_mx_check (m2, mx_inline_all_finite<ET>)
1337
1338
// sm ./ m preserves sparsity if m contains no NaNs or zeros.
1339
#define SPARSE_SMM_BIN_OP_2_CHECK_quotient(ET) \
1340
! do_mx_check (m2, mx_inline_any_nan<ET>) && m2.nnz () == m2.numel ()
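// Note (added comment, not in the original source): these two checks guard
// the sparsity-preserving path of SPARSE_SMM_BIN_OP_2.  For the product, an
// Inf or NaN in m2 at a position where the sparse operand is zero would turn
// that zero into NaN (0 * Inf and 0 * NaN are NaN).  For the quotient, any
// NaN or any zero element of m2 breaks the pattern as well, hence the extra
// m2.nnz () == m2.numel () requirement.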
1341
1342
#define SPARSE_SMM_BIN_OP_2(R, F, OP, M1, M2) \
1343
R \
1344
F (const M1& m1, const M2& m2) \
1345
{ \
1346
R r; \
1347
\
1348
octave_idx_type m1_nr = m1.rows (); \
1349
octave_idx_type m1_nc = m1.cols (); \
1350
\
1351
octave_idx_type m2_nr = m2.rows (); \
1352
octave_idx_type m2_nc = m2.cols (); \
1353
\
1354
if (m1_nr == 1 && m1_nc == 1) \
1355
r = R (m1.elem (0,0) OP m2); \
1356
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
1357
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1358
else \
1359
{ \
1360
if (SPARSE_SMM_BIN_OP_2_CHECK_ ## F(M2::element_type)) \
1361
{ \
1362
/* Sparsity pattern is preserved. */ \
1363
octave_idx_type m1_nz = m1.nnz (); \
1364
r = R (m1_nr, m1_nc, m1_nz); \
1365
for (octave_idx_type j = 0, k = 0; j < m1_nc; j++) \
1366
{ \
1367
octave_quit (); \
1368
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
1369
{ \
1370
octave_idx_type mri = m1.ridx (i); \
1371
R::element_type x = m1.data (i) OP m2 (mri, j); \
1372
if (x != 0.0) \
1373
{ \
1374
r.xdata (k) = x; \
1375
r.xridx (k) = m1.ridx (i); \
1376
k++; \
1377
} \
1378
} \
1379
r.xcidx (j+1) = k; \
1380
} \
1381
r.maybe_compress (false); \
1382
return r; \
1383
} \
1384
else \
1385
r = R (F (m1.matrix_value (), m2)); \
1386
} \
1387
\
1388
return r; \
1389
}
1390
1391
#define SPARSE_SMM_BIN_OPS(R1, R2, M1, M2) \
1392
SPARSE_SMM_BIN_OP_1 (R1, operator +, +, M1, M2) \
1393
SPARSE_SMM_BIN_OP_1 (R1, operator -, -, M1, M2) \
1394
SPARSE_SMM_BIN_OP_2 (R2, product, *, M1, M2) \
1395
SPARSE_SMM_BIN_OP_2 (R2, quotient, /, M1, M2)
1396
1397
#define SPARSE_SMM_CMP_OP_DECLS(M1, M2, API) \
1398
SPARSE_CMP_OP_DECL (mx_el_lt, M1, M2, API); \
1399
SPARSE_CMP_OP_DECL (mx_el_le, M1, M2, API); \
1400
SPARSE_CMP_OP_DECL (mx_el_ge, M1, M2, API); \
1401
SPARSE_CMP_OP_DECL (mx_el_gt, M1, M2, API); \
1402
SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \
1403
SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API);
1404
1405
#define SPARSE_SMM_EQNE_OP_DECLS(M1, M2, API) \
1406
SPARSE_CMP_OP_DECL (mx_el_eq, M1, M2, API); \
1407
SPARSE_CMP_OP_DECL (mx_el_ne, M1, M2, API);
1408
1409
#define SPARSE_SMM_CMP_OP(F, OP, M1, C1, M2, C2) \
1410
SparseBoolMatrix \
1411
F (const M1& m1, const M2& m2) \
1412
{ \
1413
SparseBoolMatrix r; \
1414
\
1415
octave_idx_type m1_nr = m1.rows (); \
1416
octave_idx_type m1_nc = m1.cols (); \
1417
\
1418
octave_idx_type m2_nr = m2.rows (); \
1419
octave_idx_type m2_nc = m2.cols (); \
1420
\
1421
if (m1_nr == 1 && m1_nc == 1) \
1422
r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \
1423
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1424
{ \
1425
if (m1_nr != 0 || m1_nc != 0) \
1426
{ \
1427
/* Count num of non-zero elements */ \
1428
octave_idx_type nel = 0; \
1429
for (octave_idx_type j = 0; j < m1_nc; j++) \
1430
for (octave_idx_type i = 0; i < m1_nr; i++) \
1431
if (C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j))) \
1432
nel++; \
1433
\
1434
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1435
\
1436
octave_idx_type ii = 0; \
1437
r.cidx (0) = 0; \
1438
for (octave_idx_type j = 0; j < m1_nc; j++) \
1439
{ \
1440
for (octave_idx_type i = 0; i < m1_nr; i++) \
1441
{ \
1442
bool el = C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j)); \
1443
if (el) \
1444
{ \
1445
r.data (ii) = el; \
1446
r.ridx (ii++) = i; \
1447
} \
1448
} \
1449
r.cidx (j+1) = ii; \
1450
} \
1451
} \
1452
} \
1453
else \
1454
{ \
1455
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1456
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1457
} \
1458
return r; \
1459
}
1460
1461
#define SPARSE_SMM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \
1462
SPARSE_SMM_CMP_OP (mx_el_lt, <, M1, , M2, ) \
1463
SPARSE_SMM_CMP_OP (mx_el_le, <=, M1, , M2, ) \
1464
SPARSE_SMM_CMP_OP (mx_el_ge, >=, M1, , M2, ) \
1465
SPARSE_SMM_CMP_OP (mx_el_gt, >, M1, , M2, ) \
1466
SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1467
SPARSE_SMM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1468
1469
#define SPARSE_SMM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \
1470
SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1471
SPARSE_SMM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1472
1473
#define SPARSE_SMM_BOOL_OP_DECLS(M1, M2, API) \
1474
SPARSE_BOOL_OP_DECL (mx_el_and, M1, M2, API); \
1475
SPARSE_BOOL_OP_DECL (mx_el_or, M1, M2, API);
1476
1477
#define SPARSE_SMM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \
1478
SparseBoolMatrix \
1479
F (const M1& m1, const M2& m2) \
1480
{ \
1481
SparseBoolMatrix r; \
1482
\
1483
octave_idx_type m1_nr = m1.rows (); \
1484
octave_idx_type m1_nc = m1.cols (); \
1485
\
1486
octave_idx_type m2_nr = m2.rows (); \
1487
octave_idx_type m2_nc = m2.cols (); \
1488
\
1489
if (m1_nr == 1 && m1_nc == 1) \
1490
r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \
1491
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1492
{ \
1493
if (m1_nr != 0 || m1_nc != 0) \
1494
{ \
1495
/* Count num of non-zero elements */ \
1496
octave_idx_type nel = 0; \
1497
for (octave_idx_type j = 0; j < m1_nc; j++) \
1498
for (octave_idx_type i = 0; i < m1_nr; i++) \
1499
if ((m1.elem (i, j) != LHS_ZERO) \
1500
OP (m2.elem (i, j) != RHS_ZERO)) \
1501
nel++; \
1502
\
1503
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1504
\
1505
octave_idx_type ii = 0; \
1506
r.cidx (0) = 0; \
1507
for (octave_idx_type j = 0; j < m1_nc; j++) \
1508
{ \
1509
for (octave_idx_type i = 0; i < m1_nr; i++) \
1510
{ \
1511
bool el = (m1.elem (i, j) != LHS_ZERO) \
1512
OP (m2.elem (i, j) != RHS_ZERO); \
1513
if (el) \
1514
{ \
1515
r.data (ii) = el; \
1516
r.ridx (ii++) = i; \
1517
} \
1518
} \
1519
r.cidx (j+1) = ii; \
1520
} \
1521
} \
1522
} \
1523
else \
1524
{ \
1525
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1526
gripe_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1527
} \
1528
return r; \
1529
}
1530
1531
#define SPARSE_SMM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \
1532
SPARSE_SMM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \
1533
SPARSE_SMM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO) \
1534
1535
#define SPARSE_SMM_BOOL_OPS(M1, M2, ZERO) \
1536
SPARSE_SMM_BOOL_OPS2(M1, M2, ZERO, ZERO)
1537
1538
#define SPARSE_SMM_OP_DECLS(R1, R2, M1, M2, API) \
1539
SPARSE_SMM_BIN_OP_DECLS (R1, R2, M1, M2, API) \
1540
SPARSE_SMM_CMP_OP_DECLS (M1, M2, API) \
1541
SPARSE_SMM_BOOL_OP_DECLS (M1, M2, API)
1542
1543
// Avoid some code duplication. Maybe we should use templates.
1544
1545
#define SPARSE_CUMSUM(RET_TYPE, ELT_TYPE, FCN) \
1546
\
1547
octave_idx_type nr = rows (); \
1548
octave_idx_type nc = cols (); \
1549
\
1550
RET_TYPE retval; \
1551
\
1552
if (nr > 0 && nc > 0) \
1553
{ \
1554
if ((nr == 1 && dim == -1) || dim == 1) \
1555
/* Ugly!! Is there a better way? */ \
1556
retval = transpose (). FCN (0) .transpose (); \
1557
else \
1558
{ \
1559
octave_idx_type nel = 0; \
1560
for (octave_idx_type i = 0; i < nc; i++) \
1561
{ \
1562
ELT_TYPE t = ELT_TYPE (); \
1563
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1564
{ \
1565
t += data (j); \
1566
if (t != ELT_TYPE ()) \
1567
{ \
1568
if (j == cidx (i+1) - 1) \
1569
nel += nr - ridx (j); \
1570
else \
1571
nel += ridx (j+1) - ridx (j); \
1572
} \
1573
} \
1574
} \
1575
retval = RET_TYPE (nr, nc, nel); \
1576
retval.cidx (0) = 0; \
1577
octave_idx_type ii = 0; \
1578
for (octave_idx_type i = 0; i < nc; i++) \
1579
{ \
1580
ELT_TYPE t = ELT_TYPE (); \
1581
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1582
{ \
1583
t += data (j); \
1584
if (t != ELT_TYPE ()) \
1585
{ \
1586
if (j == cidx (i+1) - 1) \
1587
{ \
1588
for (octave_idx_type k = ridx (j); k < nr; k++) \
1589
{ \
1590
retval.data (ii) = t; \
1591
retval.ridx (ii++) = k; \
1592
} \
1593
} \
1594
else \
1595
{ \
1596
for (octave_idx_type k = ridx (j); k < ridx (j+1); k++) \
1597
{ \
1598
retval.data (ii) = t; \
1599
retval.ridx (ii++) = k; \
1600
} \
1601
} \
1602
} \
1603
} \
1604
retval.cidx (i+1) = ii; \
1605
} \
1606
} \
1607
} \
1608
else \
1609
retval = RET_TYPE (nr,nc); \
1610
\
1611
return retval
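// Illustrative sketch (assumed usage, not part of the original header): the
// cumulative-sum macro is written to be the whole body of a member function,
// something like
//
//   SparseMatrix
//   SparseMatrix::cumsum (int dim) const
//   {
//     SPARSE_CUMSUM (SparseMatrix, double, cumsum);
//   }
//
// It relies on the enclosing function's `dim' argument and on the member
// accessors rows (), cols (), cidx (), ridx () and data (); the FCN argument
// names the method itself so the row-wise case can be delegated to
// transpose ().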
1612
1613
1614
#define SPARSE_CUMPROD(RET_TYPE, ELT_TYPE, FCN) \
1615
\
1616
octave_idx_type nr = rows (); \
1617
octave_idx_type nc = cols (); \
1618
\
1619
RET_TYPE retval; \
1620
\
1621
if (nr > 0 && nc > 0) \
1622
{ \
1623
if ((nr == 1 && dim == -1) || dim == 1) \
1624
/* Ugly!! Is there a better way? */ \
1625
retval = transpose (). FCN (0) .transpose (); \
1626
else \
1627
{ \
1628
octave_idx_type nel = 0; \
1629
for (octave_idx_type i = 0; i < nc; i++) \
1630
{ \
1631
octave_idx_type jj = 0; \
1632
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1633
{ \
1634
if (jj == ridx (j)) \
1635
{ \
1636
nel++; \
1637
jj++; \
1638
} \
1639
else \
1640
break; \
1641
} \
1642
} \
1643
retval = RET_TYPE (nr, nc, nel); \
1644
retval.cidx (0) = 0; \
1645
octave_idx_type ii = 0; \
1646
for (octave_idx_type i = 0; i < nc; i++) \
1647
{ \
1648
ELT_TYPE t = ELT_TYPE (1.); \
1649
octave_idx_type jj = 0; \
1650
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1651
{ \
1652
if (jj == ridx (j)) \
1653
{ \
1654
t *= data (j); \
1655
retval.data (ii) = t; \
1656
retval.ridx (ii++) = jj++; \
1657
} \
1658
else \
1659
break; \
1660
} \
1661
retval.cidx (i+1) = ii; \
1662
} \
1663
} \
1664
} \
1665
else \
1666
retval = RET_TYPE (nr,nc); \
1667
\
1668
return retval
1669
1670
#define SPARSE_BASE_REDUCTION_OP(RET_TYPE, EL_TYPE, ROW_EXPR, COL_EXPR, \
1671
INIT_VAL, MT_RESULT) \
1672
\
1673
octave_idx_type nr = rows (); \
1674
octave_idx_type nc = cols (); \
1675
\
1676
RET_TYPE retval; \
1677
\
1678
if (nr > 0 && nc > 0) \
1679
{ \
1680
if ((nr == 1 && dim == -1) || dim == 1) \
1681
{ \
1682
/* Define j here to allow fancy definition for prod method */ \
1683
octave_idx_type j = 0; \
1684
OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nr); \
1685
\
1686
for (octave_idx_type i = 0; i < nr; i++) \
1687
tmp[i] = INIT_VAL; \
1688
for (j = 0; j < nc; j++) \
1689
{ \
1690
for (octave_idx_type i = cidx (j); i < cidx (j + 1); i++) \
1691
{ \
1692
ROW_EXPR; \
1693
} \
1694
} \
1695
octave_idx_type nel = 0; \
1696
for (octave_idx_type i = 0; i < nr; i++) \
1697
if (tmp[i] != EL_TYPE ()) \
1698
nel++ ; \
1699
retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), nel); \
1700
retval.cidx (0) = 0; \
1701
retval.cidx (1) = nel; \
1702
nel = 0; \
1703
for (octave_idx_type i = 0; i < nr; i++) \
1704
if (tmp[i] != EL_TYPE ()) \
1705
{ \
1706
retval.data (nel) = tmp[i]; \
1707
retval.ridx (nel++) = i; \
1708
} \
1709
} \
1710
else \
1711
{ \
1712
OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nc); \
1713
\
1714
for (octave_idx_type j = 0; j < nc; j++) \
1715
{ \
1716
tmp[j] = INIT_VAL; \
1717
for (octave_idx_type i = cidx (j); i < cidx (j + 1); i++) \
1718
{ \
1719
COL_EXPR; \
1720
} \
1721
} \
1722
octave_idx_type nel = 0; \
1723
for (octave_idx_type i = 0; i < nc; i++) \
1724
if (tmp[i] != EL_TYPE ()) \
1725
nel++ ; \
1726
retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, nel); \
1727
retval.cidx (0) = 0; \
1728
nel = 0; \
1729
for (octave_idx_type i = 0; i < nc; i++) \
1730
if (tmp[i] != EL_TYPE ()) \
1731
{ \
1732
retval.data (nel) = tmp[i]; \
1733
retval.ridx (nel++) = 0; \
1734
retval.cidx (i+1) = retval.cidx (i) + 1; \
1735
} \
1736
else \
1737
retval.cidx (i+1) = retval.cidx (i); \
1738
} \
1739
} \
1740
else if (nc == 0 && (nr == 0 || (nr == 1 && dim == -1))) \
1741
{ \
1742
if (MT_RESULT) \
1743
{ \
1744
retval = RET_TYPE (static_cast<octave_idx_type> (1), \
1745
static_cast<octave_idx_type> (1), \
1746
static_cast<octave_idx_type> (1)); \
1747
retval.cidx (0) = 0; \
1748
retval.cidx (1) = 1; \
1749
retval.ridx (0) = 0; \
1750
retval.data (0) = MT_RESULT; \
1751
} \
1752
else \
1753
retval = RET_TYPE (static_cast<octave_idx_type> (1), \
1754
static_cast<octave_idx_type> (1), \
1755
static_cast<octave_idx_type> (0)); \
1756
} \
1757
else if (nr == 0 && (dim == 0 || dim == -1)) \
1758
{ \
1759
if (MT_RESULT) \
1760
{ \
1761
retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, nc); \
1762
retval.cidx (0) = 0; \
1763
for (octave_idx_type i = 0; i < nc ; i++) \
1764
{ \
1765
retval.ridx (i) = 0; \
1766
retval.cidx (i+1) = i+1; \
1767
retval.data (i) = MT_RESULT; \
1768
} \
1769
} \
1770
else \
1771
retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, \
1772
static_cast<octave_idx_type> (0)); \
1773
} \
1774
else if (nc == 0 && dim == 1) \
1775
{ \
1776
if (MT_RESULT) \
1777
{ \
1778
retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), nr); \
1779
retval.cidx (0) = 0; \
1780
retval.cidx (1) = nr; \
1781
for (octave_idx_type i = 0; i < nr; i++) \
1782
{ \
1783
retval.ridx (i) = i; \
1784
retval.data (i) = MT_RESULT; \
1785
} \
1786
} \
1787
else \
1788
retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), \
1789
static_cast<octave_idx_type> (0)); \
1790
} \
1791
else \
1792
retval.resize (nr > 0, nc > 0); \
1793
\
1794
return retval
1795
1796
#define SPARSE_REDUCTION_OP_ROW_EXPR(OP) \
1797
tmp[ridx (i)] OP data (i)
1798
1799
#define SPARSE_REDUCTION_OP_COL_EXPR(OP) \
1800
tmp[j] OP data (i)
1801
1802
#define SPARSE_REDUCTION_OP(RET_TYPE, EL_TYPE, OP, INIT_VAL, MT_RESULT) \
1803
SPARSE_BASE_REDUCTION_OP (RET_TYPE, EL_TYPE, \
1804
SPARSE_REDUCTION_OP_ROW_EXPR (OP), \
1805
SPARSE_REDUCTION_OP_COL_EXPR (OP), \
1806
INIT_VAL, MT_RESULT)
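// Illustrative sketch (assumed usage, not part of the original header): a
// simple reduction such as sum can be expressed by expanding this macro as
// the body of a member function, e.g.
//
//   SparseMatrix
//   SparseMatrix::sum (int dim) const
//   {
//     SPARSE_REDUCTION_OP (SparseMatrix, double, +=, 0.0, 0.0);
//   }
//
// Here OP (+=) updates the per-row or per-column accumulator in tmp,
// INIT_VAL seeds the accumulator, and MT_RESULT is the scalar returned when
// the reduced dimension is empty.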
1807
1808
1809
// Don't break from this loop if the test succeeds because
1810
// we are looping over the rows and not the columns in the inner loop.
1811
#define SPARSE_ANY_ALL_OP_ROW_CODE(TEST_OP, TEST_TRUE_VAL) \
1812
if (data (i) TEST_OP 0.0) \
1813
tmp[ridx (i)] = TEST_TRUE_VAL; \
1814
1815
#define SPARSE_ANY_ALL_OP_COL_CODE(TEST_OP, TEST_TRUE_VAL) \
1816
if (data (i) TEST_OP 0.0) \
1817
{ \
1818
tmp[j] = TEST_TRUE_VAL; \
1819
break; \
1820
}
1821
1822
#define SPARSE_ANY_ALL_OP(DIM, INIT_VAL, MT_RESULT, TEST_OP, TEST_TRUE_VAL) \
1823
SPARSE_BASE_REDUCTION_OP (SparseBoolMatrix, char, \
1824
SPARSE_ANY_ALL_OP_ROW_CODE (TEST_OP, TEST_TRUE_VAL), \
1825
SPARSE_ANY_ALL_OP_COL_CODE (TEST_OP, TEST_TRUE_VAL), \
1826
INIT_VAL, MT_RESULT)
1827
1828
#define SPARSE_ALL_OP(DIM) \
1829
if ((rows () == 1 && dim == -1) || dim == 1) \
1830
return transpose (). all (0). transpose (); \
1831
else \
1832
{ \
1833
SPARSE_ANY_ALL_OP (DIM, (cidx (j+1) - cidx (j) < nr ? false : true), \
1834
true, ==, false); \
1835
}
1836
1837
#define SPARSE_ANY_OP(DIM) SPARSE_ANY_ALL_OP (DIM, false, false, !=, true)
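// Illustrative sketch (assumed usage, not part of the original header):
//
//   SparseBoolMatrix
//   SparseMatrix::any (int dim) const
//   {
//     SPARSE_ANY_OP (dim);
//   }
//
// For all (), the per-column initial value passed in SPARSE_ALL_OP is true
// only when the column is already full (cidx (j+1) - cidx (j) == nr); any
// structural zero in the column makes the result false before the stored
// elements are even tested.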
1838
1839
#define SPARSE_SPARSE_MUL( RET_TYPE, RET_EL_TYPE, EL_TYPE ) \
1840
octave_idx_type nr = m.rows (); \
1841
octave_idx_type nc = m.cols (); \
1842
\
1843
octave_idx_type a_nr = a.rows (); \
1844
octave_idx_type a_nc = a.cols (); \
1845
\
1846
if (nr == 1 && nc == 1) \
1847
{ \
1848
RET_EL_TYPE s = m.elem (0,0); \
1849
octave_idx_type nz = a.nnz (); \
1850
RET_TYPE r (a_nr, a_nc, nz); \
1851
\
1852
for (octave_idx_type i = 0; i < nz; i++) \
1853
{ \
1854
octave_quit (); \
1855
r.data (i) = s * a.data (i); \
1856
r.ridx (i) = a.ridx (i); \
1857
} \
1858
for (octave_idx_type i = 0; i < a_nc + 1; i++) \
1859
{ \
1860
octave_quit (); \
1861
r.cidx (i) = a.cidx (i); \
1862
} \
1863
\
1864
r.maybe_compress (true); \
1865
return r; \
1866
} \
1867
else if (a_nr == 1 && a_nc == 1) \
1868
{ \
1869
RET_EL_TYPE s = a.elem (0,0); \
1870
octave_idx_type nz = m.nnz (); \
1871
RET_TYPE r (nr, nc, nz); \
1872
\
1873
for (octave_idx_type i = 0; i < nz; i++) \
1874
{ \
1875
octave_quit (); \
1876
r.data (i) = m.data (i) * s; \
1877
r.ridx (i) = m.ridx (i); \
1878
} \
1879
for (octave_idx_type i = 0; i < nc + 1; i++) \
1880
{ \
1881
octave_quit (); \
1882
r.cidx (i) = m.cidx (i); \
1883
} \
1884
\
1885
r.maybe_compress (true); \
1886
return r; \
1887
} \
1888
else if (nc != a_nr) \
1889
{ \
1890
gripe_nonconformant ("operator *", nr, nc, a_nr, a_nc); \
1891
return RET_TYPE (); \
1892
} \
1893
else \
1894
{ \
1895
OCTAVE_LOCAL_BUFFER (octave_idx_type, w, nr); \
1896
RET_TYPE retval (nr, a_nc, static_cast<octave_idx_type> (0)); \
1897
for (octave_idx_type i = 0; i < nr; i++) \
1898
w[i] = 0; \
1899
retval.xcidx (0) = 0; \
1900
\
1901
octave_idx_type nel = 0; \
1902
\
1903
for (octave_idx_type i = 0; i < a_nc; i++) \
1904
{ \
1905
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1906
{ \
1907
octave_idx_type col = a.ridx (j); \
1908
for (octave_idx_type k = m.cidx (col) ; k < m.cidx (col+1); k++) \
1909
{ \
1910
if (w[m.ridx (k)] < i + 1) \
1911
{ \
1912
w[m.ridx (k)] = i + 1; \
1913
nel++; \
1914
} \
1915
octave_quit (); \
1916
} \
1917
} \
1918
retval.xcidx (i+1) = nel; \
1919
} \
1920
\
1921
if (nel == 0) \
1922
return RET_TYPE (nr, a_nc); \
1923
else \
1924
{ \
1925
for (octave_idx_type i = 0; i < nr; i++) \
1926
w[i] = 0; \
1927
\
1928
OCTAVE_LOCAL_BUFFER (RET_EL_TYPE, Xcol, nr); \
1929
\
1930
retval.change_capacity (nel); \
1931
/* The optimal break-point as estimated from simulations */ \
/* Note that Mergesort is O(nz log(nz)) while searching all */ \
/* values is O(nr), where nz here is non-zero per row of */ \
/* length nr. The test itself was then derived from the */ \
/* simulation with random square matrices and the observation */ \
/* of the number of non-zero elements in the output matrix */ \
/* it was found that the breakpoints were */ \
/* nr:  500  1000  2000  5000  10000 */ \
/* nz:    6    25    97   585   2202 */ \
/* The below is a simplification of the 'polyfit'-ed parameters */ \
/* to these breakpoints */ \
1942
octave_idx_type n_per_col = (a_nc > 43000 ? 43000 : \
1943
(a_nc * a_nc) / 43000); \
1944
octave_idx_type ii = 0; \
1945
octave_idx_type *ri = retval.xridx (); \
1946
octave_sort<octave_idx_type> sort; \
1947
\
1948
for (octave_idx_type i = 0; i < a_nc ; i++) \
1949
{ \
1950
if (retval.xcidx (i+1) - retval.xcidx (i) > n_per_col) \
1951
{ \
1952
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1953
{ \
1954
octave_idx_type col = a.ridx (j); \
1955
EL_TYPE tmpval = a.data (j); \
1956
for (octave_idx_type k = m.cidx (col) ; \
1957
k < m.cidx (col+1); k++) \
1958
{ \
1959
octave_quit (); \
1960
octave_idx_type row = m.ridx (k); \
1961
if (w[row] < i + 1) \
1962
{ \
1963
w[row] = i + 1; \
1964
Xcol[row] = tmpval * m.data (k); \
1965
} \
1966
else \
1967
Xcol[row] += tmpval * m.data (k); \
1968
} \
1969
} \
1970
for (octave_idx_type k = 0; k < nr; k++) \
1971
if (w[k] == i + 1) \
1972
{ \
1973
retval.xdata (ii) = Xcol[k]; \
1974
retval.xridx (ii++) = k; \
1975
} \
1976
} \
1977
else \
1978
{ \
1979
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1980
{ \
1981
octave_idx_type col = a.ridx (j); \
1982
EL_TYPE tmpval = a.data (j); \
1983
for (octave_idx_type k = m.cidx (col) ; \
1984
k < m.cidx (col+1); k++) \
1985
{ \
1986
octave_quit (); \
1987
octave_idx_type row = m.ridx (k); \
1988
if (w[row] < i + 1) \
1989
{ \
1990
w[row] = i + 1; \
1991
retval.xridx (ii++) = row;\
1992
Xcol[row] = tmpval * m.data (k); \
1993
} \
1994
else \
1995
Xcol[row] += tmpval * m.data (k); \
1996
} \
1997
} \
1998
sort.sort (ri + retval.xcidx (i), ii - retval.xcidx (i)); \
1999
for (octave_idx_type k = retval.xcidx (i); k < ii; k++) \
2000
retval.xdata (k) = Xcol[retval.xridx (k)]; \
2001
} \
2002
} \
2003
retval.maybe_compress (true);\
2004
return retval; \
2005
} \
2006
}
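// Overview (added comment, not in the original source): SPARSE_SPARSE_MUL is
// a two-pass sparse-times-sparse multiply.  The first pass walks the columns
// of a and uses the workspace w[] to mark which result rows become nonzero,
// filling retval.xcidx with the per-column counts.  The second pass
// accumulates the numeric values into Xcol[]; columns expected to be dense
// (more than n_per_col entries) are harvested by scanning all nr rows in
// order, while sparser columns record their row indices as they appear and
// sort them afterwards with octave_sort, which is the trade-off the
// break-even comment above is estimating.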
2007
2008
#define SPARSE_FULL_MUL( RET_TYPE, EL_TYPE, ZERO ) \
2009
octave_idx_type nr = m.rows (); \
2010
octave_idx_type nc = m.cols (); \
2011
\
2012
octave_idx_type a_nr = a.rows (); \
2013
octave_idx_type a_nc = a.cols (); \
2014
\
2015
if (nr == 1 && nc == 1) \
2016
{ \
2017
RET_TYPE retval = m.elem (0,0) * a; \
2018
return retval; \
2019
} \
2020
else if (nc != a_nr) \
2021
{ \
2022
gripe_nonconformant ("operator *", nr, nc, a_nr, a_nc); \
2023
return RET_TYPE (); \
2024
} \
2025
else \
2026
{ \
2027
RET_TYPE retval (nr, a_nc, ZERO); \
2028
\
2029
for (octave_idx_type i = 0; i < a_nc ; i++) \
2030
{ \
2031
for (octave_idx_type j = 0; j < a_nr; j++) \
2032
{ \
2033
octave_quit (); \
2034
\
2035
EL_TYPE tmpval = a.elem (j,i); \
2036
for (octave_idx_type k = m.cidx (j) ; k < m.cidx (j+1); k++) \
2037
retval.elem (m.ridx (k),i) += tmpval * m.data (k); \
2038
} \
2039
} \
2040
return retval; \
2041
}
2042
2043
#define SPARSE_FULL_TRANS_MUL( RET_TYPE, EL_TYPE, ZERO, CONJ_OP ) \
2044
octave_idx_type nr = m.rows (); \
2045
octave_idx_type nc = m.cols (); \
2046
\
2047
octave_idx_type a_nr = a.rows (); \
2048
octave_idx_type a_nc = a.cols (); \
2049
\
2050
if (nr == 1 && nc == 1) \
2051
{ \
2052
RET_TYPE retval = CONJ_OP (m.elem (0,0)) * a; \
2053
return retval; \
2054
} \
2055
else if (nr != a_nr) \
2056
{ \
2057
gripe_nonconformant ("operator *", nc, nr, a_nr, a_nc); \
2058
return RET_TYPE (); \
2059
} \
2060
else \
2061
{ \
2062
RET_TYPE retval (nc, a_nc); \
2063
\
2064
for (octave_idx_type i = 0; i < a_nc ; i++) \
2065
{ \
2066
for (octave_idx_type j = 0; j < nc; j++) \
2067
{ \
2068
octave_quit (); \
2069
\
2070
EL_TYPE acc = ZERO; \
2071
for (octave_idx_type k = m.cidx (j) ; k < m.cidx (j+1); k++) \
2072
acc += a.elem (m.ridx (k),i) * CONJ_OP (m.data (k)); \
2073
retval.xelem (j,i) = acc; \
2074
} \
2075
} \
2076
return retval; \
2077
}
2078
2079
#define FULL_SPARSE_MUL( RET_TYPE, EL_TYPE, ZERO ) \
2080
octave_idx_type nr = m.rows (); \
2081
octave_idx_type nc = m.cols (); \
2082
\
2083
octave_idx_type a_nr = a.rows (); \
2084
octave_idx_type a_nc = a.cols (); \
2085
\
2086
if (a_nr == 1 && a_nc == 1) \
2087
{ \
2088
RET_TYPE retval = m * a.elem (0,0); \
2089
return retval; \
2090
} \
2091
else if (nc != a_nr) \
2092
{ \
2093
gripe_nonconformant ("operator *", nr, nc, a_nr, a_nc); \
2094
return RET_TYPE (); \
2095
} \
2096
else \
2097
{ \
2098
RET_TYPE retval (nr, a_nc, ZERO); \
2099
\
2100
for (octave_idx_type i = 0; i < a_nc ; i++) \
2101
{ \
2102
octave_quit (); \
2103
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
2104
{ \
2105
octave_idx_type col = a.ridx (j); \
2106
EL_TYPE tmpval = a.data (j); \
2107
\
2108
for (octave_idx_type k = 0 ; k < nr; k++) \
2109
retval.xelem (k,i) += tmpval * m.elem (k,col); \
2110
} \
2111
} \
2112
return retval; \
2113
}
2114
2115
#define FULL_SPARSE_MUL_TRANS( RET_TYPE, EL_TYPE, ZERO, CONJ_OP ) \
2116
octave_idx_type nr = m.rows (); \
2117
octave_idx_type nc = m.cols (); \
2118
\
2119
octave_idx_type a_nr = a.rows (); \
2120
octave_idx_type a_nc = a.cols (); \
2121
\
2122
if (a_nr == 1 && a_nc == 1) \
2123
{ \
2124
RET_TYPE retval = m * CONJ_OP (a.elem (0,0)); \
2125
return retval; \
2126
} \
2127
else if (nc != a_nc) \
2128
{ \
2129
gripe_nonconformant ("operator *", nr, nc, a_nc, a_nr); \
2130
return RET_TYPE (); \
2131
} \
2132
else \
2133
{ \
2134
RET_TYPE retval (nr, a_nr, ZERO); \
2135
\
2136
for (octave_idx_type i = 0; i < a_nc ; i++) \
2137
{ \
2138
octave_quit (); \
2139
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
2140
{ \
2141
octave_idx_type col = a.ridx (j); \
2142
EL_TYPE tmpval = CONJ_OP (a.data (j)); \
2143
for (octave_idx_type k = 0 ; k < nr; k++) \
2144
retval.xelem (k,col) += tmpval * m.elem (k,i); \
2145
} \
2146
} \
2147
return retval; \
2148
}
2149
2150
#endif