00026 #ifdef HAVE_CONFIG_H
00027 #include <config.h>
00028 #endif
00029
00030 #include <cassert>
00031 #include <climits>
00032
00033 #include <algorithm>
00034 #include <iostream>
00035 #include <sstream>
00036 #include <vector>
00037
00038 #include "Array.h"
00039 #include "MArray.h"
00040 #include "Array-util.h"
00041 #include "Range.h"
00042 #include "idx-vector.h"
00043 #include "lo-error.h"
00044 #include "quit.h"
00045 #include "oct-locbuf.h"
00046
00047 #include "Sparse.h"
00048 #include "sparse-sort.h"
00049 #include "sparse-util.h"
00050 #include "oct-spparms.h"
00051 #include "mx-inlines.cc"
00052
00053 #include "PermMatrix.h"
00054
00055 template <class T>
00056 Sparse<T>::Sparse (const PermMatrix& a)
00057 : rep (new typename Sparse<T>::SparseRep (a.rows (), a.cols (), a.rows ())),
00058 dimensions (dim_vector (a.rows (), a.cols()))
00059 {
00060 octave_idx_type n = a.rows ();
00061 for (octave_idx_type i = 0; i <= n; i++)
00062 cidx (i) = i;
00063
00064 const Array<octave_idx_type> pv = a.pvec ();
00065
00066 if (a.is_row_perm ())
00067 {
00068 for (octave_idx_type i = 0; i < n; i++)
00069 ridx (pv (i)) = i;
00070 }
00071 else
00072 {
00073 for (octave_idx_type i = 0; i < n; i++)
00074 ridx (i) = pv (i);
00075 }
00076
00077 for (octave_idx_type i = 0; i < n; i++)
00078 data (i) = 1.0;
00079 }
00080
00081 template <class T>
00082 T&
00083 Sparse<T>::SparseRep::elem (octave_idx_type _r, octave_idx_type _c)
00084 {
00085 octave_idx_type i;
00086
00087 if (nzmx > 0)
00088 {
00089 for (i = c[_c]; i < c[_c + 1]; i++)
00090 if (r[i] == _r)
00091 return d[i];
00092 else if (r[i] > _r)
00093 break;
00094
00095
00096
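// The element (_r, _c) is not stored yet.  Insert a new entry at position i,
// shifting the later entries up, provided there is still unused capacity.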
00097 if (c[ncols] == nzmx)
00098 {
00099 (*current_liboctave_error_handler)
00100 ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
00101 return *d;
00102 }
00103
00104 octave_idx_type to_move = c[ncols] - i;
00105 if (to_move != 0)
00106 {
00107 for (octave_idx_type j = c[ncols]; j > i; j--)
00108 {
00109 d[j] = d[j-1];
00110 r[j] = r[j-1];
00111 }
00112 }
00113
00114 for (octave_idx_type j = _c + 1; j < ncols + 1; j++)
00115 c[j] = c[j] + 1;
00116
00117 d[i] = 0.;
00118 r[i] = _r;
00119
00120 return d[i];
00121 }
00122 else
00123 {
00124 (*current_liboctave_error_handler)
00125 ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
00126 return *d;
00127 }
00128 }
00129
00130 template <class T>
00131 T
00132 Sparse<T>::SparseRep::celem (octave_idx_type _r, octave_idx_type _c) const
00133 {
00134 if (nzmx > 0)
00135 for (octave_idx_type i = c[_c]; i < c[_c + 1]; i++)
00136 if (r[i] == _r)
00137 return d[i];
00138 return T ();
00139 }
00140
00141 template <class T>
00142 void
00143 Sparse<T>::SparseRep::maybe_compress (bool remove_zeros)
00144 {
00145 if (remove_zeros)
00146 {
00147 octave_idx_type i = 0, k = 0;
00148 for (octave_idx_type j = 1; j <= ncols; j++)
00149 {
00150 octave_idx_type u = c[j];
00151 for (; i < u; i++)
00152 if (d[i] != T())
00153 {
00154 d[k] = d[i];
00155 r[k++] = r[i];
00156 }
00157 c[j] = k;
00158 }
00159 }
00160
00161 change_length (c[ncols]);
00162 }
00163
00164 template <class T>
00165 void
00166 Sparse<T>::SparseRep::change_length (octave_idx_type nz)
00167 {
00168 for (octave_idx_type j = ncols; j > 0 && c[j] > nz; j--)
00169 c[j] = nz;
00170
00171
00172
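// Reallocate only when growing, or when shrinking would discard more than
// 1/frac of the currently allocated storage.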
00173 static const int frac = 5;
00174 if (nz > nzmx || nz < nzmx - nzmx/frac)
00175 {
00176
00177 octave_idx_type min_nzmx = std::min (nz, nzmx);
00178
00179 octave_idx_type * new_ridx = new octave_idx_type [nz];
00180 copy_or_memcpy (min_nzmx, r, new_ridx);
00181
00182 delete [] r;
00183 r = new_ridx;
00184
00185 T * new_data = new T [nz];
00186 copy_or_memcpy (min_nzmx, d, new_data);
00187
00188 delete [] d;
00189 d = new_data;
00190
00191 nzmx = nz;
00192 }
00193 }
00194
00195 template <class T>
00196 bool
00197 Sparse<T>::SparseRep::indices_ok (void) const
00198 {
00199 return sparse_indices_ok (r, c, nrows, ncols, nnz ());
00200 }
00201
00202 template <class T>
00203 Sparse<T>::Sparse (octave_idx_type nr, octave_idx_type nc, T val)
00204 : rep (0), dimensions (dim_vector (nr, nc))
00205 {
00206 if (val != T ())
00207 {
00208 rep = new typename Sparse<T>::SparseRep (nr, nc, nr*nc);
00209
00210 octave_idx_type ii = 0;
00211 xcidx (0) = 0;
00212 for (octave_idx_type j = 0; j < nc; j++)
00213 {
00214 for (octave_idx_type i = 0; i < nr; i++)
00215 {
00216 xdata (ii) = val;
00217 xridx (ii++) = i;
00218 }
00219 xcidx (j+1) = ii;
00220 }
00221 }
00222 else
00223 {
00224 rep = new typename Sparse<T>::SparseRep (nr, nc, 0);
00225 for (octave_idx_type j = 0; j < nc+1; j++)
00226 xcidx(j) = 0;
00227 }
00228 }
00229
00230 template <class T>
00231 Sparse<T>::Sparse (const dim_vector& dv)
00232 : rep (0), dimensions (dv)
00233 {
00234 if (dv.length() != 2)
00235 (*current_liboctave_error_handler)
00236 ("Sparse::Sparse (const dim_vector&): dimension mismatch");
00237 else
00238 rep = new typename Sparse<T>::SparseRep (dv(0), dv(1), 0);
00239 }
00240
00241 template <class T>
00242 Sparse<T>::Sparse (const Sparse<T>& a, const dim_vector& dv)
00243 : rep (0), dimensions (dv)
00244 {
00245
00246
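// Compute both element counts in unsigned long long so the products cannot
// overflow octave_idx_type for very large matrices.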
00247 unsigned long long a_nel = static_cast<unsigned long long>(a.rows ()) *
00248 static_cast<unsigned long long>(a.cols ());
00249 unsigned long long dv_nel = static_cast<unsigned long long>(dv (0)) *
00250 static_cast<unsigned long long>(dv (1));
00251
00252 if (a_nel != dv_nel)
00253 (*current_liboctave_error_handler)
00254 ("Sparse::Sparse (const Sparse&, const dim_vector&): dimension mismatch");
00255 else
00256 {
00257 dim_vector old_dims = a.dims();
00258 octave_idx_type new_nzmx = a.nnz ();
00259 octave_idx_type new_nr = dv (0);
00260 octave_idx_type new_nc = dv (1);
00261 octave_idx_type old_nr = old_dims (0);
00262 octave_idx_type old_nc = old_dims (1);
00263
00264 rep = new typename Sparse<T>::SparseRep (new_nr, new_nc, new_nzmx);
00265
00266 octave_idx_type kk = 0;
00267 xcidx(0) = 0;
00268 for (octave_idx_type i = 0; i < old_nc; i++)
00269 for (octave_idx_type j = a.cidx(i); j < a.cidx(i+1); j++)
00270 {
00271 octave_idx_type tmp = i * old_nr + a.ridx(j);
00272 octave_idx_type ii = tmp % new_nr;
00273 octave_idx_type jj = (tmp - ii) / new_nr;
00274 for (octave_idx_type k = kk; k < jj; k++)
00275 xcidx(k+1) = j;
00276 kk = jj;
00277 xdata(j) = a.data(j);
00278 xridx(j) = ii;
00279 }
00280 for (octave_idx_type k = kk; k < new_nc; k++)
00281 xcidx(k+1) = new_nzmx;
00282 }
00283 }
00284
00285 template <class T>
00286 Sparse<T>::Sparse (const Array<T>& a, const idx_vector& r,
00287 const idx_vector& c, octave_idx_type nr,
00288 octave_idx_type nc, bool sum_terms,
00289 octave_idx_type nzm)
00290 : rep (0), dimensions ()
00291 {
00292 if (nr < 0)
00293 nr = r.extent (0);
00294 else if (r.extent (nr) > nr)
00295 (*current_liboctave_error_handler) ("sparse: row index %d out of bound %d",
00296 r.extent (nr), nr);
00297
00298 if (nc < 0)
00299 nc = c.extent (0);
00300 else if (c.extent (nc) > nc)
00301 (*current_liboctave_error_handler)
00302 ("sparse: column index %d out of bound %d", c.extent (nc), nc);
00303
00304 rep = new typename Sparse<T>::SparseRep (nr, nc, (nzm > 0 ? nzm : 0));
00305
00306 dimensions = dim_vector (nr, nc);
00307
00308 octave_idx_type n = a.numel (), rl = r.length (nr), cl = c.length (nc);
00309 bool a_scalar = n == 1;
00310 if (a_scalar)
00311 {
00312 if (rl != 1)
00313 n = rl;
00314 else if (cl != 1)
00315 n = cl;
00316 }
00317
00318 if ((rl != 1 && rl != n) || (cl != 1 && cl != n))
00319 (*current_liboctave_error_handler) ("sparse: dimension mismatch");
00320
00321 if (rl <= 1 && cl <= 1)
00322 {
00323 if (n == 1 && a(0) != T ())
00324 {
00325 change_capacity (nzm > 1 ? nzm : 1);
00326 xcidx(0) = 0;
00327 xridx(0) = r(0);
00328 xdata(0) = a(0);
00329 for (octave_idx_type j = 0; j < nc; j++)
00330 xcidx(j+1) = j >= c(0);
00331 }
00332 }
00333 else if (a_scalar)
00334 {
00335
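// A scalar value is replicated over the given index positions; if it is zero
// there is nothing to store.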
00336 T a0 = a(0);
00337 if (a0 == T())
00338 {
00339
00340 }
00341 else if (cl == 1)
00342 {
00343
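// Sort the row indices; duplicate positions are merged below.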
00344 idx_vector rs = r.sorted ();
00345
00346 octave_quit ();
00347
00348 const octave_idx_type *rd = rs.raw ();
00349
00350 octave_idx_type new_nz = 1;
00351 for (octave_idx_type i = 1; i < n; i++)
00352 new_nz += rd[i-1] != rd[i];
00353
00354 change_capacity (nzm > new_nz ? nzm : new_nz);
00355 xcidx (0) = 0;
00356 xcidx (1) = new_nz;
00357 octave_idx_type *rri = ridx ();
00358 T *rrd = data ();
00359
00360 octave_quit ();
00361
00362 octave_idx_type k = -1, l = -1;
00363
00364 if (sum_terms)
00365 {
00366
00367 for (octave_idx_type i = 0; i < n; i++)
00368 {
00369 if (rd[i] != l)
00370 {
00371 l = rd[i];
00372 rri[++k] = rd[i];
00373 rrd[k] = a0;
00374 }
00375 else
00376 rrd[k] += a0;
00377 }
00378 }
00379 else
00380 {
00381
00382 for (octave_idx_type i = 0; i < n; i++)
00383 {
00384 if (rd[i] != l)
00385 {
00386 l = rd[i];
00387 rri[++k] = rd[i];
00388 rrd[k] = a0;
00389 }
00390 }
00391 }
00392
00393 }
00394 else
00395 {
00396 idx_vector rr = r, cc = c;
00397 const octave_idx_type *rd = rr.raw (), *cd = cc.raw ();
00398 OCTAVE_LOCAL_BUFFER_INIT (octave_idx_type, ci, nc+1, 0);
00399 ci[0] = 0;
00400
00401 for (octave_idx_type i = 0; i < n; i++)
00402 ci[cd[i]+1]++;
00403
00404 for (octave_idx_type i = 1, s = 0; i <= nc; i++)
00405 {
00406 octave_idx_type s1 = s + ci[i];
00407 ci[i] = s;
00408 s = s1;
00409 }
00410
00411 octave_quit ();
00412
00413
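// Bucket-sort the row indices by destination column.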
00414 OCTAVE_LOCAL_BUFFER (octave_idx_type, sidx, n);
00415 for (octave_idx_type i = 0; i < n; i++)
00416 if (rl == 1)
00417 sidx[ci[cd[i]+1]++] = rd[0];
00418 else
00419 sidx[ci[cd[i]+1]++] = rd[i];
00420
00421
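// Sort each column's bucket and count the distinct row indices to build the
// column pointers.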
00422 xcidx(0) = 0;
00423 for (octave_idx_type j = 0; j < nc; j++)
00424 {
00425 std::sort (sidx + ci[j], sidx + ci[j+1]);
00426 octave_idx_type l = -1, nzj = 0;
00427
00428 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
00429 {
00430 octave_idx_type k = sidx[i];
00431 if (k != l)
00432 {
00433 l = k;
00434 nzj++;
00435 }
00436 }
00437
00438 xcidx(j+1) = xcidx(j) + nzj;
00439 }
00440
00441 change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
00442 octave_idx_type *rri = ridx ();
00443 T *rrd = data ();
00444
00445
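// Fill the row indices and data; duplicate positions are either summed or
// collapsed to a single entry.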
00446 for (octave_idx_type j = 0, jj = -1; j < nc; j++)
00447 {
00448 octave_quit ();
00449 octave_idx_type l = -1;
00450 if (sum_terms)
00451 {
00452
00453 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
00454 {
00455 octave_idx_type k = sidx[i];
00456 if (k != l)
00457 {
00458 l = k;
00459 rrd[++jj] = a0;
00460 rri[jj] = k;
00461 }
00462 else
00463 rrd[jj] += a0;
00464 }
00465 }
00466 else
00467 {
00468
00469 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
00470 {
00471 octave_idx_type k = sidx[i];
00472 if (k != l)
00473 {
00474 l = k;
00475 rrd[++jj] = a0;
00476 rri[jj] = k;
00477 }
00478 }
00479 }
00480 }
00481 }
00482 }
00483 else if (cl == 1)
00484 {
00485
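// Sort the row indices, keeping the original positions so the corresponding
// values can be gathered in order.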
00486 Array<octave_idx_type> rsi;
00487 idx_vector rs = r.sorted (rsi);
00488
00489 octave_quit ();
00490
00491 const octave_idx_type *rd = rs.raw (), *rdi = rsi.data ();
00492
00493 octave_idx_type new_nz = 1;
00494 for (octave_idx_type i = 1; i < n; i++)
00495 new_nz += rd[i-1] != rd[i];
00496
00497 change_capacity (nzm > new_nz ? nzm : new_nz);
00498 xcidx(0) = 0;
00499 xcidx(1) = new_nz;
00500 octave_idx_type *rri = ridx ();
00501 T *rrd = data ();
00502
00503 octave_quit ();
00504
00505 octave_idx_type k = 0;
00506 rri[k] = rd[0];
00507 rrd[k] = a(rdi[0]);
00508
00509 if (sum_terms)
00510 {
00511
00512 for (octave_idx_type i = 1; i < n; i++)
00513 {
00514 if (rd[i] != rd[i-1])
00515 {
00516 rri[++k] = rd[i];
00517 rrd[k] = a(rdi[i]);
00518 }
00519 else
00520 rrd[k] += a(rdi[i]);
00521 }
00522 }
00523 else
00524 {
00525
00526 for (octave_idx_type i = 1; i < n; i++)
00527 {
00528 if (rd[i] != rd[i-1])
00529 rri[++k] = rd[i];
00530 rrd[k] = a(rdi[i]);
00531 }
00532 }
00533
00534 maybe_compress (true);
00535 }
00536 else
00537 {
00538 idx_vector rr = r, cc = c;
00539 const octave_idx_type *rd = rr.raw (), *cd = cc.raw ();
00540 OCTAVE_LOCAL_BUFFER_INIT (octave_idx_type, ci, nc+1, 0);
00541 ci[0] = 0;
00542
00543 for (octave_idx_type i = 0; i < n; i++)
00544 ci[cd[i]+1]++;
00545
00546 for (octave_idx_type i = 1, s = 0; i <= nc; i++)
00547 {
00548 octave_idx_type s1 = s + ci[i];
00549 ci[i] = s;
00550 s = s1;
00551 }
00552
00553 octave_quit ();
00554
00555 typedef std::pair<octave_idx_type, octave_idx_type> idx_pair;
00556
00557 OCTAVE_LOCAL_BUFFER (idx_pair, spairs, n);
00558 for (octave_idx_type i = 0; i < n; i++)
00559 {
00560 idx_pair& p = spairs[ci[cd[i]+1]++];
00561 if (rl == 1)
00562 p.first = rd[0];
00563 else
00564 p.first = rd[i];
00565 p.second = i;
00566 }
00567
00568
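// Sort the (row index, source position) pairs within each column and count
// the distinct rows to build the column pointers.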
00569 xcidx(0) = 0;
00570 for (octave_idx_type j = 0; j < nc; j++)
00571 {
00572 std::sort (spairs + ci[j], spairs + ci[j+1]);
00573 octave_idx_type l = -1, nzj = 0;
00574
00575 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
00576 {
00577 octave_idx_type k = spairs[i].first;
00578 if (k != l)
00579 {
00580 l = k;
00581 nzj++;
00582 }
00583 }
00584
00585 xcidx(j+1) = xcidx(j) + nzj;
00586 }
00587
00588 change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
00589 octave_idx_type *rri = ridx ();
00590 T *rrd = data ();
00591
00592
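// Fill the result; duplicate positions are summed or take the value of the
// last occurrence.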
00593 for (octave_idx_type j = 0, jj = -1; j < nc; j++)
00594 {
00595 octave_quit ();
00596 octave_idx_type l = -1;
00597 if (sum_terms)
00598 {
00599
00600 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
00601 {
00602 octave_idx_type k = spairs[i].first;
00603 if (k != l)
00604 {
00605 l = k;
00606 rrd[++jj] = a(spairs[i].second);
00607 rri[jj] = k;
00608 }
00609 else
00610 rrd[jj] += a(spairs[i].second);
00611 }
00612 }
00613 else
00614 {
00615
00616 for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
00617 {
00618 octave_idx_type k = spairs[i].first;
00619 if (k != l)
00620 {
00621 l = k;
00622 rri[++jj] = k;
00623 }
00624 rrd[jj] = a(spairs[i].second);
00625 }
00626 }
00627 }
00628
00629 maybe_compress (true);
00630 }
00631 }
00632
00633 template <class T>
00634 Sparse<T>::Sparse (const Array<T>& a)
00635 : rep (0), dimensions (a.dims ())
00636 {
00637 if (dimensions.length () > 2)
00638 (*current_liboctave_error_handler)
00639 ("Sparse::Sparse (const Array<T>&): dimension mismatch");
00640 else
00641 {
00642 octave_idx_type nr = rows ();
00643 octave_idx_type nc = cols ();
00644 octave_idx_type len = a.length ();
00645 octave_idx_type new_nzmx = 0;
00646
00647
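// First count the nonzero elements.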
00648 for (octave_idx_type i = 0; i < len; i++)
00649 if (a(i) != T ())
00650 new_nzmx++;
00651
00652 rep = new typename Sparse<T>::SparseRep (nr, nc, new_nzmx);
00653
00654 octave_idx_type ii = 0;
00655 xcidx(0) = 0;
00656 for (octave_idx_type j = 0; j < nc; j++)
00657 {
00658 for (octave_idx_type i = 0; i < nr; i++)
00659 if (a.elem (i,j) != T ())
00660 {
00661 xdata(ii) = a.elem (i,j);
00662 xridx(ii++) = i;
00663 }
00664 xcidx(j+1) = ii;
00665 }
00666 }
00667 }
00668
00669 template <class T>
00670 Sparse<T>::~Sparse (void)
00671 {
00672 if (--rep->count == 0)
00673 delete rep;
00674 }
00675
00676 template <class T>
00677 Sparse<T>&
00678 Sparse<T>::operator = (const Sparse<T>& a)
00679 {
00680 if (this != &a)
00681 {
00682 if (--rep->count == 0)
00683 delete rep;
00684
00685 rep = a.rep;
00686 rep->count++;
00687
00688 dimensions = a.dimensions;
00689 }
00690
00691 return *this;
00692 }
00693
00694 template <class T>
00695 octave_idx_type
00696 Sparse<T>::compute_index (const Array<octave_idx_type>& ra_idx) const
00697 {
00698 octave_idx_type retval = -1;
00699
00700 octave_idx_type n = dimensions.length ();
00701
00702 if (n > 0 && n == ra_idx.length ())
00703 {
00704 retval = ra_idx(--n);
00705
00706 while (--n >= 0)
00707 {
00708 retval *= dimensions(n);
00709 retval += ra_idx(n);
00710 }
00711 }
00712 else
00713 (*current_liboctave_error_handler)
00714 ("Sparse<T>::compute_index: invalid ra_idx indexing operation");
00715
00716 return retval;
00717 }
00718
00719 template <class T>
00720 T
00721 Sparse<T>::range_error (const char *fcn, octave_idx_type n) const
00722 {
00723 (*current_liboctave_error_handler) ("%s (%d): range error", fcn, n);
00724 return T ();
00725 }
00726
00727 template <class T>
00728 T&
00729 Sparse<T>::range_error (const char *fcn, octave_idx_type n)
00730 {
00731 (*current_liboctave_error_handler) ("%s (%d): range error", fcn, n);
00732 static T foo;
00733 return foo;
00734 }
00735
00736 template <class T>
00737 T
00738 Sparse<T>::range_error (const char *fcn, octave_idx_type i, octave_idx_type j) const
00739 {
00740 (*current_liboctave_error_handler)
00741 ("%s (%d, %d): range error", fcn, i, j);
00742 return T ();
00743 }
00744
00745 template <class T>
00746 T&
00747 Sparse<T>::range_error (const char *fcn, octave_idx_type i, octave_idx_type j)
00748 {
00749 (*current_liboctave_error_handler)
00750 ("%s (%d, %d): range error", fcn, i, j);
00751 static T foo;
00752 return foo;
00753 }
00754
00755 template <class T>
00756 T
00757 Sparse<T>::range_error (const char *fcn, const Array<octave_idx_type>& ra_idx) const
00758 {
00759 std::ostringstream buf;
00760
00761 buf << fcn << " (";
00762
00763 octave_idx_type n = ra_idx.length ();
00764
00765 if (n > 0)
00766 buf << ra_idx(0);
00767
00768 for (octave_idx_type i = 1; i < n; i++)
00769 buf << ", " << ra_idx(i);
00770
00771 buf << "): range error";
00772
00773 std::string buf_str = buf.str ();
00774
00775 (*current_liboctave_error_handler) (buf_str.c_str ());
00776
00777 return T ();
00778 }
00779
00780 template <class T>
00781 T&
00782 Sparse<T>::range_error (const char *fcn, const Array<octave_idx_type>& ra_idx)
00783 {
00784 std::ostringstream buf;
00785
00786 buf << fcn << " (";
00787
00788 octave_idx_type n = ra_idx.length ();
00789
00790 if (n > 0)
00791 buf << ra_idx(0);
00792
00793 for (octave_idx_type i = 1; i < n; i++)
00794 buf << ", " << ra_idx(i);
00795
00796 buf << "): range error";
00797
00798 std::string buf_str = buf.str ();
00799
00800 (*current_liboctave_error_handler) (buf_str.c_str ());
00801
00802 static T foo;
00803 return foo;
00804 }
00805
00806 template <class T>
00807 Sparse<T>
00808 Sparse<T>::reshape (const dim_vector& new_dims) const
00809 {
00810 Sparse<T> retval;
00811 dim_vector dims2 = new_dims;
00812
00813 if (dims2.length () > 2)
00814 {
00815 (*current_liboctave_warning_handler)
00816 ("reshape: sparse reshape to N-d array smashes dims");
00817
00818 for (octave_idx_type i = 2; i < dims2.length(); i++)
00819 dims2(1) *= dims2(i);
00820
00821 dims2.resize (2);
00822 }
00823
00824 if (dimensions != dims2)
00825 {
00826 if (dimensions.numel () == dims2.numel ())
00827 {
00828 octave_idx_type new_nnz = nnz ();
00829 octave_idx_type new_nr = dims2 (0);
00830 octave_idx_type new_nc = dims2 (1);
00831 octave_idx_type old_nr = rows ();
00832 octave_idx_type old_nc = cols ();
00833 retval = Sparse<T> (new_nr, new_nc, new_nnz);
00834
00835 octave_idx_type kk = 0;
00836 retval.xcidx(0) = 0;
00837 for (octave_idx_type i = 0; i < old_nc; i++)
00838 for (octave_idx_type j = cidx(i); j < cidx(i+1); j++)
00839 {
00840 octave_idx_type tmp = i * old_nr + ridx(j);
00841 octave_idx_type ii = tmp % new_nr;
00842 octave_idx_type jj = (tmp - ii) / new_nr;
00843 for (octave_idx_type k = kk; k < jj; k++)
00844 retval.xcidx(k+1) = j;
00845 kk = jj;
00846 retval.xdata(j) = data(j);
00847 retval.xridx(j) = ii;
00848 }
00849 for (octave_idx_type k = kk; k < new_nc; k++)
00850 retval.xcidx(k+1) = new_nnz;
00851 }
00852 else
00853 {
00854 std::string dimensions_str = dimensions.str ();
00855 std::string new_dims_str = new_dims.str ();
00856
00857 (*current_liboctave_error_handler)
00858 ("reshape: can't reshape %s array to %s array",
00859 dimensions_str.c_str (), new_dims_str.c_str ());
00860 }
00861 }
00862 else
00863 retval = *this;
00864
00865 return retval;
00866 }
00867
00868 template <class T>
00869 Sparse<T>
00870 Sparse<T>::permute (const Array<octave_idx_type>& perm_vec, bool) const
00871 {
00872
00873
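// The only valid 2-D permutations are the identity and the transpose.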
00874 bool fail = false;
00875 bool trans = false;
00876
00877 if (perm_vec.length () == 2)
00878 {
00879 if (perm_vec(0) == 0 && perm_vec(1) == 1)
00880 ;
00881 else if (perm_vec(0) == 1 && perm_vec(1) == 0)
00882 trans = true;
00883 else
00884 fail = true;
00885 }
00886 else
00887 fail = true;
00888
00889 if (fail)
00890 (*current_liboctave_error_handler)
00891 ("permutation vector contains an invalid element");
00892
00893 return trans ? this->transpose () : *this;
00894 }
00895
00896 template <class T>
00897 void
00898 Sparse<T>::resize1 (octave_idx_type n)
00899 {
00900 octave_idx_type nr = rows (), nc = cols ();
00901
00902 if (nr == 0)
00903 resize (1, std::max (nc, n));
00904 else if (nc == 0)
00905 resize (nr, (n + nr - 1) / nr);
00906 else if (nr == 1)
00907 resize (1, n);
00908 else if (nc == 1)
00909 resize (n, 1);
00910 else
00911 gripe_invalid_resize ();
00912 }
00913
00914 template <class T>
00915 void
00916 Sparse<T>::resize (const dim_vector& dv)
00917 {
00918 octave_idx_type n = dv.length ();
00919
00920 if (n != 2)
00921 {
00922 (*current_liboctave_error_handler) ("sparse array must be 2-D");
00923 return;
00924 }
00925
00926 resize (dv(0), dv(1));
00927 }
00928
00929 template <class T>
00930 void
00931 Sparse<T>::resize (octave_idx_type r, octave_idx_type c)
00932 {
00933 if (r < 0 || c < 0)
00934 {
00935 (*current_liboctave_error_handler)
00936 ("can't resize to negative dimension");
00937 return;
00938 }
00939
00940 if (r == dim1 () && c == dim2 ())
00941 return;
00942
00943
00944
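// The matrix is modified in place below, so make sure the rep is not shared.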
00945 make_unique ();
00946
00947 if (r < rows ())
00948 {
00949 octave_idx_type i = 0, k = 0;
00950 for (octave_idx_type j = 1; j <= rep->ncols; j++)
00951 {
00952 octave_idx_type u = xcidx(j);
00953 for (; i < u; i++)
00954 if (xridx(i) < r)
00955 {
00956 xdata(k) = xdata(i);
00957 xridx(k++) = xridx(i);
00958 }
00959 xcidx(j) = k;
00960 }
00961 }
00962
00963 rep->nrows = dimensions(0) = r;
00964
00965 if (c != rep->ncols)
00966 {
00967 octave_idx_type *new_cidx = new octave_idx_type [c+1];
00968 copy_or_memcpy (std::min (c, rep->ncols)+1, rep->c, new_cidx);
00969 delete [] rep->c;
00970 rep->c = new_cidx;
00971
00972 if (c > rep->ncols)
00973 fill_or_memset (c - rep->ncols, rep->c[rep->ncols], rep->c + rep->ncols + 1);
00974 }
00975
00976 rep->ncols = dimensions(1) = c;
00977
00978 rep->change_length (rep->nnz ());
00979 }
00980
00981 template <class T>
00982 Sparse<T>&
00983 Sparse<T>::insert (const Sparse<T>& a, octave_idx_type r, octave_idx_type c)
00984 {
00985 octave_idx_type a_rows = a.rows ();
00986 octave_idx_type a_cols = a.cols ();
00987 octave_idx_type nr = rows ();
00988 octave_idx_type nc = cols ();
00989
00990 if (r < 0 || r + a_rows > rows () || c < 0 || c + a_cols > cols ())
00991 {
00992 (*current_liboctave_error_handler) ("range error for insert");
00993 return *this;
00994 }
00995
00996
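// First count the number of elements in the final array.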
00997 octave_idx_type nel = cidx(c) + a.nnz ();
00998
00999 if (c + a_cols < nc)
01000 nel += cidx(nc) - cidx(c + a_cols);
01001
01002 for (octave_idx_type i = c; i < c + a_cols; i++)
01003 for (octave_idx_type j = cidx(i); j < cidx(i+1); j++)
01004 if (ridx(j) < r || ridx(j) >= r + a_rows)
01005 nel++;
01006
01007 Sparse<T> tmp (*this);
01008 --rep->count;
01009 rep = new typename Sparse<T>::SparseRep (nr, nc, nel);
01010
01011 for (octave_idx_type i = 0; i < tmp.cidx(c); i++)
01012 {
01013 data(i) = tmp.data(i);
01014 ridx(i) = tmp.ridx(i);
01015 }
01016 for (octave_idx_type i = 0; i < c + 1; i++)
01017 cidx(i) = tmp.cidx(i);
01018
01019 octave_idx_type ii = cidx(c);
01020
01021 for (octave_idx_type i = c; i < c + a_cols; i++)
01022 {
01023 octave_quit ();
01024
01025 for (octave_idx_type j = tmp.cidx(i); j < tmp.cidx(i+1); j++)
01026 if (tmp.ridx(j) < r)
01027 {
01028 data(ii) = tmp.data(j);
01029 ridx(ii++) = tmp.ridx(j);
01030 }
01031
01032 octave_quit ();
01033
01034 for (octave_idx_type j = a.cidx(i-c); j < a.cidx(i-c+1); j++)
01035 {
01036 data(ii) = a.data(j);
01037 ridx(ii++) = r + a.ridx(j);
01038 }
01039
01040 octave_quit ();
01041
01042 for (octave_idx_type j = tmp.cidx(i); j < tmp.cidx(i+1); j++)
01043 if (tmp.ridx(j) >= r + a_rows)
01044 {
01045 data(ii) = tmp.data(j);
01046 ridx(ii++) = tmp.ridx(j);
01047 }
01048
01049 cidx(i+1) = ii;
01050 }
01051
01052 for (octave_idx_type i = c + a_cols; i < nc; i++)
01053 {
01054 for (octave_idx_type j = tmp.cidx(i); j < tmp.cidx(i+1); j++)
01055 {
01056 data(ii) = tmp.data(j);
01057 ridx(ii++) = tmp.ridx(j);
01058 }
01059 cidx(i+1) = ii;
01060 }
01061
01062 return *this;
01063 }
01064
01065 template <class T>
01066 Sparse<T>&
01067 Sparse<T>::insert (const Sparse<T>& a, const Array<octave_idx_type>& ra_idx)
01068 {
01069
01070 if (ra_idx.length () != 2)
01071 {
01072 (*current_liboctave_error_handler) ("range error for insert");
01073 return *this;
01074 }
01075
01076 return insert (a, ra_idx (0), ra_idx (1));
01077 }
01078
01079 template <class T>
01080 Sparse<T>
01081 Sparse<T>::transpose (void) const
01082 {
01083 assert (ndims () == 2);
01084
01085 octave_idx_type nr = rows ();
01086 octave_idx_type nc = cols ();
01087 octave_idx_type nz = nnz ();
01088 Sparse<T> retval (nc, nr, nz);
01089
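// Count the entries destined for each row; these become the column counts of
// the transposed result.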
01090 for (octave_idx_type i = 0; i < nz; i++)
01091 retval.xcidx (ridx (i) + 1)++;
01092
01093 nz = 0;
01094 for (octave_idx_type i = 1; i <= nr; i++)
01095 {
01096 const octave_idx_type tmp = retval.xcidx (i);
01097 retval.xcidx (i) = nz;
01098 nz += tmp;
01099 }
01100
01101
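// Scatter each element into its transposed position, advancing the
// destination column pointers as we go.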
01102 for (octave_idx_type j = 0; j < nc; j++)
01103 for (octave_idx_type k = cidx(j); k < cidx(j+1); k++)
01104 {
01105 octave_idx_type q = retval.xcidx (ridx (k) + 1)++;
01106 retval.xridx (q) = j;
01107 retval.xdata (q) = data (k);
01108 }
01109 assert (nnz () == retval.xcidx (nr));
01110
01111
01112
01113 return retval;
01114 }
01115
01116
01117
01118
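// Return the position of the first entry in ridx that is >= ri, using a
// linear scan for short arrays and a binary search otherwise.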
01119 static octave_idx_type
01120 lblookup (const octave_idx_type *ridx, octave_idx_type nr,
01121 octave_idx_type ri)
01122 {
01123 if (nr <= 8)
01124 {
01125 octave_idx_type l;
01126 for (l = 0; l < nr; l++)
01127 if (ridx[l] >= ri)
01128 break;
01129 return l;
01130 }
01131 else
01132 return std::lower_bound (ridx, ridx + nr, ri) - ridx;
01133 }
01134
01135 template <class T>
01136 void
01137 Sparse<T>::delete_elements (const idx_vector& idx)
01138 {
01139 Sparse<T> retval;
01140
01141 assert (ndims () == 2);
01142
01143 octave_idx_type nr = dim1 ();
01144 octave_idx_type nc = dim2 ();
01145 octave_idx_type nz = nnz ();
01146
01147 octave_idx_type nel = numel ();
01148
01149 const dim_vector idx_dims = idx.orig_dimensions ();
01150
01151 if (idx.extent (nel) > nel)
01152 gripe_del_index_out_of_range (true, idx.extent (nel), nel);
01153 else if (nc == 1)
01154 {
01155
01156 const Sparse<T> tmp = *this;
01157
01158 octave_idx_type lb, ub;
01159
01160 if (idx.is_cont_range (nel, lb, ub))
01161 {
01162
01163
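// The deleted rows form a contiguous block of stored entries; copy the
// pieces before and after it and shift the remaining row indices down.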
01164 octave_idx_type li = lblookup (tmp.ridx (), nz, lb);
01165 octave_idx_type ui = lblookup (tmp.ridx (), nz, ub);
01166
01167 octave_idx_type nz_new = nz - (ui - li);
01168 *this = Sparse<T> (nr - (ub - lb), 1, nz_new);
01169 copy_or_memcpy (li, tmp.data (), data ());
01170 copy_or_memcpy (li, tmp.ridx (), xridx ());
01171 copy_or_memcpy (nz - ui, tmp.data () + ui, xdata () + li);
01172 mx_inline_sub (nz - ui, xridx () + li, tmp.ridx () + ui, ub - lb);
01173 xcidx(1) = nz_new;
01174 }
01175 else
01176 {
01177 OCTAVE_LOCAL_BUFFER (octave_idx_type, ridx_new, nz);
01178 OCTAVE_LOCAL_BUFFER (T, data_new, nz);
01179 idx_vector sidx = idx.sorted (true);
01180 const octave_idx_type *sj = sidx.raw ();
01181 octave_idx_type sl = sidx.length (nel), nz_new = 0, j = 0;
01182 for (octave_idx_type i = 0; i < nz; i++)
01183 {
01184 octave_idx_type r = tmp.ridx(i);
01185 for (;j < sl && sj[j] < r; j++) ;
01186 if (j == sl || sj[j] > r)
01187 {
01188 data_new[nz_new] = tmp.data(i);
01189 ridx_new[nz_new++] = r - j;
01190 }
01191 }
01192
01193 *this = Sparse<T> (nr - sl, 1, nz_new);
01194 copy_or_memcpy (nz_new, ridx_new, ridx ());
01195 copy_or_memcpy (nz_new, data_new, xdata ());
01196 xcidx(1) = nz_new;
01197 }
01198 }
01199 else if (nr == 1)
01200 {
01201
01202 octave_idx_type lb, ub;
01203 if (idx.is_cont_range (nc, lb, ub))
01204 {
01205 const Sparse<T> tmp = *this;
01206 octave_idx_type lbi = tmp.cidx(lb), ubi = tmp.cidx(ub), new_nz = nz - (ubi - lbi);
01207 *this = Sparse<T> (1, nc - (ub - lb), new_nz);
01208 copy_or_memcpy (lbi, tmp.data (), data ());
01209 copy_or_memcpy (nz - ubi, tmp.data () + ubi, xdata () + lbi);
01210 fill_or_memset (new_nz, static_cast<octave_idx_type> (0), ridx ());
01211 copy_or_memcpy (lb, tmp.cidx () + 1, cidx () + 1);
01212 mx_inline_sub (nc - ub, xcidx () + lb + 1, tmp.cidx () + ub + 1, ubi - lbi);
01213 }
01214 else
01215 *this = index (idx.complement (nc));
01216 }
01217 else
01218 {
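// General matrix: linearize with a colon index, delete from the resulting
// column vector, then transpose so the result is a row vector.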
01219 *this = index (idx_vector::colon);
01220 delete_elements (idx);
01221 *this = transpose ();
01222 }
01223 }
01224
01225 template <class T>
01226 void
01227 Sparse<T>::delete_elements (const idx_vector& idx_i, const idx_vector& idx_j)
01228 {
01229 assert (ndims () == 2);
01230
01231 octave_idx_type nr = dim1 ();
01232 octave_idx_type nc = dim2 ();
01233 octave_idx_type nz = nnz ();
01234
01235 if (idx_i.is_colon ())
01236 {
01237
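// Deleting whole columns.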
01238 octave_idx_type lb, ub;
01239 if (idx_j.extent (nc) > nc)
01240 gripe_del_index_out_of_range (false, idx_j.extent (nc), nc);
01241 else if (idx_j.is_cont_range (nc, lb, ub))
01242 {
01243 if (lb == 0 && ub == nc)
01244 {
01245
01246 *this = Sparse<T> (nr, 0);
01247 }
01248 else if (nz == 0)
01249 {
01250
01251 *this = Sparse<T> (nr, nc - (ub - lb));
01252 }
01253 else
01254 {
01255 const Sparse<T> tmp = *this;
01256 octave_idx_type lbi = tmp.cidx(lb), ubi = tmp.cidx(ub),
01257 new_nz = nz - (ubi - lbi);
01258
01259 *this = Sparse<T> (nr, nc - (ub - lb), new_nz);
01260 copy_or_memcpy (lbi, tmp.data (), data ());
01261 copy_or_memcpy (lbi, tmp.ridx (), ridx ());
01262 copy_or_memcpy (nz - ubi, tmp.data () + ubi, xdata () + lbi);
01263 copy_or_memcpy (nz - ubi, tmp.ridx () + ubi, xridx () + lbi);
01264 copy_or_memcpy (lb, tmp.cidx () + 1, cidx () + 1);
01265 mx_inline_sub (nc - ub, xcidx () + lb + 1,
01266 tmp.cidx () + ub + 1, ubi - lbi);
01267 }
01268 }
01269 else
01270 *this = index (idx_i, idx_j.complement (nc));
01271 }
01272 else if (idx_j.is_colon ())
01273 {
01274
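// Deleting whole rows.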
01275 octave_idx_type lb, ub;
01276 if (idx_i.extent (nr) > nr)
01277 gripe_del_index_out_of_range (false, idx_i.extent (nr), nr);
01278 else if (idx_i.is_cont_range (nr, lb, ub))
01279 {
01280 if (lb == 0 && ub == nr)
01281 {
01282
01283 *this = Sparse<T> (0, nc);
01284 }
01285 else if (nz == 0)
01286 {
01287
01288 *this = Sparse<T> (nr - (ub - lb), nc);
01289 }
01290 else
01291 {
01292
01293 const Sparse<T> tmpl = index (idx_vector (0, lb), idx_j);
01294 const Sparse<T> tmpu = index (idx_vector (ub, nr), idx_j);
01295 *this = Sparse<T> (nr - (ub - lb), nc,
01296 tmpl.nnz () + tmpu.nnz ());
01297 for (octave_idx_type j = 0, k = 0; j < nc; j++)
01298 {
01299 for (octave_idx_type i = tmpl.cidx(j); i < tmpl.cidx(j+1);
01300 i++)
01301 {
01302 xdata(k) = tmpl.data(i);
01303 xridx(k++) = tmpl.ridx(i);
01304 }
01305 for (octave_idx_type i = tmpu.cidx(j); i < tmpu.cidx(j+1);
01306 i++)
01307 {
01308 xdata(k) = tmpu.data(i);
01309 xridx(k++) = tmpu.ridx(i) + lb;
01310 }
01311
01312 xcidx(j+1) = k;
01313 }
01314 }
01315 }
01316 else
01317 {
01318
01319
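// Row deletion with a general index: transpose, delete the corresponding
// columns, and transpose back.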
01320 Sparse<T> tmp = transpose ();
01321 tmp.delete_elements (idx_j, idx_i);
01322 *this = tmp.transpose ();
01323 }
01324 }
01325 else
01326 (*current_liboctave_error_handler)
01327 ("a null assignment can only have one non-colon index");
01328 }
01329
01330 template <class T>
01331 void
01332 Sparse<T>::delete_elements (int dim, const idx_vector& idx)
01333 {
01334 if (dim == 0)
01335 delete_elements (idx, idx_vector::colon);
01336 else if (dim == 1)
01337 delete_elements (idx_vector::colon, idx);
01338 else
01339 {
01340 (*current_liboctave_error_handler)
01341 ("invalid dimension in delete_elements");
01342 return;
01343 }
01344 }
01345
01346 template <class T>
01347 Sparse<T>
01348 Sparse<T>::index (const idx_vector& idx, bool resize_ok) const
01349 {
01350 Sparse<T> retval;
01351
01352 assert (ndims () == 2);
01353
01354 octave_idx_type nr = dim1 ();
01355 octave_idx_type nc = dim2 ();
01356 octave_idx_type nz = nnz ();
01357
01358 octave_idx_type nel = numel ();
01359
01360 const dim_vector idx_dims = idx.orig_dimensions ();
01361
01362 if (idx_dims.length () > 2)
01363 (*current_liboctave_error_handler)
01364 ("cannot index sparse matrix with an N-D Array");
01365 else if (idx.is_colon ())
01366 {
01367 if (nc == 1)
01368 retval = *this;
01369 else
01370 {
01371
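// Linearize the matrix: stack the columns into a single sparse column vector.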
01372 retval = Sparse<T> (nel, 1, nz);
01373
01374 for (octave_idx_type i = 0; i < nc; i++)
01375 {
01376 for (octave_idx_type j = cidx(i); j < cidx(i+1); j++)
01377 {
01378 retval.xdata(j) = data(j);
01379 retval.xridx(j) = ridx(j) + i * nr;
01380 }
01381 }
01382
01383 retval.xcidx(0) = 0;
01384 retval.xcidx(1) = nz;
01385 }
01386 }
01387 else if (idx.extent (nel) > nel)
01388 {
01389
01390 if (resize_ok)
01391 {
01392 octave_idx_type ext = idx.extent (nel);
01393 Sparse<T> tmp = *this;
01394 tmp.resize1 (ext);
01395 retval = tmp.index (idx);
01396 }
01397 else
01398 gripe_index_out_of_range (1, 1, idx.extent (nel), nel);
01399 }
01400 else if (nr == 1 && nc == 1)
01401 {
01402
01403
01404
01405
01406
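// Indexing a 1x1 matrix: the result takes the shape of the index and is
// filled with the single stored value (or zeros if there is none).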
01407 retval = Sparse<T> (idx_dims(0), idx_dims(1), nz ? data(0) : T ());
01408 }
01409 else if (nc == 1)
01410 {
01411
01412 octave_idx_type lb, ub;
01413
01414 if (idx.is_scalar ())
01415 {
01416
01417 octave_idx_type i = lblookup (ridx (), nz, idx(0));
01418 if (i < nz && ridx(i) == idx(0))
01419 retval = Sparse (1, 1, data(i));
01420 else
01421 retval = Sparse (1, 1);
01422 }
01423 else if (idx.is_cont_range (nel, lb, ub))
01424 {
01425
01426
01427 octave_idx_type li = lblookup (ridx (), nz, lb);
01428 octave_idx_type ui = lblookup (ridx (), nz, ub);
01429
01430 octave_idx_type nz_new = ui - li;
01431 retval = Sparse<T> (ub - lb, 1, nz_new);
01432 copy_or_memcpy (nz_new, data () + li, retval.data ());
01433 mx_inline_sub (nz_new, retval.xridx (), ridx () + li, lb);
01434 retval.xcidx(1) = nz_new;
01435 }
01436 else if (idx.is_permutation (nel) && idx.is_vector ())
01437 {
01438 if (idx.is_range () && idx.increment () == -1)
01439 {
01440 retval = Sparse<T> (nr, 1, nz);
01441
01442 for (octave_idx_type j = 0; j < nz; j++)
01443 retval.ridx (j) = nr - ridx (nz - j - 1) - 1;
01444
01445 copy_or_memcpy (2, cidx (), retval.cidx ());
01446 std::reverse_copy (data (), data () + nz, retval.data ());
01447 }
01448 else
01449 {
01450 Array<T> tmp = array_value ();
01451 tmp = tmp.index (idx);
01452 retval = Sparse<T> (tmp);
01453 }
01454 }
01455 else
01456 {
01457
01458
01459
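// General case: look up every requested row index with lblookup, count the
// hits per column of the index to build cidx, then gather the matching values.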
01460 const Array<octave_idx_type> idxa = (idx_dims(0) == 1
01461 ? idx.as_array ().transpose ()
01462 : idx.as_array ());
01463
01464 octave_idx_type new_nr = idxa.rows (), new_nc = idxa.cols ();
01465
01466
01467
01468 NoAlias< Array<octave_idx_type> > lidx (dim_vector (new_nr, new_nc));
01469 for (octave_idx_type i = 0; i < new_nr*new_nc; i++)
01470 lidx(i) = lblookup (ridx (), nz, idxa(i));
01471
01472
01473 retval = Sparse<T> (idxa.rows (), idxa.cols ());
01474 for (octave_idx_type j = 0; j < new_nc; j++)
01475 {
01476 octave_idx_type nzj = 0;
01477 for (octave_idx_type i = 0; i < new_nr; i++)
01478 {
01479 octave_idx_type l = lidx(i, j);
01480 if (l < nz && ridx(l) == idxa(i, j))
01481 nzj++;
01482 else
01483 lidx(i, j) = nz;
01484 }
01485 retval.xcidx(j+1) = retval.xcidx(j) + nzj;
01486 }
01487
01488 retval.change_capacity (retval.xcidx(new_nc));
01489
01490
01491 octave_idx_type k = 0;
01492 for (octave_idx_type j = 0; j < new_nc; j++)
01493 for (octave_idx_type i = 0; i < new_nr; i++)
01494 {
01495 octave_idx_type l = lidx(i, j);
01496 if (l < nz)
01497 {
01498 retval.data(k) = data(l);
01499 retval.xridx(k++) = i;
01500 }
01501 }
01502 }
01503 }
01504 else if (nr == 1)
01505 {
01506 octave_idx_type lb, ub;
01507 if (idx.is_scalar ())
01508 retval = Sparse<T> (1, 1, elem(0, idx(0)));
01509 else if (idx.is_cont_range (nel, lb, ub))
01510 {
01511
01512 octave_idx_type lbi = cidx(lb), ubi = cidx(ub), new_nz = ubi - lbi;
01513 retval = Sparse<T> (1, ub - lb, new_nz);
01514 copy_or_memcpy (new_nz, data () + lbi, retval.data ());
01515 fill_or_memset (new_nz, static_cast<octave_idx_type> (0), retval.ridx ());
01516 mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
01517 }
01518 else
01519 {
01520
01521
01522 retval = Sparse<T> (array_value ().index (idx));
01523 }
01524 }
01525 else
01526 {
01527 if (nr != 0 && idx.is_scalar ())
01528 retval = Sparse<T> (1, 1, elem (idx(0) % nr, idx(0) / nr));
01529 else
01530 {
01531
01532
01533
01534 retval = index (idx_vector::colon).index (idx);
01535
01536
01537 if (idx_dims(0) == 1 && idx_dims(1) != 1)
01538 retval = retval.transpose ();
01539 }
01540 }
01541
01542 return retval;
01543 }
01544
01545 template <class T>
01546 Sparse<T>
01547 Sparse<T>::index (const idx_vector& idx_i, const idx_vector& idx_j, bool resize_ok) const
01548 {
01549 Sparse<T> retval;
01550
01551 assert (ndims () == 2);
01552
01553 octave_idx_type nr = dim1 ();
01554 octave_idx_type nc = dim2 ();
01555
01556 octave_idx_type n = idx_i.length (nr);
01557 octave_idx_type m = idx_j.length (nc);
01558
01559 octave_idx_type lb, ub;
01560
01561 if (idx_i.extent (nr) > nr || idx_j.extent (nc) > nc)
01562 {
01563
01564 if (resize_ok)
01565 {
01566 octave_idx_type ext_i = idx_i.extent (nr);
01567 octave_idx_type ext_j = idx_j.extent (nc);
01568 Sparse<T> tmp = *this;
01569 tmp.resize (ext_i, ext_j);
01570 retval = tmp.index (idx_i, idx_j);
01571 }
01572 else if (idx_i.extent (nr) > nr)
01573 gripe_index_out_of_range (2, 1, idx_i.extent (nr), nr);
01574 else
01575 gripe_index_out_of_range (2, 2, idx_j.extent (nc), nc);
01576 }
01577 else if (idx_i.is_colon ())
01578 {
01579
01580
01581 if (idx_j.is_colon ())
01582 retval = *this;
01583 else if (idx_j.is_cont_range (nc, lb, ub))
01584 {
01585
01586 octave_idx_type lbi = cidx(lb), ubi = cidx(ub), new_nz = ubi - lbi;
01587 retval = Sparse<T> (nr, ub - lb, new_nz);
01588 copy_or_memcpy (new_nz, data () + lbi, retval.data ());
01589 copy_or_memcpy (new_nz, ridx () + lbi, retval.ridx ());
01590 mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
01591 }
01592 else
01593 {
01594
01595 retval = Sparse<T> (nr, m);
01596 for (octave_idx_type j = 0; j < m; j++)
01597 {
01598 octave_idx_type jj = idx_j(j);
01599 retval.xcidx(j+1) = retval.xcidx(j) + (cidx(jj+1) - cidx(jj));
01600 }
01601
01602 retval.change_capacity (retval.xcidx (m));
01603
01604
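// Copy the data and row indices of each selected column.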
01605 for (octave_idx_type j = 0; j < m; j++)
01606 {
01607 octave_idx_type ljj = cidx(idx_j(j));
01608 octave_idx_type lj = retval.xcidx(j), nzj = retval.xcidx(j+1) - lj;
01609 copy_or_memcpy (nzj, data () + ljj, retval.data () + lj);
01610 copy_or_memcpy (nzj, ridx () + ljj, retval.ridx () + lj);
01611 }
01612 }
01613 }
01614 else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.is_vector ())
01615 {
01616
01617 retval = index (idx_i);
01618
01619
01620 if (nr == 1)
01621 retval = retval.transpose ();
01622 }
01623 else if (idx_i.is_scalar ())
01624 {
01625 octave_idx_type ii = idx_i(0);
01626 retval = Sparse<T> (1, m);
01627 OCTAVE_LOCAL_BUFFER (octave_idx_type, ij, m);
01628 for (octave_idx_type j = 0; j < m; j++)
01629 {
01630 octave_quit ();
01631 octave_idx_type jj = idx_j(j), lj = cidx(jj), nzj = cidx(jj+1) - cidx(jj);
01632
01633 octave_idx_type i = lblookup (ridx () + lj, nzj, ii);
01634 if (i < nzj && ridx(i+lj) == ii)
01635 {
01636 ij[j] = i + lj;
01637 retval.xcidx(j+1) = retval.xcidx(j) + 1;
01638 }
01639 else
01640 retval.xcidx(j+1) = retval.xcidx(j);
01641 }
01642
01643 retval.change_capacity (retval.xcidx(m));
01644
01645
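// Copy the single matching element (if any) of each selected column.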
01646 for (octave_idx_type j = 0; j < m; j++)
01647 {
01648 octave_idx_type i = retval.xcidx(j);
01649 if (retval.xcidx(j+1) > i)
01650 {
01651 retval.xridx(i) = 0;
01652 retval.xdata(i) = data(ij[j]);
01653 }
01654 }
01655 }
01656 else if (idx_i.is_cont_range (nr, lb, ub))
01657 {
01658 retval = Sparse<T> (n, m);
01659 OCTAVE_LOCAL_BUFFER (octave_idx_type, li, m);
01660 OCTAVE_LOCAL_BUFFER (octave_idx_type, ui, m);
01661 for (octave_idx_type j = 0; j < m; j++)
01662 {
01663 octave_quit ();
01664 octave_idx_type jj = idx_j(j), lj = cidx(jj), nzj = cidx(jj+1) - cidx(jj);
01665 octave_idx_type lij, uij;
01666
01667 li[j] = lij = lblookup (ridx () + lj, nzj, lb) + lj;
01668 ui[j] = uij = lblookup (ridx () + lj, nzj, ub) + lj;
01669 retval.xcidx(j+1) = retval.xcidx(j) + ui[j] - li[j];
01670 }
01671
01672 retval.change_capacity (retval.xcidx(m));
01673
01674
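// Copy the stored entries inside the row range, shifting their row indices
// down by lb.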
01675 for (octave_idx_type j = 0, k = 0; j < m; j++)
01676 {
01677 octave_quit ();
01678 for (octave_idx_type i = li[j]; i < ui[j]; i++)
01679 {
01680 retval.xdata(k) = data(i);
01681 retval.xridx(k++) = ridx(i) - lb;
01682 }
01683 }
01684 }
01685 else if (idx_i.is_permutation (nr))
01686 {
01687
01688
01689 retval = Sparse<T> (nr, m);
01690 for (octave_idx_type j = 0; j < m; j++)
01691 {
01692 octave_idx_type jj = idx_j(j);
01693 retval.xcidx(j+1) = retval.xcidx(j) + (cidx(jj+1) - cidx(jj));
01694 }
01695
01696 retval.change_capacity (retval.xcidx (m));
01697
01698 octave_quit ();
01699
01700 if (idx_i.is_range () && idx_i.increment () == -1)
01701 {
01702
01703 for (octave_idx_type j = 0; j < m; j++)
01704 {
01705 octave_quit ();
01706 octave_idx_type jj = idx_j(j), lj = cidx(jj), nzj = cidx(jj+1) - cidx(jj);
01707 octave_idx_type li = retval.xcidx(j), uj = lj + nzj - 1;
01708 for (octave_idx_type i = 0; i < nzj; i++)
01709 {
01710 retval.xdata(li + i) = data(uj - i);
01711 retval.xridx(li + i) = nr - 1 - ridx(uj - i);
01712 }
01713 }
01714 }
01715 else
01716 {
01717
01718 idx_vector idx_iinv = idx_i.inverse_permutation (nr);
01719 const octave_idx_type *iinv = idx_iinv.raw ();
01720
01721
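// Scatter each column into a dense buffer indexed by the permuted rows, sort
// the new row indices, then gather the values back in order.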
01722 OCTAVE_LOCAL_BUFFER (T, scb, nr);
01723 octave_idx_type *rri = retval.ridx ();
01724
01725 for (octave_idx_type j = 0; j < m; j++)
01726 {
01727 octave_quit ();
01728 octave_idx_type jj = idx_j(j), lj = cidx(jj), nzj = cidx(jj+1) - cidx(jj);
01729 octave_idx_type li = retval.xcidx(j);
01730
01731 for (octave_idx_type i = 0; i < nzj; i++)
01732 scb[rri[li + i] = iinv[ridx(lj + i)]] = data(lj + i);
01733
01734 octave_quit ();
01735
01736
01737 std::sort (rri + li, rri + li + nzj);
01738
01739
01740 for (octave_idx_type i = 0; i < nzj; i++)
01741 retval.xdata(li + i) = scb[rri[li + i]];
01742 }
01743 }
01744
01745 }
01746 else if (idx_j.is_colon ())
01747 {
01748
01749
01750
01751 retval = transpose ();
01752 retval = retval.index (idx_vector::colon, idx_i);
01753 retval = retval.transpose ();
01754 }
01755 else
01756 {
01757
01758 retval = index (idx_vector::colon, idx_j);
01759 retval = retval.index (idx_i, idx_vector::colon);
01760 }
01761
01762 return retval;
01763 }
01764
01765 template <class T>
01766 void
01767 Sparse<T>::assign (const idx_vector& idx, const Sparse<T>& rhs)
01768 {
01769 Sparse<T> retval;
01770
01771 assert (ndims () == 2);
01772
01773 octave_idx_type nr = dim1 ();
01774 octave_idx_type nc = dim2 ();
01775 octave_idx_type nz = nnz ();
01776
01777 octave_idx_type n = numel ();
01778
01779 octave_idx_type rhl = rhs.numel ();
01780
01781 if (idx.length (n) == rhl)
01782 {
01783 if (rhl == 0)
01784 return;
01785
01786 octave_idx_type nx = idx.extent (n);
01787
01788 if (nx != n)
01789 {
01790 resize1 (nx);
01791 n = numel ();
01792 nr = rows ();
01793 nc = cols ();
01794
01795 }
01796
01797 if (idx.is_colon ())
01798 {
01799 *this = rhs.reshape (dimensions);
01800 }
01801 else if (nc == 1 && rhs.cols () == 1)
01802 {
01803
01804
01805 octave_idx_type lb, ub;
01806 if (idx.is_cont_range (nr, lb, ub))
01807 {
01808
01809
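// Find the block of stored entries whose row indices fall in [lb, ub); it
// will be replaced by the nonzeros of rhs.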
01810 octave_idx_type li = lblookup (ridx (), nz, lb);
01811 octave_idx_type ui = lblookup (ridx (), nz, ub);
01812 octave_idx_type rnz = rhs.nnz (), new_nz = nz - (ui - li) + rnz;
01813
01814 if (new_nz >= nz && new_nz <= capacity ())
01815 {
01816
01817
01818 if (new_nz > nz)
01819 {
01820
01821 std::copy_backward (data () + ui, data () + nz, data () + nz + rnz);
01822 std::copy_backward (ridx () + ui, ridx () + nz, ridx () + nz + rnz);
01823 }
01824
01825
01826 copy_or_memcpy (rnz, rhs.data (), data () + li);
01827 mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
01828 }
01829 else
01830 {
01831
01832
01833 const Sparse<T> tmp = *this;
01834 *this = Sparse<T> (nr, 1, new_nz);
01835
01836
01837 copy_or_memcpy (li, tmp.data (), data ());
01838 copy_or_memcpy (li, tmp.ridx (), ridx ());
01839
01840
01841 copy_or_memcpy (rnz, rhs.data (), data () + li);
01842 mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
01843
01844
01845 copy_or_memcpy (nz - ui, tmp.data () + ui, data () + li + rnz);
01846 copy_or_memcpy (nz - ui, tmp.ridx () + ui, ridx () + li + rnz);
01847 }
01848
01849 cidx(1) = new_nz;
01850 }
01851 else if (idx.is_range () && idx.increment () == -1)
01852 {
01853
01854 assign (idx.sorted (), rhs.index (idx_vector (rhl - 1, 0, -1)));
01855 }
01856 else if (idx.is_permutation (n))
01857 {
01858 *this = rhs.index (idx.inverse_permutation (n));
01859 }
01860 else if (rhs.nnz () == 0)
01861 {
01862
01863 octave_idx_type *ri = ridx ();
01864 for (octave_idx_type i = 0; i < rhl; i++)
01865 {
01866 octave_idx_type iidx = idx(i);
01867 octave_idx_type li = lblookup (ri, nz, iidx);
01868 if (li != nz && ri[li] == iidx)
01869 xdata(li) = T();
01870 }
01871
01872 maybe_compress (true);
01873 }
01874 else
01875 {
01876 const Sparse<T> tmp = *this;
01877 octave_idx_type new_nz = nz + rhl;
01878
01879 Array<octave_idx_type> new_ri (dim_vector (new_nz, 1));
01880 Array<T> new_data (dim_vector (new_nz, 1));
01881 copy_or_memcpy (nz, tmp.ridx (), new_ri.fortran_vec ());
01882 copy_or_memcpy (nz, tmp.data (), new_data.fortran_vec ());
01883
01884 idx.copy_data (new_ri.fortran_vec () + nz);
01885 new_data.assign (idx_vector (nz, new_nz), rhs.array_value ());
01886
01887 *this = Sparse<T> (new_data, new_ri,
01888 static_cast<octave_idx_type> (0),
01889 nr, nc, false);
01890 }
01891 }
01892 else
01893 {
01894 dim_vector save_dims = dimensions;
01895 *this = index (idx_vector::colon);
01896 assign (idx, rhs.index (idx_vector::colon));
01897 *this = reshape (save_dims);
01898 }
01899 }
01900 else if (rhl == 1)
01901 {
01902 rhl = idx.length (n);
01903 if (rhs.nnz () != 0)
01904 assign (idx, Sparse<T> (rhl, 1, rhs.data (0)));
01905 else
01906 assign (idx, Sparse<T> (rhl, 1));
01907 }
01908 else
01909 gripe_invalid_assignment_size ();
01910 }
01911
01912 template <class T>
01913 void
01914 Sparse<T>::assign (const idx_vector& idx_i,
01915 const idx_vector& idx_j, const Sparse<T>& rhs)
01916 {
01917 Sparse<T> retval;
01918
01919 assert (ndims () == 2);
01920
01921 octave_idx_type nr = dim1 ();
01922 octave_idx_type nc = dim2 ();
01923 octave_idx_type nz = nnz ();
01924
01925 octave_idx_type n = rhs.rows ();
01926 octave_idx_type m = rhs.columns ();
01927
01928
01929
01930
01931 bool orig_zero_by_zero = (nr == 0 && nc == 0);
01932
01933 if (orig_zero_by_zero || (idx_i.length (nr) == n && idx_j.length (nc) == m))
01934 {
01935 octave_idx_type nrx;
01936 octave_idx_type ncx;
01937
01938 if (orig_zero_by_zero)
01939 {
01940 if (idx_i.is_colon ())
01941 {
01942 nrx = n;
01943
01944 if (idx_j.is_colon ())
01945 ncx = m;
01946 else
01947 ncx = idx_j.extent (nc);
01948 }
01949 else if (idx_j.is_colon ())
01950 {
01951 nrx = idx_i.extent (nr);
01952 ncx = m;
01953 }
01954 else
01955 {
01956 nrx = idx_i.extent (nr);
01957 ncx = idx_j.extent (nc);
01958 }
01959 }
01960 else
01961 {
01962 nrx = idx_i.extent (nr);
01963 ncx = idx_j.extent (nc);
01964 }
01965
01966
01967 if (nrx != nr || ncx != nc)
01968 {
01969 resize (nrx, ncx);
01970 nr = rows ();
01971 nc = cols ();
01972
01973 }
01974
01975 if (n == 0 || m == 0)
01976 return;
01977
01978 if (idx_i.is_colon ())
01979 {
01980 octave_idx_type lb, ub;
01981
01982
01983 if (idx_j.is_colon ())
01984 *this = rhs;
01985 else if (idx_j.is_cont_range (nc, lb, ub))
01986 {
01987
01988 octave_idx_type li = cidx(lb), ui = cidx(ub);
01989 octave_idx_type rnz = rhs.nnz (), new_nz = nz - (ui - li) + rnz;
01990
01991 if (new_nz >= nz && new_nz <= capacity ())
01992 {
01993
01994
01995 if (new_nz > nz)
01996 {
01997
01998 std::copy (data () + ui, data () + nz,
01999 data () + li + rnz);
02000 std::copy (ridx () + ui, ridx () + nz,
02001 ridx () + li + rnz);
02002 mx_inline_add2 (nc - ub, cidx () + ub + 1, new_nz - nz);
02003 }
02004
02005
02006 copy_or_memcpy (rnz, rhs.data (), data () + li);
02007 copy_or_memcpy (rnz, rhs.ridx (), ridx () + li);
02008 mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1, li);
02009
02010 assert (nnz () == new_nz);
02011 }
02012 else
02013 {
02014
02015
02016 const Sparse<T> tmp = *this;
02017 *this = Sparse<T> (nr, nc, new_nz);
02018
02019
02020 copy_or_memcpy (li, tmp.data (), data ());
02021 copy_or_memcpy (li, tmp.ridx (), ridx ());
02022 copy_or_memcpy (lb, tmp.cidx () + 1, cidx () + 1);
02023
02024
02025 copy_or_memcpy (rnz, rhs.data (), data () + li);
02026 copy_or_memcpy (rnz, rhs.ridx (), ridx () + li);
02027 mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1, li);
02028
02029
02030 copy_or_memcpy (nz - ui, tmp.data () + ui, data () + li + rnz);
02031 copy_or_memcpy (nz - ui, tmp.ridx () + ui, ridx () + li + rnz);
02032 mx_inline_add (nc - ub, cidx () + ub + 1, tmp.cidx () + ub + 1, new_nz - nz);
02033
02034 assert (nnz () == new_nz);
02035 }
02036 }
02037 else if (idx_j.is_range () && idx_j.increment () == -1)
02038 {
02039
02040 assign (idx_i, idx_j.sorted (), rhs.index (idx_i, idx_vector (m - 1, 0, -1)));
02041 }
02042 else if (idx_j.is_permutation (nc))
02043 {
02044 *this = rhs.index (idx_i, idx_j.inverse_permutation (nc));
02045 }
02046 else
02047 {
02048 const Sparse<T> tmp = *this;
02049 *this = Sparse<T> (nr, nc);
02050 OCTAVE_LOCAL_BUFFER_INIT (octave_idx_type, jsav, nc, -1);
02051
02052
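// Rebuild the column pointers: start from the original column lengths,
// overwrite the assigned columns with the lengths from rhs, then accumulate.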
02053 for (octave_idx_type i = 0; i < nc; i++)
02054 xcidx(i+1) = tmp.cidx(i+1) - tmp.cidx(i);
02055
02056 for (octave_idx_type i = 0; i < m; i++)
02057 {
02058 octave_idx_type j = idx_j(i);
02059 jsav[j] = i;
02060 xcidx(j+1) = rhs.cidx(i+1) - rhs.cidx(i);
02061 }
02062
02063
02064 for (octave_idx_type i = 0; i < nc; i++)
02065 xcidx(i+1) += xcidx(i);
02066
02067 change_capacity (nnz ());
02068
02069
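// Copy each column, either from rhs (if it was assigned) or from the
// original matrix.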
02070 for (octave_idx_type i = 0; i < nc; i++)
02071 {
02072 octave_idx_type l = xcidx(i), u = xcidx(i+1), j = jsav[i];
02073 if (j >= 0)
02074 {
02075
02076 octave_idx_type k = rhs.cidx(j);
02077 copy_or_memcpy (u - l, rhs.data () + k, xdata () + l);
02078 copy_or_memcpy (u - l, rhs.ridx () + k, xridx () + l);
02079 }
02080 else
02081 {
02082
02083 octave_idx_type k = tmp.cidx(i);
02084 copy_or_memcpy (u - l, tmp.data () + k, xdata () + l);
02085 copy_or_memcpy (u - l, tmp.ridx () + k, xridx () + l);
02086 }
02087 }
02088
02089 }
02090 }
02091 else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.is_vector ())
02092 {
02093
02094 assign (idx_i, rhs);
02095 }
02096 else if (idx_j.is_colon ())
02097 {
02098 if (idx_i.is_permutation (nr))
02099 {
02100 *this = rhs.index (idx_i.inverse_permutation (nr), idx_j);
02101 }
02102 else
02103 {
02104
02105
02106
02107
02108 *this = transpose ();
02109 assign (idx_vector::colon, idx_i, rhs.transpose ());
02110 *this = transpose ();
02111 }
02112 }
02113 else
02114 {
02115
02116 Sparse<T> tmp = index (idx_vector::colon, idx_j);
02117 tmp.assign (idx_i, idx_vector::colon, rhs);
02118 assign (idx_vector::colon, idx_j, tmp);
02119 }
02120 }
02121 else if (m == 1 && n == 1)
02122 {
02123 n = idx_i.length (nr);
02124 m = idx_j.length (nc);
02125 if (rhs.nnz () != 0)
02126 assign (idx_i, idx_j, Sparse<T> (n, m, rhs.data (0)));
02127 else
02128 assign (idx_i, idx_j, Sparse<T> (n, m));
02129 }
02130 else
02131 gripe_assignment_dimension_mismatch ();
02132 }
02133
02134
02135
02136 template <class T>
02137 bool
02138 sparse_ascending_compare (typename ref_param<T>::type a,
02139 typename ref_param<T>::type b)
02140 {
02141 return (a < b);
02142 }
02143
02144 template <class T>
02145 bool
02146 sparse_descending_compare (typename ref_param<T>::type a,
02147 typename ref_param<T>::type b)
02148 {
02149 return (a > b);
02150 }
02151
02152 template <class T>
02153 Sparse<T>
02154 Sparse<T>::sort (octave_idx_type dim, sortmode mode) const
02155 {
02156 Sparse<T> m = *this;
02157
02158 octave_idx_type nr = m.rows ();
02159 octave_idx_type nc = m.columns ();
02160
02161 if (m.length () < 1 || dim > 1)
02162 return m;
02163
02164 if (dim > 0)
02165 {
02166 m = m.transpose ();
02167 nr = m.rows ();
02168 nc = m.columns ();
02169 }
02170
02171 octave_sort<T> lsort;
02172
02173 if (mode == ASCENDING)
02174 lsort.set_compare (sparse_ascending_compare<T>);
02175 else if (mode == DESCENDING)
02176 lsort.set_compare (sparse_descending_compare<T>);
02177 else
02178 abort ();
02179
02180 T *v = m.data ();
02181 octave_idx_type *mcidx = m.cidx ();
02182 octave_idx_type *mridx = m.ridx ();
02183
02184 for (octave_idx_type j = 0; j < nc; j++)
02185 {
02186 octave_idx_type ns = mcidx [j + 1] - mcidx [j];
02187 lsort.sort (v, ns);
02188
02189 octave_idx_type i;
02190 if (mode == ASCENDING)
02191 {
02192 for (i = 0; i < ns; i++)
02193 if (sparse_ascending_compare<T> (static_cast<T> (0), v [i]))
02194 break;
02195 }
02196 else
02197 {
02198 for (i = 0; i < ns; i++)
02199 if (sparse_descending_compare<T> (static_cast<T> (0), v [i]))
02200 break;
02201 }
02202 for (octave_idx_type k = 0; k < i; k++)
02203 mridx [k] = k;
02204 for (octave_idx_type k = i; k < ns; k++)
02205 mridx [k] = k - ns + nr;
02206
02207 v += ns;
02208 mridx += ns;
02209 }
02210
02211 if (dim > 0)
02212 m = m.transpose ();
02213
02214 return m;
02215 }
02216
02217 template <class T>
02218 Sparse<T>
02219 Sparse<T>::sort (Array<octave_idx_type> &sidx, octave_idx_type dim,
02220 sortmode mode) const
02221 {
02222 Sparse<T> m = *this;
02223
02224 octave_idx_type nr = m.rows ();
02225 octave_idx_type nc = m.columns ();
02226
02227 if (m.length () < 1 || dim > 1)
02228 {
02229 sidx = Array<octave_idx_type> (dim_vector (nr, nc), 1);
02230 return m;
02231 }
02232
02233 if (dim > 0)
02234 {
02235 m = m.transpose ();
02236 nr = m.rows ();
02237 nc = m.columns ();
02238 }
02239
02240 octave_sort<T> indexed_sort;
02241
02242 if (mode == ASCENDING)
02243 indexed_sort.set_compare (sparse_ascending_compare<T>);
02244 else if (mode == DESCENDING)
02245 indexed_sort.set_compare (sparse_descending_compare<T>);
02246 else
02247 abort ();
02248
02249 T *v = m.data ();
02250 octave_idx_type *mcidx = m.cidx ();
02251 octave_idx_type *mridx = m.ridx ();
02252
02253 sidx = Array<octave_idx_type> (dim_vector (nr, nc));
02254 OCTAVE_LOCAL_BUFFER (octave_idx_type, vi, nr);
02255
02256 for (octave_idx_type j = 0; j < nc; j++)
02257 {
02258 octave_idx_type ns = mcidx [j + 1] - mcidx [j];
02259 octave_idx_type offset = j * nr;
02260
02261 if (ns == 0)
02262 {
02263 for (octave_idx_type k = 0; k < nr; k++)
02264 sidx (offset + k) = k;
02265 }
02266 else
02267 {
02268 for (octave_idx_type i = 0; i < ns; i++)
02269 vi[i] = mridx[i];
02270
02271 indexed_sort.sort (v, vi, ns);
02272
02273 octave_idx_type i;
02274 if (mode == ASCENDING)
02275 {
02276 for (i = 0; i < ns; i++)
02277 if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
02278 break;
02279 }
02280 else
02281 {
02282 for (i = 0; i < ns; i++)
02283 if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
02284 break;
02285 }
02286
02287 octave_idx_type ii = 0;
02288 octave_idx_type jj = i;
02289 for (octave_idx_type k = 0; k < nr; k++)
02290 {
02291 if (ii < ns && mridx[ii] == k)
02292 ii++;
02293 else
02294 sidx (offset + jj++) = k;
02295 }
02296
02297 for (octave_idx_type k = 0; k < i; k++)
02298 {
02299 sidx (k + offset) = vi [k];
02300 mridx [k] = k;
02301 }
02302
02303 for (octave_idx_type k = i; k < ns; k++)
02304 {
02305 sidx (k - ns + nr + offset) = vi [k];
02306 mridx [k] = k - ns + nr;
02307 }
02308
02309 v += ns;
02310 mridx += ns;
02311 }
02312 }
02313
02314 if (dim > 0)
02315 {
02316 m = m.transpose ();
02317 sidx = sidx.transpose ();
02318 }
02319
02320 return m;
02321 }
02322
02323 template <class T>
02324 Sparse<T>
02325 Sparse<T>::diag (octave_idx_type k) const
02326 {
02327 octave_idx_type nnr = rows ();
02328 octave_idx_type nnc = cols ();
02329 Sparse<T> d;
02330
02331 if (nnr == 0 || nnc == 0)
02332 ;
02333 else if (nnr != 1 && nnc != 1)
02334 {
02335 if (k > 0)
02336 nnc -= k;
02337 else if (k < 0)
02338 nnr += k;
02339
02340 if (nnr > 0 && nnc > 0)
02341 {
02342 octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;
02343
02344
02345 octave_idx_type nel = 0;
02346 if (k > 0)
02347 {
02348 for (octave_idx_type i = 0; i < ndiag; i++)
02349 if (elem (i, i+k) != 0.)
02350 nel++;
02351 }
02352 else if ( k < 0)
02353 {
02354 for (octave_idx_type i = 0; i < ndiag; i++)
02355 if (elem (i-k, i) != 0.)
02356 nel++;
02357 }
02358 else
02359 {
02360 for (octave_idx_type i = 0; i < ndiag; i++)
02361 if (elem (i, i) != 0.)
02362 nel++;
02363 }
02364
02365 d = Sparse<T> (ndiag, 1, nel);
02366 d.xcidx (0) = 0;
02367 d.xcidx (1) = nel;
02368
02369 octave_idx_type ii = 0;
02370 if (k > 0)
02371 {
02372 for (octave_idx_type i = 0; i < ndiag; i++)
02373 {
02374 T tmp = elem (i, i+k);
02375 if (tmp != 0.)
02376 {
02377 d.xdata (ii) = tmp;
02378 d.xridx (ii++) = i;
02379 }
02380 }
02381 }
02382 else if (k < 0)
02383 {
02384 for (octave_idx_type i = 0; i < ndiag; i++)
02385 {
02386 T tmp = elem (i-k, i);
02387 if (tmp != 0.)
02388 {
02389 d.xdata (ii) = tmp;
02390 d.xridx (ii++) = i;
02391 }
02392 }
02393 }
02394 else
02395 {
02396 for (octave_idx_type i = 0; i < ndiag; i++)
02397 {
02398 T tmp = elem (i, i);
02399 if (tmp != 0.)
02400 {
02401 d.xdata (ii) = tmp;
02402 d.xridx (ii++) = i;
02403 }
02404 }
02405 }
02406 }
02407 else
02408 (*current_liboctave_error_handler)
02409 ("diag: requested diagonal out of range");
02410 }
02411 else if (nnr != 0 && nnc != 0)
02412 {
02413 octave_idx_type roff = 0; // row offset of the diagonal in the result
02414 octave_idx_type coff = 0; // column offset of the diagonal in the result
02415 if (k > 0)
02416 {
02417 roff = 0;
02418 coff = k;
02419 }
02420 else if (k < 0)
02421 {
02422 roff = -k;
02423 coff = 0;
02424 }
02425
02426 if (nnr == 1)
02427 {
02428 octave_idx_type n = nnc + std::abs (k);
02429 octave_idx_type nz = nnz ();
02430
02431 d = Sparse<T> (n, n, nz);
02432
02433 if (nnz () > 0)
02434 {
02435 for (octave_idx_type i = 0; i < coff+1; i++)
02436 d.xcidx (i) = 0;
02437
02438 for (octave_idx_type j = 0; j < nnc; j++)
02439 {
02440 for (octave_idx_type i = cidx(j); i < cidx(j+1); i++)
02441 {
02442 d.xdata (i) = data (i);
02443 d.xridx (i) = j + roff;
02444 }
02445 d.xcidx (j + coff + 1) = cidx(j+1);
02446 }
02447
02448 for (octave_idx_type i = nnc + coff + 1; i < n + 1; i++)
02449 d.xcidx (i) = nz;
02450 }
02451 }
02452 else
02453 {
02454 octave_idx_type n = nnr + std::abs (k);
02455 octave_idx_type nz = nnz ();
02456
02457 d = Sparse<T> (n, n, nz);
02458
02459 if (nnz () > 0)
02460 {
02461 octave_idx_type ii = 0;
02462 octave_idx_type ir = ridx(0);
02463
02464 for (octave_idx_type i = 0; i < coff+1; i++)
02465 d.xcidx (i) = 0;
02466
02467 for (octave_idx_type i = 0; i < nnr; i++)
02468 {
02469 if (ir == i)
02470 {
02471 d.xdata (ii) = data (ii);
02472 d.xridx (ii++) = ir + roff;
02473
02474 if (ii != nz)
02475 ir = ridx (ii);
02476 }
02477 d.xcidx (i + coff + 1) = ii;
02478 }
02479
02480 for (octave_idx_type i = nnr + coff + 1; i < n+1; i++)
02481 d.xcidx (i) = nz;
02482 }
02483 }
02484 }
02485
02486 return d;
02487 }
02488
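// Concatenate n sparse matrices along dimension dim (0 = vertical,
// 1 = horizontal).  dim == -1 or -2 maps to dimension 0 or 1 but uses the
// dim_vector::hvcat size rule instead of dim_vector::concat.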
02489 template <class T>
02490 Sparse<T>
02491 Sparse<T>::cat (int dim, octave_idx_type n, const Sparse<T> *sparse_list)
02492 {
02493 // Start from the default concatenation rule; hvcat is substituted below for dim == -1/-2.
02494 bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
02495
02496 if (dim == -1 || dim == -2)
02497 {
02498 concat_rule = &dim_vector::hvcat;
02499 dim = -dim - 1;
02500 }
02501 else if (dim < 0)
02502 (*current_liboctave_error_handler)
02503 ("cat: invalid dimension");
02504
02505 dim_vector dv;
02506 octave_idx_type total_nz = 0;
02507 if (dim == 0 || dim == 1)
02508 {
02509 if (n == 1)
02510 return sparse_list[0];
02511
02512 for (octave_idx_type i = 0; i < n; i++)
02513 {
02514 if (! (dv.*concat_rule) (sparse_list[i].dims (), dim))
02515 (*current_liboctave_error_handler)
02516 ("cat: dimension mismatch");
02517 total_nz += sparse_list[i].nnz ();
02518 }
02519 }
02520 else
02521 (*current_liboctave_error_handler)
02522 ("cat: invalid dimension for sparse concatenation");
02523
02524 Sparse<T> retval (dv, total_nz);
02525
02526 if (retval.is_empty ())
02527 return retval;
02528
02529 switch (dim)
02530 {
02531 case 0:
02532 {
02533 // Vertical concatenation: for every output column, append each
02534 // operand's nonzeros, offsetting the row indices by the rows already placed.
02535 octave_idx_type l = 0;
02536 for (octave_idx_type j = 0; j < dv(1); j++)
02537 {
02538 octave_quit ();
02539
02540 octave_idx_type rcum = 0; // rows contributed by the operands already copied
02541 for (octave_idx_type i = 0; i < n; i++)
02542 {
02543 const Sparse<T>& spi = sparse_list[i];
02544
02545 if (spi.is_empty ())
02546 continue;
02547
02548 octave_idx_type kl = spi.cidx(j), ku = spi.cidx(j+1);
02549 for (octave_idx_type k = kl; k < ku; k++, l++)
02550 {
02551 retval.xridx(l) = spi.ridx(k) + rcum;
02552 retval.xdata(l) = spi.data(k);
02553 }
02554
02555 rcum += spi.rows ();
02556 }
02557
02558 retval.xcidx(j+1) = l;
02559 }
02560
02561 break;
02562 }
02563 case 1:
02564 {
02565 octave_idx_type l = 0;
02566 for (octave_idx_type i = 0; i < n; i++)
02567 {
02568 octave_quit ();
02569
02570 // Empty operands contribute no columns.
02571 if (sparse_list[i].is_empty ())
02572 continue;
02573
02574 octave_idx_type u = l + sparse_list[i].columns ();
02575 retval.assign (idx_vector::colon, idx_vector (l, u), sparse_list[i]);
02576 l = u;
02577 }
02578
02579 break;
02580 }
02581 default:
02582 assert (false);
02583 }
02584
02585 return retval;
02586 }
02587
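// Expand the sparse matrix into a dense Array<T>: start from a zero-filled
// array, then scatter the stored nonzeros into place.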
02588 template <class T>
02589 Array<T>
02590 Sparse<T>::array_value () const
02591 {
02592 NoAlias< Array<T> > retval (dims (), T());
02593 if (rows () == 1) // row vector: each column stores at most one value
02594 {
02595 octave_idx_type i = 0;
02596 for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
02597 {
02598 if (cidx(j+1) > i)
02599 retval(j) = data (i++);
02600 }
02601 }
02602 else
02603 {
02604 for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
02605 for (octave_idx_type i = cidx(j), iu = cidx(j+1); i < iu; i++)
02606 retval (ridx(i), j) = data (i);
02607 }
02608
02609 return retval;
02610 }
02788
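// Dump the fields of the underlying SparseRep; useful when debugging
// reference counting and storage problems.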
02789 template <class T>
02790 void
02791 Sparse<T>::print_info (std::ostream& os, const std::string& prefix) const
02792 {
02793 os << prefix << "rep address: " << rep << "\n"
02794 << prefix << "rep->nzmx: " << rep->nzmx << "\n"
02795 << prefix << "rep->nrows: " << rep->nrows << "\n"
02796 << prefix << "rep->ncols: " << rep->ncols << "\n"
02797 << prefix << "rep->data: " << static_cast<void *> (rep->d) << "\n"
02798 << prefix << "rep->ridx: " << static_cast<void *> (rep->r) << "\n"
02799 << prefix << "rep->cidx: " << static_cast<void *> (rep->c) << "\n"
02800 << prefix << "rep->count: " << rep->count << "\n";
02801 }
02802
02803 #define INSTANTIATE_SPARSE(T, API) \
02804 template class API Sparse<T>;
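// Explicitly instantiates Sparse<T> for a concrete element type, e.g.
// INSTANTIATE_SPARSE (double, OCTAVE_API) expands to
// "template class OCTAVE_API Sparse<double>;".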
02805