//g++ -O3 -g0 -DNDEBUG sparse_product.cpp -I.. -I/home/gael/Coding/LinearAlgebra/mtl4/ -DDENSITY=0.005 -DSIZE=10000 && ./a.out
//g++ -O3 -g0 -DNDEBUG sparse_product.cpp -I.. -I/home/gael/Coding/LinearAlgebra/mtl4/ -DDENSITY=0.05 -DSIZE=2000 && ./a.out
// -DNOGMM -DNOMTL -DCSPARSE
// -I /home/gael/Coding/LinearAlgebra/CSparse/Include/ /home/gael/Coding/LinearAlgebra/CSparse/Lib/libcsparse.a
#define EIGEN_GOOGLEHASH_SUPPORT
#include <google/sparse_hash_map>

#include "BenchSparseUtil.h"

// #define CHECK_MEM std::cout << "check mem\n"; getchar();
  for (int _j=0; _j<NBTRIES; ++_j) { \
    for (int _k=0; _k<REPEAT; ++_k) { \
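// Benchmark driver: fill a SIZE x SIZE sparse matrix from a list of random
// (row, column, value) triplets using several back-ends (Eigen setters, a
// scipy-style COO->CSR path, uBLAS, MTL) and print the time taken by each.
// SIZE and DENSITY come from the compile command above; REPEAT, NBTRIES,
// NBPERROW, KK, Scalar and the timer are assumed to be provided by earlier
// definitions and BenchSparseUtil.h (not shown in this excerpt).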
typedef std::vector<Vector2i> Coordinates;
typedef std::vector<float> Values;
EIGEN_DONT_INLINE Scalar* setinnerrand_eigen(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_dynamic(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_compact(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_sumeq(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_gnu_hash(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_google_dense(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_eigen_google_sparse(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_scipy(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_ublas_mapped(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_ublas_coord(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_ublas_compressed(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_ublas_genvec(const Coordinates& coords, const Values& vals);
EIGEN_DONT_INLINE Scalar* setrand_mtl(const Coordinates& coords, const Values& vals);
int main(int argc, char *argv[])
  bool fullyrand = true;
    pool.reserve(cols*NBPERROW);
    std::cerr << "fill pool" << "\n";
    for (int i=0; i<cols*NBPERROW; )
      // DynamicSparseMatrix<int> stencil(SIZE,SIZE);
      Vector2i ij(internal::random<int>(0,rows-1),internal::random<int>(0,cols-1));
      // if(stencil.coeffRef(ij.x(), ij.y())==0)
      //   stencil.coeffRef(ij.x(), ij.y()) = 1;
    std::cerr << "pool ok" << "\n";
    int n = cols*NBPERROW*KK;
    for (int i=0; i<n; ++i)
      // pick a random pool entry; random<int>(a,b) is inclusive, hence size()-1
      int i = internal::random<int>(0,pool.size()-1);
      coords.push_back(pool[i]);
      values.push_back(internal::random<Scalar>());
    for (int j=0; j<cols; ++j)
    for (int i=0; i<NBPERROW; ++i)
      coords.push_back(Vector2i(internal::random<int>(0,rows-1),j));
      values.push_back(internal::random<Scalar>());
  std::cout << "nnz = " << coords.size() << "\n";
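  // Each block below fills the same coords/values triplet set with a different
  // back-end; timer.value() reports the time measured by the BENCH macro.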
    BENCH(setrand_eigen_dense(coords,values);)
    std::cout << "Eigen Dense\t" << timer.value() << "\n";

  // eigen sparse matrices

    // BENCH(setinnerrand_eigen(coords,values);)
    // std::cout << "Eigen fillrand\t" << timer.value() << "\n";

    BENCH(setrand_eigen_dynamic(coords,values);)
    std::cout << "Eigen dynamic\t" << timer.value() << "\n";

    // BENCH(setrand_eigen_compact(coords,values);)
    // std::cout << "Eigen compact\t" << timer.value() << "\n";

    BENCH(setrand_eigen_sumeq(coords,values);)
    std::cout << "Eigen sumeq\t" << timer.value() << "\n";

    // BENCH(setrand_eigen_gnu_hash(coords,values);)
    // std::cout << "Eigen std::map\t" << timer.value() << "\n";

    BENCH(setrand_scipy(coords,values);)
    std::cout << "scipy\t" << timer.value() << "\n";

    BENCH(setrand_eigen_google_dense(coords,values);)
    std::cout << "Eigen google dense\t" << timer.value() << "\n";

    BENCH(setrand_eigen_google_sparse(coords,values);)
    std::cout << "Eigen google sparse\t" << timer.value() << "\n";

    // BENCH(setrand_ublas_mapped(coords,values);)
    // std::cout << "ublas mapped\t" << timer.value() << "\n";

    BENCH(setrand_ublas_genvec(coords,values);)
    std::cout << "ublas vecofvec\t" << timer.value() << "\n";

    for (int k=0; k<REPEAT; ++k)
      setrand_ublas_compressed(coords,values);
    std::cout << "ublas comp\t" << timer.value() << "\n";

    for (int k=0; k<REPEAT; ++k)
      setrand_ublas_coord(coords,values);
    std::cout << "ublas coord\t" << timer.value() << "\n";

    BENCH(setrand_mtl(coords,values));
    std::cout << "MTL\t" << timer.value() << "\n";
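// The implementations below all perform the same task: accumulate the random
// (row, column, value) triplets into a SIZE x SIZE sparse matrix. They differ
// only in the intermediate data structure used to absorb the random insertions.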
EIGEN_DONT_INLINE Scalar* setinnerrand_eigen(const Coordinates& coords, const Values& vals)
  using namespace Eigen;
  SparseMatrix<Scalar> mat(SIZE,SIZE);
  //mat.startFill(2000000/*coords.size()*/);
  for (int i=0; i<coords.size(); ++i)
    mat.insert(coords[i].x(), coords[i].y()) = vals[i];
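// DynamicSparseMatrix back-end: random insertions through coeffRef(), which
// keeps each inner vector sorted and accumulates duplicates with +=.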
EIGEN_DONT_INLINE Scalar* setrand_eigen_dynamic(const Coordinates& coords, const Values& vals)
  using namespace Eigen;
  DynamicSparseMatrix<Scalar> mat(SIZE,SIZE);
  mat.reserve(coords.size()/10);
  for (int i=0; i<coords.size(); ++i)
    mat.coeffRef(coords[i].x(), coords[i].y()) += vals[i];
  return &mat.coeffRef(coords[0].x(), coords[0].y());
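// "sumeq" strategy: split the triplet list into KK chunks, build a temporary
// DynamicSparseMatrix per chunk, and (presumably, the accumulation step is not
// shown in this excerpt) sum each chunk into the final matrix.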
EIGEN_DONT_INLINE Scalar* setrand_eigen_sumeq(const Coordinates& coords, const Values& vals)
  using namespace Eigen;
  int n = coords.size()/KK;
  DynamicSparseMatrix<Scalar> mat(SIZE,SIZE);
  for (int j=0; j<KK; ++j)
    DynamicSparseMatrix<Scalar> aux(SIZE,SIZE);
    for (int i=j*n; i<(j+1)*n; ++i)
      aux.insert(coords[i].x(), coords[i].y()) += vals[i];
  return &mat.coeffRef(coords[0].x(), coords[0].y());
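// "compact" strategy: fill a DynamicSparseMatrix used as a setter, then convert
// it into a compressed SparseMatrix in one shot.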
EIGEN_DONT_INLINE Scalar* setrand_eigen_compact(const Coordinates& coords, const Values& vals)
  using namespace Eigen;
  DynamicSparseMatrix<Scalar> setter(SIZE,SIZE);
  setter.reserve(coords.size()/10);
  for (int i=0; i<coords.size(); ++i)
    setter.coeffRef(coords[i].x(), coords[i].y()) += vals[i];
  SparseMatrix<Scalar> mat = setter;
  return &mat.coeffRef(coords[0].x(), coords[0].y());
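// RandomSetter back-end using std::map (StdMapTraits) as the temporary
// associative container for the random insertions.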
EIGEN_DONT_INLINE Scalar* setrand_eigen_gnu_hash(const Coordinates& coords, const Values& vals)
  using namespace Eigen;
  SparseMatrix<Scalar> mat(SIZE,SIZE);
    RandomSetter<SparseMatrix<Scalar>, StdMapTraits > setter(mat);
    for (int i=0; i<coords.size(); ++i)
      setter(coords[i].x(), coords[i].y()) += vals[i];
  return &mat.coeffRef(coords[0].x(), coords[0].y());
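// Same scheme, but the RandomSetter is backed by google::dense_hash_map
// (GoogleDenseHashMapTraits).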
EIGEN_DONT_INLINE Scalar* setrand_eigen_google_dense(const Coordinates& coords, const Values& vals)
  using namespace Eigen;
  SparseMatrix<Scalar> mat(SIZE,SIZE);
    RandomSetter<SparseMatrix<Scalar>, GoogleDenseHashMapTraits> setter(mat);
    for (int i=0; i<coords.size(); ++i)
      setter(coords[i].x(), coords[i].y()) += vals[i];
  return &mat.coeffRef(coords[0].x(), coords[0].y());
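// RandomSetter variant backed by google::sparse_hash_map
// (GoogleSparseHashMapTraits): lower memory footprint, slower insertions.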
EIGEN_DONT_INLINE Scalar* setrand_eigen_google_sparse(const Coordinates& coords, const Values& vals)
  using namespace Eigen;
  SparseMatrix<Scalar> mat(SIZE,SIZE);
    RandomSetter<SparseMatrix<Scalar>, GoogleSparseHashMapTraits> setter(mat);
    for (int i=0; i<coords.size(); ++i)
      setter(coords[i].x(), coords[i].y()) += vals[i];
  return &mat.coeffRef(coords[0].x(), coords[0].y());
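// scipy-style COO -> CSR conversion. Only a fragment of the signature survives
// here; the call site in setrand_scipy below passes SIZE, SIZE, coords.size(),
// coords, vals and the matrix's raw CSR arrays (row pointers Bp, column
// indices Bj, values Bx). As a small illustrative example (an assumption, not
// part of the original file): with 3 rows and triplet rows {0,2,0,1}, the
// per-row counts are [2,1,1]; the cumulative sum turns Bp into [0,2,3,4], so
// row i occupies Bj/Bx[ Bp[i] .. Bp[i+1] ).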
void coo_tocsr(const int n_row,
               const Coordinates Aij,

  //compute number of non-zero entries per row of A
  std::fill(Bp, Bp + n_row, 0);

  for (int n = 0; n < nnz; n++){

  //cumsum the nnz per row to get Bp[]
  for(int i = 0, cumsum = 0; i < n_row; i++){

  //write Aj,Ax into Bj,Bx
  for(int n = 0; n < nnz; n++){
    int row  = Aij[n].x();
    Bj[dest] = Aij[n].y();

  for(int i = 0, last = 0; i <= n_row; i++){

  //now Bp,Bj,Bx form a CSR representation (with possible duplicates)
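// Comparator used below: orders (column index, value) pairs by column index;
// csr_sort_indices passes it to std::sort.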
template< class T1, class T2 >
bool kv_pair_less(const std::pair<T1,T2>& x, const std::pair<T1,T2>& y){
  return x.first < y.first;
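// Sort the column indices (and matching values) within each CSR row, using a
// temporary vector of (index, value) pairs per row.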
template<class I, class T>
void csr_sort_indices(const I n_row,

  std::vector< std::pair<I,T> > temp;

  for(I i = 0; i < n_row; i++){

    for(I jj = row_start; jj < row_end; jj++){
      temp.push_back(std::make_pair(Aj[jj],Ax[jj]));

    std::sort(temp.begin(),temp.end(),kv_pair_less<I,T>);

    for(I jj = row_start, n = 0; jj < row_end; jj++, n++){
      Aj[jj] = temp[n].first;
      Ax[jj] = temp[n].second;
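// Merge duplicate column entries within each CSR row by summing their values,
// so every (row, column) pair appears at most once afterwards.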
template <class I, class T>
void csr_sum_duplicates(const I n_row,

  for(I i = 0; i < n_row; i++){
    while( jj < row_end ){
      while( jj < row_end && Aj[jj] == j ){
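// "scipy" strategy: write the triplets straight into the SparseMatrix's raw
// CSR buffers (_outerIndexPtr/_innerIndexPtr/_valuePtr), then sort each row
// and collapse duplicates with the helpers above.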
EIGEN_DONT_INLINE Scalar* setrand_scipy(const Coordinates& coords, const Values& vals)
  using namespace Eigen;
  SparseMatrix<Scalar> mat(SIZE,SIZE);
  mat.resizeNonZeros(coords.size());
  // std::cerr << "setrand_scipy...\n";
  coo_tocsr<Scalar>(SIZE,SIZE, coords.size(), coords, vals, mat._outerIndexPtr(), mat._innerIndexPtr(), mat._valuePtr());
  // std::cerr << "coo_tocsr ok\n";

  csr_sort_indices(SIZE, mat._outerIndexPtr(), mat._innerIndexPtr(), mat._valuePtr());

  csr_sum_duplicates(SIZE, SIZE, mat._outerIndexPtr(), mat._innerIndexPtr(), mat._valuePtr());

  mat.resizeNonZeros(mat._outerIndexPtr()[SIZE]);

  return &mat.coeffRef(coords[0].x(), coords[0].y());
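// uBLAS mapped_matrix back-end: accumulate into an associative (mapped)
// matrix, then convert it to a compressed_matrix.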
EIGEN_DONT_INLINE Scalar* setrand_ublas_mapped(const Coordinates& coords, const Values& vals)
  using namespace boost;
  using namespace boost::numeric;
  using namespace boost::numeric::ublas;
  mapped_matrix<Scalar> aux(SIZE,SIZE);
  for (int i=0; i<coords.size(); ++i)
    aux(coords[i].x(), coords[i].y()) += vals[i];
  compressed_matrix<Scalar> mat(aux);
  return 0;// &mat(coords[0].x(), coords[0].y());
/*EIGEN_DONT_INLINE Scalar* setrand_ublas_coord(const Coordinates& coords, const Values& vals)

  using namespace boost;
  using namespace boost::numeric;
  using namespace boost::numeric::ublas;
  coordinate_matrix<Scalar> aux(SIZE,SIZE);
  for (int i=0; i<coords.size(); ++i)
    aux(coords[i].x(), coords[i].y()) = vals[i];
  compressed_matrix<Scalar> mat(aux);
  return 0;//&mat(coords[0].x(), coords[0].y());

EIGEN_DONT_INLINE Scalar* setrand_ublas_compressed(const Coordinates& coords, const Values& vals)

  using namespace boost;
  using namespace boost::numeric;
  using namespace boost::numeric::ublas;
  compressed_matrix<Scalar> mat(SIZE,SIZE);
  for (int i=0; i<coords.size(); ++i)
    mat(coords[i].x(), coords[i].y()) = vals[i];
  return 0;//&mat(coords[0].x(), coords[0].y());
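// uBLAS generalized_vector_of_vector back-end: one coordinate_vector per row,
// then conversion to a row-major compressed_matrix.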
EIGEN_DONT_INLINE Scalar* setrand_ublas_genvec(const Coordinates& coords, const Values& vals)
  using namespace boost;
  using namespace boost::numeric;
  using namespace boost::numeric::ublas;

  // ublas::vector<coordinate_vector<Scalar> > foo;
  generalized_vector_of_vector<Scalar, row_major, ublas::vector<coordinate_vector<Scalar> > > aux(SIZE,SIZE);
  for (int i=0; i<coords.size(); ++i)
    aux(coords[i].x(), coords[i].y()) += vals[i];
  compressed_matrix<Scalar,row_major> mat(aux);
  return 0;//&mat(coords[0].x(), coords[0].y());
void setrand_mtl(const Coordinates
& coords
, const Values
& vals
);