2 # Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
4 diff -r -u /tmp/839450/libmlx4-1.0.1/Makefile.am libmlx4-1.0.1/Makefile.am
5 --- /tmp/839450/libmlx4-1.0.1/Makefile.am Tue Sep 8 06:40:35 2009
6 +++ libmlx4-1.0.1/Makefile.am Tue Mar 15 06:49:47 2011
8 src_libmlx4_la_SOURCES = $(MLX4_SOURCES)
9 src_libmlx4_la_LDFLAGS = -avoid-version -release @IBV_DEVICE_LIBRARY_EXTENSION@ \
10 $(mlx4_version_script)
11 - mlx4confdir = $(sysconfdir)/libibverbs.d
12 + mlx4confdir = $(datadir)/libibverbs.d
13 mlx4conf_DATA = mlx4.driver
15 mlx4libdir = $(libdir)/infiniband
16 diff -r -u /tmp/839450/libmlx4-1.0.1/src/mlx4-abi.h libmlx4-1.0.1/src/mlx4-abi.h
17 --- /tmp/839450/libmlx4-1.0.1/src/mlx4-abi.h Thu Mar 10 04:48:34 2011
18 +++ libmlx4-1.0.1/src/mlx4-abi.h Fri Feb 11 03:49:51 2011
21 #include <infiniband/kern-abi.h>
23 +#if defined(__SVR4) && defined(__sun)
24 +#include <sys/ib/adapters/mlnx_umap.h> /* Opaque CI data out definitions */
27 #define MLX4_UVERBS_MIN_ABI_VERSION 2
28 #define MLX4_UVERBS_MAX_ABI_VERSION 3
31 struct ibv_alloc_pd_resp ibv_resp;
36 +struct mlx4_share_pd_resp {
37 + struct ibv_share_pd_resp ibv_resp;
42 struct mlx4_create_cq {
43 diff -r -u /tmp/839450/libmlx4-1.0.1/src/verbs.c libmlx4-1.0.1/src/verbs.c
44 --- /tmp/839450/libmlx4-1.0.1/src/verbs.c Thu Mar 10 04:48:34 2011
45 +++ libmlx4-1.0.1/src/verbs.c Fri Mar 11 14:40:18 2011
50 +#if defined(__SVR4) && defined(__sun)
52 +	 * To be consistent with OFED, and so that the queue operations in srq.c
53 +	 * work, we need to report the max as the actual max less 1. In OFED this
54 +	 * is done in the HCA driver.
56 +	attr->max_srq_wr -= 1;
58 major = (raw_fw_ver >> 32) & 0xffff;
59 minor = (raw_fw_ver >> 16) & 0xffff;
60 sub_minor = raw_fw_ver & 0xffff;
62 struct ibv_alloc_pd cmd;
63 struct mlx4_alloc_pd_resp resp;
65 +#if defined(__SVR4) && defined(__sun)
66 + mlnx_umap_pd_data_out_t *mdd;
69 pd = malloc(sizeof *pd);
75 +#if defined(__SVR4) && defined(__sun)
77 + * kernel driver passes back the PD table index as opaque data. This
78 + * is required for specifying the PD in user space address vectors.
80 + mdd = (mlnx_umap_pd_data_out_t *) &resp.ibv_resp.drv_out;
81 + pd->pdn = mdd->mpd_pdnum;
89 +struct ibv_shpd *mlx4_alloc_shpd(struct ibv_pd *pd, uint64_t share_key, struct ibv_shpd *shpd)
91 + struct ibv_alloc_shpd cmd;
92 + struct ibv_alloc_shpd_resp resp;
94 + if (ibv_cmd_alloc_shpd(pd->context, pd, share_key, shpd, &cmd, sizeof cmd,
95 + &resp, sizeof resp)) {
103 +struct ibv_pd *mlx4_share_pd(struct ibv_context *context, struct ibv_shpd *shpd, uint64_t share_key)
105 + struct ibv_share_pd cmd;
106 + struct mlx4_share_pd_resp resp;
107 + struct mlx4_pd *pd;
108 +#if defined(__SVR4) && defined(__sun)
109 + mlnx_umap_pd_data_out_t *mdd;
112 + pd = malloc(sizeof *pd);
116 + if (ibv_cmd_share_pd(context, shpd, share_key, &pd->ibv_pd, &cmd, sizeof cmd,
117 + &resp.ibv_resp, sizeof resp)) {
122 +#if defined(__SVR4) && defined(__sun)
124 + * kernel driver passes back the PD table index as opaque data. This
125 + * is required for specifying the PD in user space address vectors.
127 + mdd = (mlnx_umap_pd_data_out_t *) &resp.ibv_resp.drv_out;
128 + pd->pdn = mdd->mpd_pdnum;
130 + pd->pdn = resp.pdn;
133 + return &pd->ibv_pd;
136 int mlx4_free_pd(struct ibv_pd *pd)
143 +struct ibv_mr *mlx4_reg_mr_relaxed(struct ibv_pd *pd, void *addr, size_t length,
147 + struct ibv_reg_mr cmd;
150 + mr = malloc(sizeof *mr);
154 +#ifdef IBV_CMD_REG_MR_RELAXED_HAS_RESP_PARAMS
156 + struct ibv_reg_mr_resp resp;
158 + ret = ibv_cmd_reg_mr_relaxed(pd, addr, length, (uintptr_t) addr,
159 + access, mr, &cmd, sizeof cmd,
160 + &resp, sizeof resp);
163 + ret = ibv_cmd_reg_mr_relaxed(pd, addr, length, (uintptr_t) addr, access, mr,
174 int mlx4_dereg_mr(struct ibv_mr *mr)
181 +int mlx4_dereg_mr_relaxed(struct ibv_mr *mr)
185 + ret = ibv_cmd_dereg_mr_relaxed(mr);
193 +int mlx4_flush_relaxed_mr(struct ibv_pd *pd)
197 + ret = ibv_cmd_flush_relaxed_mr(pd);
204 static int align_queue_size(int req)
208 struct mlx4_create_cq_resp resp;
211 +#if defined(__SVR4) && defined(__sun)
213 + mlnx_umap_cq_data_out_t *mdd;
216 /* Sanity check CQ size before proceeding */
220 cqe = align_queue_size(cqe + 1);
222 - if (mlx4_alloc_cq_buf(to_mdev(context->device), &cq->buf, cqe))
223 +#if !(defined(__SVR4) && defined(__sun))
224 +	if (mlx4_alloc_cq_buf(to_mdev(context->device), &cq->buf, cqe))
227 cq->set_ci_db = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ);
228 @@ -198,15 +324,78 @@
230 cmd.buf_addr = (uintptr_t) cq->buf.buf;
231 cmd.db_addr = (uintptr_t) cq->set_ci_db;
233 + cq->buf.buf = NULL;
234 + cq->buf.length = 0;
236 + cq->set_ci_db = NULL;
239 ret = ibv_cmd_create_cq(context, cqe - 1, channel, comp_vector,
240 &cq->ibv_cq, &cmd.ibv_cmd, sizeof cmd,
241 &resp.ibv_resp, sizeof resp);
242 +#if defined(__SVR4) && defined(__sun)
252 +#if defined(__SVR4) && defined(__sun)
254 + * For Solaris the kernel driver passes back mmap information for
255 + * mapping the CQ memory it allocated.
257 + mdd = (mlnx_umap_cq_data_out_t *) &resp.ibv_resp.drv_out;
258 + if (mdd->mcq_rev < MLNX_UMAP_IF_VERSION) {
259 + fprintf(stderr, PFX "libmlx4_create_cq: libmlx4/hermon umap "
260 + "rev mismatch (kernel rev=%d)\n", mdd->mcq_rev);
264 + cqbuf = mmap64((void *)0, mdd->mcq_maplen, (PROT_READ | PROT_WRITE),
265 + MAP_SHARED, context->mmap_fd, mdd->mcq_mapoffset);
267 + if (cqbuf == MAP_FAILED)
271 + * Extract hardware driver values for the number of CQEs and the
272 + * hardware CQ number to use (needed for user space doorbells).
274 + cqe = mdd->mcq_numcqe;
275 + cq->cqn = mdd->mcq_cqnum;
276 + cq->buf.buf = cqbuf;
277 + cq->buf.length = mdd->mcq_maplen;
278 + cq->ibv_cq.cqe = cqe-1;
281 +	 * We map both poll and arm as separate doorbells (OFED assumes a 1-word
282 +	 * offset and just bumps the address) since Solaris provides a
283 +	 * separate offset. This will amount to the same thing (a second
284 + * reference to the first doorbell is added) but is more flexible.
286 + cq->set_ci_db = mlx4_alloc_db(to_mctx(context),
287 + mdd->mcq_polldbr_mapoffset,
288 + mdd->mcq_polldbr_maplen,
289 + mdd->mcq_polldbr_offset);
290 + if (cq->set_ci_db == NULL)
293 + cq->arm_db = mlx4_alloc_db(to_mctx(context),
294 + mdd->mcq_armdbr_mapoffset,
295 + mdd->mcq_armdbr_maplen,
296 + mdd->mcq_armdbr_offset);
297 + if (cq->arm_db == NULL)
302 + *cq->set_ci_db = 0;
309 mlx4_free_buf(&cq->buf);
311 +#if defined(__SVR4) && defined(__sun)
314 +	 * Calling ibv_cmd_destroy_cq() will try to take the ibv_cq
315 +	 * mutex that is initialised by the ibv_create_cq() entry point
316 +	 * that called us AFTER we return, so it's not initialised yet.
317 +	 * Initialise it here so the destroy call doesn't hang.
319 + pthread_mutex_init(&(cq->ibv_cq.mutex), NULL);
320 + pthread_cond_init(&(cq->ibv_cq.cond), NULL);
321 + cq->ibv_cq.comp_events_completed = 0;
322 + cq->ibv_cq.async_events_completed = 0;
324 + ibv_cmd_destroy_cq(&cq->ibv_cq);
329 @@ -225,12 +429,16 @@
331 struct mlx4_cq *cq = to_mcq(ibcq);
332 struct mlx4_resize_cq cmd;
333 + struct ibv_resize_cq_resp resp;
335 int old_cqe, outst_cqe, ret;
337 +#if defined(__SVR4) && defined(__sun)
339 + mlnx_umap_cq_data_out_t *mdd;
341 /* Sanity check CQ size before proceeding */
346 pthread_spin_lock(&cq->lock);
348 @@ -247,32 +455,76 @@
352 +#if !(defined(__SVR4) && defined(__sun))
353 ret = mlx4_alloc_cq_buf(to_mdev(ibcq->context->device), &buf, cqe);
357 - old_cqe = ibcq->cqe;
358 cmd.buf_addr = (uintptr_t) buf.buf;
360 + old_cqe = ibcq->cqe;
362 #ifdef IBV_CMD_RESIZE_CQ_HAS_RESP_PARAMS
364 - struct ibv_resize_cq_resp resp;
365 - ret = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof cmd,
366 + ret = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof cmd,
370 ret = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof cmd);
373 - mlx4_free_buf(&buf);
376 +#if !(defined(__SVR4) && defined(__sun))
377 + mlx4_free_buf(&buf);
381 - mlx4_cq_resize_copy_cqes(cq, buf.buf, old_cqe);
382 + mlx4_cq_resize_copy_cqes(cq, buf.buf, old_cqe);
383 + mlx4_free_buf(&cq->buf);
388 + if (cq->buf.buf != NULL) {
389 + buf.buf = malloc(cq->buf.length);
395 - mlx4_free_buf(&cq->buf);
397 + memcpy(buf.buf, cq->buf.buf, cq->buf.length);
398 + buf.length = cq->buf.length;
399 + ret = munmap((char *)cq->buf.buf, cq->buf.length);
406 + * For Solaris the kernel driver passes back mmap information for
407 + * mapping the CQ memory it allocated.
409 + mdd = (mlnx_umap_cq_data_out_t *) &resp.drv_out;
410 + if (mdd->mcq_rev < MLNX_UMAP_IF_VERSION) {
411 + fprintf(stderr, PFX "libmlx4_resize_cq: libmlx4/hermon umap "
412 + "rev mismatch (kernel rev=%d)\n", mdd->mcq_rev);
417 + cqbuf = mmap64((void *)0, mdd->mcq_maplen, (PROT_READ | PROT_WRITE),
418 + MAP_SHARED, ibcq->context->mmap_fd, mdd->mcq_mapoffset);
420 + if (cqbuf == MAP_FAILED) {
424 + cq->buf.buf = buf.buf;
425 + cq->buf.length = buf.length;
426 + mlx4_cq_resize_copy_cqes(cq, cqbuf, old_cqe);
427 + cq->buf.buf = cqbuf;
428 + cq->buf.length = mdd->mcq_maplen;
430 + cq->ibv_cq.cqe = mdd->mcq_numcqe - 1;
431 + cq->cqn = mdd->mcq_cqnum;
434 pthread_spin_unlock(&cq->lock);
439 mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db);
440 +#if defined(__SVR4) && defined(__sun)
441 + mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->arm_db);
443 mlx4_free_buf(&to_mcq(cq)->buf);
447 struct mlx4_create_srq_resp resp;
448 struct mlx4_srq *srq;
450 +#if defined(__SVR4) && defined(__sun)
451 + mlnx_umap_srq_data_out_t *mdd;
455 /* Sanity check SRQ size before proceeding */
456 if (attr->attr.max_wr > 1 << 16 || attr->attr.max_sge > 64)
458 if (pthread_spin_init(&srq->lock, PTHREAD_PROCESS_PRIVATE))
461 +#if !(defined(__SVR4) && defined(__sun))
462 srq->max = align_queue_size(attr->attr.max_wr + 1);
463 srq->max_gs = attr->attr.max_sge;
471 + * Solaris SRQ WQE memory is supplied by the kernel; we'll update
472 + * these after the creation.
474 + srq->buf.buf = NULL;
475 + srq->buf.length = 0;
479 +	 * We need Solaris to allocate space for the spare WR in
480 + * the list that makes the queue work. The Solaris driver
481 + * will round up to the nearest power of 2 as align_queue_size()
484 + attr->attr.max_wr += 1;
486 cmd.buf_addr = (uintptr_t) srq->buf.buf;
487 cmd.db_addr = (uintptr_t) srq->db;
489 @@ -331,19 +607,97 @@
490 ret = ibv_cmd_create_srq(pd, &srq->ibv_srq, attr,
491 &cmd.ibv_cmd, sizeof cmd,
492 &resp.ibv_resp, sizeof resp);
493 +#if defined(__SVR4) && defined(__sun)
499 + * The kernel driver passes back mmap information for mapping the
500 + * SRQ work queue memory it allocated and the doorbell for
503 + mdd = (mlnx_umap_srq_data_out_t *) &resp.ibv_resp.drv_out;
504 + if (mdd->msrq_rev < 1) {
505 +		fprintf(stderr, PFX "libmlx4_create_srq: libmlx4/hermon umap "
506 + "rev mismatch (kernel rev=%d)\n", mdd->msrq_rev);
510 + srqbuf = mmap64((void *)0, mdd->msrq_maplen, (PROT_READ | PROT_WRITE),
511 + MAP_SHARED, pd->context->mmap_fd, mdd->msrq_mapoffset);
513 + if (srqbuf == MAP_FAILED) {
517 + srq->buf.buf = srqbuf;
518 + srq->buf.length = mdd->msrq_maplen;
519 + srq->max = resp.ibv_resp.max_wr;
520 + srq->max_gs = resp.ibv_resp.max_sge;
521 + srq->srqn = mdd->msrq_srqnum;
524 + srq->db = mlx4_alloc_db(to_mctx(pd->context),
525 + mdd->msrq_rdbr_mapoffset,
526 + mdd->msrq_rdbr_maplen,
527 + mdd->msrq_rdbr_offset);
528 + if (srq->db == NULL) {
533 +	 * The following call only initializes memory and control structures;
534 + * it utilizes the memory allocated by the kernel.
535 + * It also allocates the srq->wrid memory.
537 + if (mlx4_set_srq_buf(pd, srq, mdd->msrq_wqesz, mdd->msrq_numwqe)) {
542 +	 * The returned max wr will have been rounded up to the nearest
543 +	 * power of 2; subtracting 1 from that and reporting that value
544 + * as the max will give us the required free WR in the queue, as
547 + attr->attr.max_wr -= 1;
552 srq->srqn = resp.srqn;
555 return &srq->ibv_srq;
558 mlx4_free_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ, srq->db);
560 +#if defined(__SVR4) && defined(__sun)
564 + mlx4_free_buf(&srq->buf);
568 +	 * Calling ibv_cmd_destroy_srq() will try to take the ibv_srq
569 +	 * mutex that is initialised by the ibv_create_srq() entry point
570 +	 * that called us AFTER we return, so it's not initialised yet.
571 +	 * Initialise it here so the destroy call doesn't hang.
573 + pthread_mutex_init(&(srq->ibv_srq.mutex), NULL);
574 + pthread_cond_init(&(srq->ibv_srq.cond), NULL);
575 + srq->ibv_srq.events_completed = 0;
577 + ibv_cmd_destroy_srq(&srq->ibv_srq);
582 mlx4_free_buf(&srq->buf);
589 struct ibv_modify_srq cmd;
591 +#if !(defined(__SVR4) && defined(__sun))
592 return ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd);
596 + attr->max_wr += 1; /* See create_srq */
597 + ret = ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd);
603 int mlx4_query_srq(struct ibv_srq *srq,
606 struct ibv_query_srq cmd;
608 +#if !(defined(__SVR4) && defined(__sun))
609 return ibv_cmd_query_srq(srq, attr, &cmd, sizeof cmd);
614 + ret = ibv_cmd_query_srq(srq, attr, &cmd, sizeof cmd);
615 + attr->max_wr -= 1; /* See create_srq */
621 int mlx4_destroy_srq(struct ibv_srq *ibsrq)
625 struct mlx4_context *context = to_mctx(pd->context);
626 +#if defined(__SVR4) && defined(__sun)
627 + mlnx_umap_qp_data_out_t *mdd;
632 /* Sanity check QP size before proceeding */
637 +#if !(defined(__SVR4) && defined(__sun))
638 mlx4_calc_sq_wqe_size(&attr->cap, attr->qp_type, qp);
642 qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
643 qp->sq.wqe_cnt = align_queue_size(attr->cap.max_send_wr + qp->sq_spare_wqes);
644 qp->rq.wqe_cnt = align_queue_size(attr->cap.max_recv_wr);
647 if (attr->srq || attr->qp_type == IBV_QPT_XRC)
648 attr->cap.max_recv_wr = qp->rq.wqe_cnt = 0;
650 attr->cap.max_recv_wr = 1;
653 +#if defined(__SVR4) && defined(__sun)
654 + if (pthread_spin_init(&qp->sq.lock, PTHREAD_PROCESS_PRIVATE) ||
655 + pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE))
659 + * Solaris QP work queue memory is supplied by the kernel, so
660 + * we will update this after creation.
662 + qp->buf.buf = NULL;
663 + qp->sq.wrid = NULL;
664 + qp->rq.wrid = NULL;
665 + qp->buf.length = 0;
667 + memset(&cmd, 0, sizeof(cmd));
669 if (mlx4_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp))
672 @@ -505,17 +900,84 @@
674 cmd.sq_no_prefetch = 0; /* OK for ABI 2: just a reserved field */
675 memset(cmd.reserved, 0, sizeof cmd.reserved);
678 pthread_mutex_lock(&to_mctx(pd->context)->qp_table_mutex);
680 ret = ibv_cmd_create_qp(pd, &qp->ibv_qp, attr, &cmd.ibv_cmd, sizeof cmd,
682 +#if defined(__SVR4) && defined(__sun)
687 + * The kernel driver passes back mmap information for mapping the
688 +	 * QP work queue memory it allocated into user space.
690 + mdd = (mlnx_umap_qp_data_out_t *) &resp.drv_out;
691 + if (mdd->mqp_rev < 2) {
692 + fprintf(stderr, PFX "libmlx4_create_qp: libmlx4/hermon umap "
693 + "rev mismatch (kernel rev=%d)\n", mdd->mqp_rev);
696 + qpbuf = mmap64((void *)0, mdd->mqp_maplen, (PROT_READ | PROT_WRITE),
697 + MAP_SHARED, pd->context->mmap_fd, mdd->mqp_mapoffset);
699 + if (qpbuf == MAP_FAILED)
703 +	 * Need to set qp->buf here in case alloc_db fails; then
704 +	 * we'll call mlx4_free_buf() to unmap.
706 + qp->buf.buf = qpbuf;
707 + qp->buf.length = mdd->mqp_maplen;
709 + if (!attr->srq && attr->qp_type != IBV_QPT_XRC) {
710 + qp->db = mlx4_alloc_db(to_mctx(pd->context),
711 + mdd->mqp_rdbr_mapoffset,
712 + mdd->mqp_rdbr_maplen,
713 + mdd->mqp_rdbr_offset);
714 + if (qp->db == NULL)
721 +	 * Retrieve the send queue's actual size, and the number of headroom WQEs
722 +	 * that were required based on kernel setup of prefetch or not for
724 +	 * Note: mqp_sq_numwqe includes the headroom wqes. The private
725 +	 * wqe.cnt also includes headroom wqes; the verbs count
726 + * should reflect the wqe count that is usable.
728 + qp->sq_spare_wqes = mdd->mqp_sq_headroomwqes;
729 + qp->sq.wqe_cnt = mdd->mqp_sq_numwqe;
732 + qp->rq.wqe_cnt = 0;
734 + qp->rq.wqe_cnt = mdd->mqp_rq_numwqe;
736 + if (mlx4_set_qp_buf(pd, qp, qpbuf, mdd->mqp_maplen,
737 + mdd->mqp_rq_wqesz, mdd->mqp_rq_off,
738 + mdd->mqp_sq_wqesz, mdd->mqp_sq_off))
741 + mlx4_init_qp_indices(qp);
743 ret = mlx4_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp);
750 + ret = mlx4_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp);
754 pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);
756 qp->rq.wqe_cnt = attr->cap.max_recv_wr;
761 +#if defined(__SVR4) && defined(__sun)
763 + if (!attr->srq && attr->qp_type != IBV_QPT_XRC)
764 + mlx4_free_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ, qp->db);
766 + mlx4_free_buf(&qp->buf);
770 +	 * Calling ibv_cmd_destroy_qp() will try to take the ibv_qp
771 +	 * mutex that is initialised by the ibv_create_qp() entry point
772 +	 * that called us AFTER we return, so it's not initialised yet.
773 +	 * Initialise it here so the destroy call doesn't hang.
775 + pthread_mutex_init(&(qp->ibv_qp.mutex), NULL);
776 + pthread_cond_init(&(qp->ibv_qp.cond), NULL);
777 + qp->ibv_qp.events_completed = 0;
778 ibv_cmd_destroy_qp(&qp->ibv_qp);
780 + pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);
791 + ibv_cmd_destroy_qp(&qp->ibv_qp);
794 pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);
795 if (!attr->srq && attr->qp_type != IBV_QPT_XRC)
804 @@ -745,6 +1237,13 @@
805 struct ibv_cq *xrc_cq,
806 struct ibv_srq_init_attr *attr)
808 +#if defined(__SVR4) && defined(__sun)
810 +	 * Not supported by the Solaris kernel driver. When/if supported,
811 + * this routine will need to be ported.
815 struct mlx4_create_xrc_srq cmd;
816 struct mlx4_create_srq_resp resp;
817 struct mlx4_srq *srq;
825 struct ibv_xrc_domain *mlx4_open_xrc_domain(struct ibv_context *context,
826 diff -r -u /tmp/839450/libmlx4-1.0.1/src/qp.c libmlx4-1.0.1/src/qp.c
827 --- /tmp/839450/libmlx4-1.0.1/src/qp.c Thu Mar 10 04:48:34 2011
828 +++ libmlx4-1.0.1/src/qp.c Tue Mar 15 07:09:43 2011
831 ctx = to_mctx(ibqp->context);
833 - if (nreq == 1 && inl && size > 1 && size < ctx->bf_buf_size / 16) {
834 + if (ctx->ibv_ctx.device->blueflame_enabled && nreq == 1 && inl &&
835 + size > 1 && size < ctx->bf_buf_size / 16) {
836 ctrl->owner_opcode |= htonl((qp->sq.head & 0xffff) << 8);
837 *(uint32_t *) (&ctrl->vlan_tag) |= qp->doorbell_qpn;
843 +#if defined(__SVR4) && defined(__sun)
844 +int mlx4_set_qp_buf(struct ibv_pd *pd, struct mlx4_qp *qp, void *qpbuf,
845 + uint64_t buflen, uint32_t rq_wqesz, uint32_t rq_off,
846 + uint32_t sq_wqesz, uint32_t sq_off)
848 + qp->buf.buf = qpbuf;
849 + qp->buf.length = buflen;
851 + qp->sq.wrid = malloc(qp->sq.wqe_cnt * sizeof (uint64_t));
855 + if (qp->rq.wqe_cnt) {
856 + qp->rq.wrid = malloc(qp->rq.wqe_cnt * sizeof (uint64_t));
857 + if (!qp->rq.wrid) {
863 + for (qp->rq.wqe_shift = 4; 1 << qp->rq.wqe_shift < rq_wqesz;
864 + qp->rq.wqe_shift++) {
868 + for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < sq_wqesz;
869 + qp->sq.wqe_shift++) {
873 + qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
874 + (qp->sq.wqe_cnt << qp->sq.wqe_shift);
875 + if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
877 + qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
879 + qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
883 + if ((long int)qp->buf.length < (long int)qp->buf_size) {
884 +		fprintf(stderr, PFX "QP kernel buffer size %lu < user buf size %d\n",
885 +		    (unsigned long)qp->buf.length, qp->buf_size);
887 + if ((!rq_off && qp->rq.offset) || (!sq_off && qp->sq.offset)) {
888 + fprintf(stderr, PFX "QP kernel and user out of sync on buffer order\n");
891 + memset(qp->buf.buf, 0, qp->buf_size);
895 int mlx4_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
896 enum ibv_qp_type type, struct mlx4_qp *qp)
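
As an aside on the mlx4_set_qp_buf() hunk above: the two shift loops round each kernel-supplied WQE size up to a power-of-two stride (starting at 16 bytes for the RQ and 64 bytes for the SQ), and the queue with the larger stride is placed first in the buffer. The standalone sketch below reproduces just that arithmetic; the helper name wqe_shift_for() and the sample sizes are illustrative only and not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical helper mirroring the shift loops in mlx4_set_qp_buf():
 * find the smallest shift >= min_shift with (1 << shift) >= wqesz. */
static int wqe_shift_for(uint32_t wqesz, int min_shift)
{
	int shift;

	for (shift = min_shift; (1u << shift) < wqesz; shift++)
		;
	return shift;
}

int main(void)
{
	/* Sample values only; the real sizes come from mlnx_umap_qp_data_out_t. */
	uint32_t rq_wqesz = 24, sq_wqesz = 96;
	uint32_t rq_cnt = 256, sq_cnt = 128;

	int rq_shift = wqe_shift_for(rq_wqesz, 4);	/* -> 5 (32-byte stride)  */
	int sq_shift = wqe_shift_for(sq_wqesz, 6);	/* -> 7 (128-byte stride) */

	/* Total buffer size and queue offsets, as laid out in mlx4_set_qp_buf(). */
	size_t buf_size = ((size_t)rq_cnt << rq_shift) +
			  ((size_t)sq_cnt << sq_shift);
	size_t rq_off, sq_off;

	if (rq_shift > sq_shift) {
		rq_off = 0;
		sq_off = (size_t)rq_cnt << rq_shift;
	} else {
		sq_off = 0;
		rq_off = (size_t)sq_cnt << sq_shift;
	}

	printf("rq stride %u, sq stride %u, buf %zu, rq@%zu, sq@%zu\n",
	    1u << rq_shift, 1u << sq_shift, buf_size, rq_off, sq_off);
	return 0;
}
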
898 diff -r -u /tmp/839450/libmlx4-1.0.1/src/mlx4.h libmlx4-1.0.1/src/mlx4.h
899 --- /tmp/839450/libmlx4-1.0.1/src/mlx4.h Thu Mar 10 04:48:34 2011
900 +++ libmlx4-1.0.1/src/mlx4.h Fri Feb 11 04:05:04 2011
902 int xrc_srq_table_shift;
903 int xrc_srq_table_mask;
905 +#if defined(__SVR4) && defined(__sun)
906 + struct mlx4_db_page *db_page_list;
908 struct mlx4_db_page *db_list[MLX4_NUM_DB_TYPE];
910 pthread_mutex_t db_list_mutex;
914 int mlx4_alloc_buf(struct mlx4_buf *buf, size_t size, int page_size);
915 void mlx4_free_buf(struct mlx4_buf *buf);
917 +#if defined(__SVR4) && defined(__sun)
918 +uint32_t *mlx4_alloc_db(struct mlx4_context *context, uint64_t mapoffset,
919 + uint64_t maplen, uint32_t offset);
921 uint32_t *mlx4_alloc_db(struct mlx4_context *context, enum mlx4_db_type type);
923 void mlx4_free_db(struct mlx4_context *context, enum mlx4_db_type type, uint32_t *db);
925 int mlx4_query_device(struct ibv_context *context,
926 @@ -360,11 +369,17 @@
927 struct ibv_port_attr *attr);
929 struct ibv_pd *mlx4_alloc_pd(struct ibv_context *context);
930 +struct ibv_shpd *mlx4_alloc_shpd(struct ibv_pd *pd, uint64_t share_key, struct ibv_shpd *shpd);
931 +struct ibv_pd *mlx4_share_pd(struct ibv_context *context, struct ibv_shpd *shpd, uint64_t share_key);
932 int mlx4_free_pd(struct ibv_pd *pd);
934 struct ibv_mr *mlx4_reg_mr(struct ibv_pd *pd, void *addr,
935 size_t length, int access);
936 +struct ibv_mr *mlx4_reg_mr_relaxed(struct ibv_pd *pd, void *addr,
937 + size_t length, int access);
938 int mlx4_dereg_mr(struct ibv_mr *mr);
939 +int mlx4_dereg_mr_relaxed(struct ibv_mr *mr);
940 +int mlx4_flush_relaxed_mr(struct ibv_pd *pd);
942 struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
943 struct ibv_comp_channel *channel,
945 int mlx4_query_srq(struct ibv_srq *srq,
946 struct ibv_srq_attr *attr);
947 int mlx4_destroy_srq(struct ibv_srq *srq);
948 +#if defined(__SVR4) && defined(__sun)
949 +int mlx4_set_srq_buf(struct ibv_pd *pd, struct mlx4_srq *srq,
950 + uint32_t srq_wqesz, uint32_t srq_numwqe);
952 int mlx4_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
953 struct mlx4_srq *srq);
955 void mlx4_free_srq_wqe(struct mlx4_srq *srq, int ind);
956 int mlx4_post_srq_recv(struct ibv_srq *ibsrq,
957 struct ibv_recv_wr *wr,
959 void mlx4_calc_sq_wqe_size(struct ibv_qp_cap *cap, enum ibv_qp_type type,
961 int num_inline_segs(int data, enum ibv_qp_type type);
962 +#if defined(__SVR4) && defined(__sun)
963 +int mlx4_set_qp_buf(struct ibv_pd *pd, struct mlx4_qp *qp,
964 + void *qpbuf, uint64_t buflen, uint32_t rq_wqesz,
965 + uint32_t rq_off, uint32_t sq_wqesz, uint32_t sq_off);
967 int mlx4_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap,
968 enum ibv_qp_type type, struct mlx4_qp *qp);
970 void mlx4_set_sq_sizes(struct mlx4_qp *qp, struct ibv_qp_cap *cap,
971 enum ibv_qp_type type);
972 struct mlx4_qp *mlx4_find_qp(struct mlx4_context *ctx, uint32_t qpn);
973 diff -r -u /tmp/839450/libmlx4-1.0.1/src/srq.c libmlx4-1.0.1/src/srq.c
974 --- /tmp/839450/libmlx4-1.0.1/src/srq.c Thu Mar 10 04:48:34 2011
975 +++ libmlx4-1.0.1/src/srq.c Fri Feb 11 04:05:05 2011
980 +#if defined(__SVR4) && defined(__sun)
982 + * The Solaris kernel allocates the SRQ WQE buffer; this routine
983 + * initializes the control structures and buffer contents for the
984 + * SRQ memory. That memory is mapped into the caller's address
985 + * space prior to this call.
987 +int mlx4_set_srq_buf(struct ibv_pd *pd, struct mlx4_srq *srq,
988 + uint32_t srq_wqesz, uint32_t srq_numwqe)
990 + struct mlx4_wqe_srq_next_seg *next;
991 + struct mlx4_wqe_data_seg *scatter;
994 + srq->max = srq_numwqe;
996 + srq->wrid = malloc(srq->max * sizeof (uint64_t));
1001 + for (srq->wqe_shift = 5; 1 << srq->wqe_shift < srq_wqesz; ++srq->wqe_shift) {
1005 + memset(srq->buf.buf, 0, srq->buf.length);
1008 + * Now initialize the SRQ buffer so that all of the WQEs are
1009 + * linked into the list of free WQEs.
1011 + for (i = 0; i < srq->max; ++i) {
1012 + next = get_wqe(srq, i);
1013 + next->next_wqe_index = htons((i + 1) & (srq->max - 1));
1015 + for (scatter = (void *) (next + 1);
1016 + (void *) scatter < (void *) next + (1 << srq->wqe_shift);
1018 + scatter->lkey = htonl(MLX4_INVALID_LKEY);
1022 + srq->tail = srq->max - 1;
1028 int mlx4_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr,
1029 struct mlx4_srq *srq)
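
For reference, the free-list wiring done by mlx4_set_srq_buf() above links WQE i to WQE i+1 and wraps the last entry back to 0 with the mask (srq->max - 1), which assumes srq->max is a power of two. The minimal standalone illustration below shows only that wiring; it uses a plain index array and omits the real WQE struct layout, the htons() byte-swapping, and the scatter-entry initialisation from the patch.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	/* srq->max must be a power of two for the mask wrap to work. */
	const uint32_t max = 8;
	uint16_t *next_wqe_index = malloc(max * sizeof(*next_wqe_index));
	uint32_t i;

	if (!next_wqe_index)
		return 1;

	/* Link WQE i to WQE i+1, with the last entry wrapping to 0,
	 * as next->next_wqe_index is set in mlx4_set_srq_buf(). */
	for (i = 0; i < max; ++i)
		next_wqe_index[i] = (uint16_t)((i + 1) & (max - 1));

	for (i = 0; i < max; ++i)
		printf("wqe %u -> %u\n", i, (unsigned)next_wqe_index[i]);

	/* head starts at 0 and tail at max - 1, so the whole ring is free. */
	printf("head=0 tail=%u\n", max - 1);
	free(next_wqe_index);
	return 0;
}
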
1031 diff -r -u /tmp/839450/libmlx4-1.0.1/src/dbrec.c libmlx4-1.0.1/src/dbrec.c
1032 --- /tmp/839450/libmlx4-1.0.1/src/dbrec.c Tue Sep 8 06:40:36 2009
1033 +++ libmlx4-1.0.1/src/dbrec.c Fri Feb 11 04:05:04 2011
1038 +#if defined(__SVR4) && defined(__sun)
1040 + * In Solaris the doorbell UAR is set up by the kernel; we only
1041 + * mmap the offset specified for the doorbell into the user
1042 + * address space. A reference-counted page list of the doorbell
1043 + * pages that have been mapped is maintained per user context.
1045 struct mlx4_db_page {
1046 + struct mlx4_db_page *prev, *next;
1047 + uint32_t *dbp_page_addr;
1048 + uint32_t *dbp_page_addr_end;
1049 + uint64_t dbp_map_offset;
1050 + uint64_t dbp_map_len;
1056 + * These are not required in Solaris, but we keep them to pass
1057 + * as dummy arguments so that the doorbell function calls can
1058 + * keep the same API.
1060 +static const int db_size[] = {
1061 + [MLX4_DB_TYPE_CQ] = 8,
1062 + [MLX4_DB_TYPE_RQ] = 4,
1066 + * Return a doorbell pointer for the specified map offset. If this
1067 + * offset has not been previously mapped, it will be mmap'd and the
1068 + * appropriate doorbell returned; otherwise, the reference count
1069 + * will be updated and the appropriate doorbell will be returned.
1070 + * Each successful call to mlx4_alloc_db() must call mlx4_free_db()
1071 + * to release the reference to the doorbell page when the doorbell
1072 + * is no longer required.
1074 +uint32_t *mlx4_alloc_db(struct mlx4_context *context,
1075 + uint64_t mapoffset,
1079 + struct mlx4_db_page *page;
1080 + uint32_t *db = NULL;
1082 + pthread_mutex_lock(&context->db_list_mutex);
1084 + for (page = context->db_page_list; page; page = page->next) {
1085 + if (page->dbp_map_offset == mapoffset &&
1086 + page->fd == context->ibv_ctx.mmap_fd) {
1087 + if (page->dbp_map_len == maplen) {
1091 + PFX "mlx4_alloc_db: Bad maplen (%" PRId64 ")\n",
1097 + page = malloc(sizeof(struct mlx4_db_page));
1099 + fprintf(stderr, PFX "DB alloc memory allocate failure\n");
1103 + page->dbp_page_addr = (uint32_t *) mmap64((void *)0, maplen,
1104 + (PROT_READ | PROT_WRITE),
1106 + context->ibv_ctx.mmap_fd,
1108 + if (page->dbp_page_addr == MAP_FAILED) {
1109 + fprintf(stderr, PFX
1110 + "Unable to map doorbell entry: maplen:%" PRId64 ", offset:%" PRId64
1111 + "\n", maplen, mapoffset);
1116 + page->dbp_page_addr_end = (uint32_t *)((uint8_t *) page->dbp_page_addr + maplen);
1118 + page->dbp_map_offset = mapoffset;
1119 + page->dbp_map_len = maplen;
1120 + page->dbp_use_cnt = 0;
1121 + page->fd = context->ibv_ctx.mmap_fd;
1122 + page->next = context->db_page_list;
1124 + if (context->db_page_list)
1125 + context->db_page_list->prev = page;
1127 + page->prev = NULL;
1128 + context->db_page_list = page;
1131 + page->dbp_use_cnt++;
1134 + db = (uint32_t *) ((uint8_t *)page->dbp_page_addr + offset);
1136 + pthread_mutex_unlock(&context->db_list_mutex);
1142 + * Dereference the doorbell page mapping associated with the specified doorbell.
1143 + * If this is the last doorbell that references a mapped region, then that
1144 + * region will be unmapped.
1146 +void mlx4_free_db(struct mlx4_context *context,
1147 + enum mlx4_db_type dummy,
1150 + struct mlx4_db_page *page;
1152 + pthread_mutex_lock(&context->db_list_mutex);
1154 + for (page = context->db_page_list; page; page = page->next) {
1155 + if (db >= page->dbp_page_addr && db < page->dbp_page_addr_end) {
1160 + if (page == NULL) {
1161 + fprintf(stderr, PFX "mlx4_free_db: Doorbell not mapped\n");
1165 + page->dbp_use_cnt--;
1166 + if (page->dbp_use_cnt > 0) {
1170 + munmap((void *)page->dbp_page_addr, page->dbp_map_len);
1172 + page->next->prev = page->prev;
1175 + page->prev->next = page->next;
1177 + context->db_page_list = page->next;
1183 + pthread_mutex_unlock(&context->db_list_mutex);
1188 +struct mlx4_db_page {
1189 struct mlx4_db_page *prev, *next;
1190 struct mlx4_buf buf;
1194 pthread_mutex_unlock(&context->db_list_mutex);
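
The dbrec.c replacement above keeps one mlx4_db_page per mapped doorbell page, keyed by the kernel-supplied map offset and reference counted, so repeated mlx4_alloc_db() calls for the same offset share a single mmap64() and the page is unmapped only when the last mlx4_free_db() drops the count to zero. A reduced, self-contained model of that bookkeeping is sketched below; the names db_page, db_get and db_put are illustrative, malloc() stands in for mmap64(), and the per-context mutex, fd check and maplen-mismatch check from the patch are omitted.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Simplified stand-in for struct mlx4_db_page: one node per mapped page. */
struct db_page {
	struct db_page *next;
	uint64_t map_offset;	/* key: kernel-supplied map offset   */
	uint8_t *addr;		/* malloc() here; mmap64() in the patch */
	size_t len;
	int use_cnt;		/* dbp_use_cnt in the patch          */
};

static struct db_page *page_list;

/* Look up (or "map") the page for mapoffset and return a doorbell pointer. */
static uint32_t *db_get(uint64_t mapoffset, size_t maplen, uint32_t offset)
{
	struct db_page *page;

	for (page = page_list; page; page = page->next)
		if (page->map_offset == mapoffset)
			goto found;

	page = calloc(1, sizeof(*page));
	if (!page)
		return NULL;
	page->addr = calloc(1, maplen);		/* mmap64() in the patch */
	if (!page->addr) {
		free(page);
		return NULL;
	}
	page->map_offset = mapoffset;
	page->len = maplen;
	page->next = page_list;
	page_list = page;
found:
	page->use_cnt++;
	return (uint32_t *)(page->addr + offset);
}

/* Drop one reference; free ("unmap") the page when the last user goes away. */
static void db_put(uint32_t *db)
{
	struct db_page *page, **pp;

	for (pp = &page_list; (page = *pp) != NULL; pp = &page->next) {
		if ((uint8_t *)db >= page->addr &&
		    (uint8_t *)db < page->addr + page->len) {
			if (--page->use_cnt == 0) {
				*pp = page->next;
				free(page->addr);
				free(page);
			}
			return;
		}
	}
	fprintf(stderr, "db_put: doorbell not mapped\n");
}

int main(void)
{
	uint32_t *a = db_get(0x1000, 4096, 8);	/* maps the page        */
	uint32_t *b = db_get(0x1000, 4096, 16);	/* reuses the same page */

	if (a && b)
		printf("doorbells share a page, %ld words apart\n",
		    (long)(b - a));
	db_put(a);				/* refcount 2 -> 1              */
	db_put(b);				/* refcount 1 -> 0, page freed  */
	return 0;
}
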
1197 diff -r -u /tmp/839450/libmlx4-1.0.1/src/buf.c libmlx4-1.0.1/src/buf.c
1198 --- /tmp/839450/libmlx4-1.0.1/src/buf.c Tue Sep 8 06:41:26 2009
1199 +++ libmlx4-1.0.1/src/buf.c Fri Feb 11 04:05:03 2011
1202 void mlx4_free_buf(struct mlx4_buf *buf)
1204 +#if !(defined(__SVR4) && defined(__sun))
1205 ibv_dofork_range(buf->buf, buf->length);
1207 munmap(buf->buf, buf->length);
1209 diff -r -u /tmp/839450/libmlx4-1.0.1/src/mlx4.c libmlx4-1.0.1/src/mlx4.c
1210 --- /tmp/839450/libmlx4-1.0.1/src/mlx4.c Thu Mar 10 04:48:34 2011
1211 +++ libmlx4-1.0.1/src/mlx4.c Fri Mar 11 14:05:26 2011
1212 @@ -112,8 +112,13 @@
1213 .query_port = mlx4_query_port,
1214 .alloc_pd = mlx4_alloc_pd,
1215 .dealloc_pd = mlx4_free_pd,
1216 + .alloc_shpd = mlx4_alloc_shpd,
1217 + .share_pd = mlx4_share_pd,
1218 .reg_mr = mlx4_reg_mr,
1219 + .reg_mr_relaxed = mlx4_reg_mr_relaxed,
1220 .dereg_mr = mlx4_dereg_mr,
1221 + .dereg_mr_relaxed = mlx4_dereg_mr_relaxed,
1222 + .flush_relaxed_mr = mlx4_flush_relaxed_mr,
1223 .create_cq = mlx4_create_cq,
1224 .poll_cq = mlx4_poll_cq,
1225 .req_notify_cq = mlx4_arm_cq,
1226 @@ -144,6 +149,11 @@
1227 struct mlx4_alloc_ucontext_resp resp;
1229 struct ibv_device_attr dev_attrs;
1230 +#if defined(__SVR4) && defined(__sun)
1232 + off64_t uarpg_offset;
1233 + uint32_t temp_qp_num;
1236 context = calloc(1, sizeof *context);
1238 @@ -150,11 +160,30 @@
1241 context->ibv_ctx.cmd_fd = cmd_fd;
1242 +#if defined(__SVR4) && defined(__sun)
1243 + context->ibv_ctx.device = ibdev;
1246 if (ibv_cmd_get_context(&context->ibv_ctx, &cmd, sizeof cmd,
1247 &resp.ibv_resp, sizeof resp))
1250 +#if defined(__SVR4) && defined(__sun)
1252 +	 * OFED expects a power of two; round up here to make user table
1255 + for (temp_qp_num = 1; temp_qp_num < resp.qp_tab_size; temp_qp_num <<= 1)
1257 + resp.qp_tab_size = temp_qp_num;
1260 + * NOTE: In Solaris this value is not returned in the channel interface
1261 + * opaque data and is assumed to be 2*256 by the dapl code. We have
1262 + * made the same assumption here.
1264 + resp.bf_reg_size = 512;
1266 context->num_qps = resp.qp_tab_size;
1267 context->qp_table_shift = ffs(context->num_qps) - 1 - MLX4_QP_TABLE_BITS;
1268 context->qp_table_mask = (1 << context->qp_table_shift) - 1;
1269 @@ -172,20 +201,44 @@
1270 for (i = 0; i < MLX4_XRC_SRQ_TABLE_SIZE; ++i)
1271 context->xrc_srq_table[i].refcnt = 0;
1273 +#if defined(__SVR4) && defined(__sun)
1274 + context->db_page_list = NULL;
1276 for (i = 0; i < MLX4_NUM_DB_TYPE; ++i)
1277 context->db_list[i] = NULL;
1280 pthread_mutex_init(&context->db_list_mutex, NULL);
1282 +#if defined(__SVR4) && defined(__sun)
1283 + cur_pid = getpid();
1284 + uarpg_offset = (((off64_t) cur_pid << MLNX_UMAP_RSRC_TYPE_SHIFT) |
1285 + MLNX_UMAP_UARPG_RSRC) * to_mdev(ibdev)->page_size;
1286 + context->uar = mmap64((void *)0, to_mdev(ibdev)->page_size, PROT_WRITE,
1287 + MAP_SHARED, context->ibv_ctx.mmap_fd, uarpg_offset);
1289 context->uar = mmap(NULL, to_mdev(ibdev)->page_size, PROT_WRITE,
1290 MAP_SHARED, cmd_fd, 0);
1292 if (context->uar == MAP_FAILED)
1295 if (resp.bf_reg_size) {
1296 +#if defined(__SVR4) && defined(__sun)
1298 +		 * If the kernel driver supports the Blue Flame feature, map
1299 + * the Blue Flame user access region as well.
1301 + uarpg_offset = (((off64_t) cur_pid << MLNX_UMAP_RSRC_TYPE_SHIFT) |
1302 + MLNX_UMAP_BLUEFLAMEPG_RSRC) * to_mdev(ibdev)->page_size;
1303 + context->bf_page = mmap64((void *)0, to_mdev(ibdev)->page_size,
1304 + PROT_WRITE, MAP_SHARED, context->ibv_ctx.mmap_fd,
1307 context->bf_page = mmap(NULL, to_mdev(ibdev)->page_size,
1308 PROT_WRITE, MAP_SHARED, cmd_fd,
1309 to_mdev(ibdev)->page_size);
1311 if (context->bf_page == MAP_FAILED) {
1312 fprintf(stderr, PFX "Warning: BlueFlame available, "
1313 "but failed to mmap() BlueFlame page.\n");
1315 context->max_qp_wr = dev_attrs.max_qp_wr;
1316 context->max_sge = dev_attrs.max_sge;
1317 context->max_cqe = dev_attrs.max_cqe;
1318 +#ifdef HAVE_IBV_XRC_OPS
1319 if (!(dev_attrs.device_cap_flags & IBV_DEVICE_XRC)) {
1320 fprintf(stderr, PFX "There is a mismatch between "
1321 "the kernel and the userspace libraries: "
1323 "Kernel does not support XRC. Exiting.\n");
1328 return &context->ibv_ctx;
1331 munmap(context->uar, to_mdev(ibctx->device)->page_size);
1332 if (context->bf_page)
1333 munmap(context->bf_page, to_mdev(ibctx->device)->page_size);
1338 diff -r -u /tmp/839450/libmlx4-1.0.1/configure.in libmlx4-1.0.1/configure.in
1339 --- /tmp/839450/libmlx4-1.0.1/configure.in Thu Mar 10 04:48:34 2011
1340 +++ libmlx4-1.0.1/configure.in Tue Mar 15 07:41:46 2011
1343 AC_CHECK_SIZEOF(long)
1344 AC_CHECK_MEMBER(struct ibv_context.more_ops,
1345 - [AC_DEFINE([HAVE_IBV_MORE_OPS], 1, [Define to 1 if more_ops is a member of ibv_context])],,
1346 + [AC_DEFINE([HAVE_IBV_MORE_OPS], 0, [Define to 1 if more_ops is a member of ibv_context])],,
1347 [#include <infiniband/verbs.h>])
1348 -AC_CHECK_MEMBER(struct ibv_more_ops.create_xrc_srq,
1349 - [AC_DEFINE([HAVE_IBV_XRC_OPS], 1, [Define to 1 if have xrc ops])],,
1350 - [#include <infiniband/verbs.h>])
1351 +#AC_CHECK_MEMBER(struct ibv_more_ops.create_xrc_srq,
1352 +# [AC_DEFINE([HAVE_IBV_XRC_OPS], 1, [Define to 1 if have xrc ops])],,
1353 +# [#include <infiniband/verbs.h>])
1355 dnl Checks for library functions
1356 AC_CHECK_FUNC(ibv_read_sysfs_file, [],
1359 AC_SUBST(MLX4_VERSION_SCRIPT)
1361 -SHAVE_INIT([], [enable])
1362 AC_CONFIG_FILES([Makefile libmlx4.spec shave shave-libtool])
1364 diff -r -u /tmp/839450/libmlx4-1.0.1/libmlx4.spec.in libmlx4-1.0.1/libmlx4.spec.in
1365 --- /tmp/839450/libmlx4-1.0.1/libmlx4.spec.in Thu Mar 10 00:23:34 2011
1366 +++ libmlx4-1.0.1/libmlx4.spec.in Tue Mar 15 07:43:54 2011
1368 Group: System Environment/Libraries
1369 License: GPLv2 or BSD
1370 Url: http://openfabrics.org/
1371 -Source: http://openfabrics.org/downloads/libmlx4/libmlx4-1.0.1.tar.gz
1372 +Source: http://openfabrics.org/downloads/libmlx4-1.0.1.tar.gz
1373 BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
1375 BuildRequires: libibverbs-devel >= 1.1-0.1.rc2
1376 diff -r -u /tmp/839450/libmlx4-1.0.1/configure libmlx4-1.0.1/configure
1377 --- /tmp/839450/libmlx4-1.0.1/configure Thu Mar 10 04:48:41 2011
1378 +++ libmlx4-1.0.1/configure Tue Mar 15 07:35:49 2011
1379 @@ -3899,13 +3899,13 @@
1380 CFLAGS=$ac_save_CFLAGS
1381 elif test $ac_cv_prog_cc_g = yes; then
1382 if test "$GCC" = yes; then
1389 if test "$GCC" = yes; then
1395 @@ -8890,6 +8890,7 @@
1398 link_all_deplibs=yes
1399 + hardcode_libdir_flag_spec=
1403 @@ -11113,13 +11114,13 @@
1404 CFLAGS=$ac_save_CFLAGS
1405 elif test $ac_cv_prog_cc_g = yes; then
1406 if test "$GCC" = yes; then
1413 if test "$GCC" = yes; then
1419 @@ -11654,11 +11655,11 @@
1421 ac_fn_c_check_member "$LINENO" "struct ibv_more_ops" "create_xrc_srq" "ac_cv_member_struct_ibv_more_ops_create_xrc_srq" "#include <infiniband/verbs.h>
1423 -if test "x$ac_cv_member_struct_ibv_more_ops_create_xrc_srq" = x""yes; then :
1424 +#if test "x$ac_cv_member_struct_ibv_more_ops_create_xrc_srq" = x""yes; then :
1426 -$as_echo "#define HAVE_IBV_XRC_OPS 1" >>confdefs.h
1427 +#$as_echo "#define HAVE_IBV_XRC_OPS 1" >>confdefs.h
1433 ac_fn_c_check_func "$LINENO" "ibv_read_sysfs_file" "ac_cv_func_ibv_read_sysfs_file"
1434 diff -r -u /tmp/839450/libmlx4-1.0.1/Makefile.in libmlx4-1.0.1/Makefile.in
1435 --- /tmp/839450/libmlx4-1.0.1/Makefile.in Thu Mar 10 04:48:40 2011
1436 +++ libmlx4-1.0.1/Makefile.in Tue Mar 15 07:48:16 2011
1438 @HAVE_IBV_DEVICE_LIBRARY_EXTENSION_TRUE@src_libmlx4_la_LDFLAGS = -avoid-version -release @IBV_DEVICE_LIBRARY_EXTENSION@ \
1439 @HAVE_IBV_DEVICE_LIBRARY_EXTENSION_TRUE@ $(mlx4_version_script)
1441 -@HAVE_IBV_DEVICE_LIBRARY_EXTENSION_TRUE@mlx4confdir = $(sysconfdir)/libibverbs.d
1442 +@HAVE_IBV_DEVICE_LIBRARY_EXTENSION_TRUE@mlx4confdir = $(datadir)/libibverbs.d
1443 @HAVE_IBV_DEVICE_LIBRARY_EXTENSION_TRUE@mlx4conf_DATA = mlx4.driver
1444 @HAVE_IBV_DEVICE_LIBRARY_EXTENSION_FALSE@mlx4libdir = $(libdir)/infiniband
1445 @HAVE_IBV_DEVICE_LIBRARY_EXTENSION_FALSE@mlx4lib_LTLIBRARIES = src/mlx4.la
1449 test -z "$$list2" || { \
1450 - echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
1451 - $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
1452 + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) -m 755 $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \
1453 + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) -m 755 $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \
1456 uninstall-libLTLIBRARIES:
1460 test -z "$$list2" || { \
1461 - echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(mlx4libdir)'"; \
1462 - $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(mlx4libdir)"; \
1463 + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) -m755 $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(mlx4libdir)'"; \
1464 + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) -m755 $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(mlx4libdir)"; \
1467 uninstall-mlx4libLTLIBRARIES: