/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
 */

#include <ib_smi.h> /* struct ib_smp, IB_MGMT_* and IB_SMP_* definitions */

#include "mthca_dev.h"
#include "mthca_cmd.h"

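/*
 * The device/port/pkey/gid queries below share one pattern: build a
 * subnet management MAD by hand, execute it against the firmware with
 * the MAD_IFC command, and decode fields of the response at the
 * offsets the IB spec defines for the attribute.
 */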
static int mthca_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mthca_dev *mdev = to_mdev(ibdev);
        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        props->fw_ver = mdev->fw_ver;

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(mdev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->device_cap_flags = mdev->device_cap_flags;
        props->vendor_id        = be32_to_cpup((u32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id   = be16_to_cpup((u16 *) (out_mad->data + 30));
        props->hw_ver           = be16_to_cpup((u16 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
        memcpy(&props->node_guid,      out_mad->data + 12, 8);

        props->max_mr_size         = ~0ull;
        props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
        props->max_qp_wr           = 0xffff;
        props->max_sge             = mdev->limits.max_sg;
        props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
        props->max_cqe             = 0xffff;
        props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
        props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
        props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
        props->max_qp_init_rd_atom = 1 << mdev->qp_table.rdb_shift;
        props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;

        err = 0;
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

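/*
 * Port attributes are decoded from a PortInfo MAD; the byte offsets
 * into out_mad->data below follow the PortInfo attribute layout.
 */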
static int mthca_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod      = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->lid            = be16_to_cpup((u16 *) (out_mad->data + 16));
        props->lmc            = out_mad->data[34] & 0x7;
        props->sm_lid         = be16_to_cpup((u16 *) (out_mad->data + 18));
        props->sm_sl          = out_mad->data[36] & 0xf;
        props->state          = out_mad->data[32] & 0xf;
        props->phys_state     = out_mad->data[33] >> 4;
        props->port_cap_flags = be32_to_cpup((u32 *) (out_mad->data + 20));
        props->gid_tbl_len    = to_mdev(ibdev)->limits.gid_table_len;
        props->pkey_tbl_len   = to_mdev(ibdev)->limits.pkey_table_len;
        props->qkey_viol_cntr = be16_to_cpup((u16 *) (out_mad->data + 48));
        props->active_width   = out_mad->data[31] & 0xf;
        props->active_speed   = out_mad->data[35] >> 4;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

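/*
 * cap_mask_mutex serializes the read-modify-write of the port
 * capability mask: the new mask sent to SET_IB is computed from the
 * current flags plus the caller's set/clear bits.
 */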
static int mthca_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
{
        struct mthca_set_ib_param set_ib;
        struct ib_port_attr attr;
        int err;
        u8 status;

        if (down_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                return -ERESTARTSYS;

        err = mthca_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        set_ib.set_si_guid     = 0;
        set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

        set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
        if (err)
                goto out;
        if (status)
                err = -EINVAL;

out:
        up(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}

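/*
 * One PkeyTable MAD returns a block of 32 P_Keys: attr_mod selects
 * the block (index / 32) and the entry is picked out of the response
 * with index % 32.
 */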
static int mthca_query_pkey(struct ib_device *ibdev,
                            u8 port, u16 index, u16 *pkey)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod      = cpu_to_be32(index / 32);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        *pkey = be16_to_cpu(((u16 *) out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

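/*
 * A GID is assembled from two queries: the 8-byte subnet prefix from
 * PortInfo and the 8-byte GUID from the matching GuidInfo block
 * (eight GUIDs per block).
 */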
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod      = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(gid->raw, out_mad->data + 8, 8);

        memset(in_mad, 0, sizeof *in_mad);
        in_mad->base_version  = 1;
        in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        in_mad->class_version = 1;
        in_mad->method        = IB_MGMT_METHOD_GET;
        in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod      = cpu_to_be32(index / 8);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        /* GuidInfo packs eight 8-byte GUIDs per block */
        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

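/*
 * The verb objects below (PDs, AHs, QPs, CQs, MRs, FMRs) all follow
 * the same pattern: kmalloc the mthca container struct, initialize it
 * through the matching mthca_*_alloc helper, and return the embedded
 * ib_* struct to the core, freeing the container on error.
 */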
static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev)
{
        struct mthca_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mthca_pd_alloc(to_mdev(ibdev), pd);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
        mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
        kfree(pd);

        return 0;
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
{
        int err;
        struct mthca_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
        if (err) {
                kfree(ah);
                return ERR_PTR(err);
        }

        return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
        kfree(ah);

        return 0;
}

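/*
 * Regular RC/UC/UD QPs get their queue pair number from
 * mthca_alloc_qp(); special QPs (SMI/GSI) use the larger mthca_sqp
 * container and have the fixed QP numbers 0 and 1.
 */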
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                                     struct ib_qp_init_attr *init_attr)
{
        struct mthca_qp *qp;
        int err;

        switch (init_attr->qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        {
                qp = kmalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->sq.max    = init_attr->cap.max_send_wr;
                qp->rq.max    = init_attr->cap.max_recv_wr;
                qp->sq.max_gs = init_attr->cap.max_send_sge;
                qp->rq.max_gs = init_attr->cap.max_recv_sge;

                err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
                                     qp);
                qp->ibqp.qp_num = qp->qpn;
                break;
        }
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        {
                qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->sq.max    = init_attr->cap.max_send_wr;
                qp->rq.max    = init_attr->cap.max_recv_wr;
                qp->sq.max_gs = init_attr->cap.max_send_sge;
                qp->rq.max_gs = init_attr->cap.max_recv_sge;

                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

                err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->sq_sig_type,
                                      qp->ibqp.qp_num, init_attr->port_num,
                                      to_msqp(qp));
                break;
        }
        default:
                /* Don't support raw QPs */
                return ERR_PTR(-ENOSYS);
        }

        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        init_attr->cap.max_inline_data = 0;

        return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
        mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
        kfree(qp);

        return 0;
}

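/*
 * CQ sizes must be a power of two, so the requested number of
 * entries is rounded up before handing it to mthca_init_cq().
 */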
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries)
{
        struct mthca_cq *cq;
        int nent;
        int err;

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        for (nent = 1; nent <= entries; nent <<= 1)
                ; /* nothing */

        err = mthca_init_cq(to_mdev(ibdev), nent, cq);
        if (err) {
                kfree(cq);
                return ERR_PTR(err);
        }

        return &cq->ibcq;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
        mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
        kfree(cq);

        return 0;
}

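/*
 * Translate IB verbs access flags into the MPT (memory protection
 * table) flag bits the HCA uses; local read access is always granted.
 */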
static inline u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
               MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mthca_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mthca_mr_alloc_notrans(to_mdev(pd->device),
                                     to_mpd(pd)->pd_num,
                                     convert_access(acc), mr);
        if (err) {
                kfree(mr);
                return ERR_PTR(err);
        }

        return &mr->ibmr;
}

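/*
 * Registering a physical buffer list: find the largest page shift
 * that covers every buffer, flatten the buffers into an array of
 * page addresses, and pass that to mthca_mr_alloc_phys().
 */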
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
                                       struct ib_phys_buf *buffer_list,
                                       int                 num_phys_buf,
                                       int                 acc,
                                       u64                *iova_start)
{
        struct mthca_mr *mr;
        u64 *page_list;
        u64 total_size;
        u64 mask;
        int shift;
        int npages;
        int err;
        int i, j, n;

        /* First check that we have enough alignment */
        if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
                return ERR_PTR(-EINVAL);

        if (num_phys_buf > 1 &&
            ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK))
                return ERR_PTR(-EINVAL);

        mask = 0;
        total_size = 0;
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
                        return ERR_PTR(-EINVAL);
                if (i != 0 && i != num_phys_buf - 1 &&
                    (buffer_list[i].size & ~PAGE_MASK))
                        return ERR_PTR(-EINVAL);

                total_size += buffer_list[i].size;
                if (i > 0)
                        mask |= buffer_list[i].addr;
        }

        /* Find largest page shift we can use to cover buffers */
        for (shift = PAGE_SHIFT; shift < 31; ++shift)
                if (num_phys_buf > 1) {
                        if ((1ULL << shift) & mask)
                                break;
                } else {
                        if (1ULL << shift >=
                            buffer_list[0].size +
                            (buffer_list[0].addr & ((1ULL << shift) - 1)))
                                break;
                }

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
        buffer_list[0].addr &= ~0ull << shift;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

        if (!npages)
                return &mr->ibmr;

        page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
        if (!page_list) {
                kfree(mr);
                return ERR_PTR(-ENOMEM);
        }

        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
                     ++j)
                        page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

        mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
                  "in PD %x; shift %d, npages %d.\n",
                  (unsigned long long) buffer_list[0].addr,
                  (unsigned long long) *iova_start,
                  to_mpd(pd)->pd_num,
                  shift, npages);

        err = mthca_mr_alloc_phys(to_mdev(pd->device),
                                  to_mpd(pd)->pd_num,
                                  page_list, shift, npages,
                                  *iova_start, total_size,
                                  convert_access(acc), mr);
        if (err) {
                kfree(page_list);
                kfree(mr);
                return ERR_PTR(err);
        }

        kfree(page_list);
        return &mr->ibmr;
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
        struct mthca_mr *mmr = to_mmr(mr);

        mthca_free_mr(to_mdev(mr->device), mmr);
        kfree(mmr);
        return 0;
}

static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                                      struct ib_fmr_attr *fmr_attr)
{
        struct mthca_fmr *fmr;
        int err;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
                              convert_access(mr_access_flags), fmr);
        if (err) {
                kfree(fmr);
                return ERR_PTR(err);
        }

        return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
        struct mthca_fmr *mfmr = to_mfmr(fmr);
        int err;

        err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
        if (err)
                return err;

        kfree(mfmr);
        return 0;
}

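/*
 * All FMRs in the list must belong to one device, since the final
 * SYNC_TPT command is issued only once; mem-free (Arbel) and Tavor
 * hardware take different unmap paths.
 */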
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;
        int err;
        u8 status;
        struct mthca_dev *mdev = NULL;

        list_for_each_entry(fmr, fmr_list, list) {
                if (mdev && to_mdev(fmr->device) != mdev)
                        return -EINVAL;
                mdev = to_mdev(fmr->device);
        }

        if (!mdev)
                return 0;

        if (mthca_is_memfree(mdev)) {
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

                wmb();
        } else
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

        err = mthca_SYNC_TPT(mdev, &status);
        if (err)
                return err;
        if (status)
                return -EINVAL;
        return 0;
}

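/* sysfs attributes: hardware revision, firmware version and HCA type */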
static ssize_t show_rev(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%x.%x.%x\n", (int) (dev->fw_ver >> 32),
                       (int) (dev->fw_ver >> 16) & 0xffff,
                       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        switch (dev->pdev->device) {
        case PCI_DEVICE_ID_MELLANOX_TAVOR:
                return sprintf(buf, "MT23108\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
                return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL:
                return sprintf(buf, "MT25208\n");
        case PCI_DEVICE_ID_MELLANOX_SINAI:
        case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
                return sprintf(buf, "MT25204\n");
        default:
                return sprintf(buf, "unknown\n");
        }
}

static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);

static struct class_device_attribute *mthca_class_attributes[] = {
        &class_device_attr_hw_rev,
        &class_device_attr_fw_ver,
        &class_device_attr_hca_type
};

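/*
 * Fill in the ib_device method table with the mthca implementations
 * above, register with the IB core, and create the sysfs attribute
 * files, unwinding the registration if that fails.
 */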
int mthca_register_device(struct mthca_dev *dev)
{
        int ret;
        int i;

        strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.node_type            = IB_NODE_CA;
        dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
        dev->ib_dev.dma_device           = &dev->pdev->dev;
        dev->ib_dev.class_dev.dev        = &dev->pdev->dev;
        dev->ib_dev.query_device         = mthca_query_device;
        dev->ib_dev.query_port           = mthca_query_port;
        dev->ib_dev.modify_port          = mthca_modify_port;
        dev->ib_dev.query_pkey           = mthca_query_pkey;
        dev->ib_dev.query_gid            = mthca_query_gid;
        dev->ib_dev.alloc_pd             = mthca_alloc_pd;
        dev->ib_dev.dealloc_pd           = mthca_dealloc_pd;
        dev->ib_dev.create_ah            = mthca_ah_create;
        dev->ib_dev.destroy_ah           = mthca_ah_destroy;
        dev->ib_dev.create_qp            = mthca_create_qp;
        dev->ib_dev.modify_qp            = mthca_modify_qp;
        dev->ib_dev.destroy_qp           = mthca_destroy_qp;
        dev->ib_dev.create_cq            = mthca_create_cq;
        dev->ib_dev.destroy_cq           = mthca_destroy_cq;
        dev->ib_dev.poll_cq              = mthca_poll_cq;
        dev->ib_dev.get_dma_mr           = mthca_get_dma_mr;
        dev->ib_dev.reg_phys_mr          = mthca_reg_phys_mr;
        dev->ib_dev.dereg_mr             = mthca_dereg_mr;

        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
                dev->ib_dev.alloc_fmr            = mthca_alloc_fmr;
                dev->ib_dev.unmap_fmr            = mthca_unmap_fmr;
                dev->ib_dev.dealloc_fmr          = mthca_dealloc_fmr;
                if (mthca_is_memfree(dev))
                        dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
                else
                        dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
        }

        dev->ib_dev.attach_mcast         = mthca_multicast_attach;
        dev->ib_dev.detach_mcast         = mthca_multicast_detach;
        dev->ib_dev.process_mad          = mthca_process_mad;

        if (mthca_is_memfree(dev)) {
                dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
                dev->ib_dev.post_send     = mthca_arbel_post_send;
                dev->ib_dev.post_recv     = mthca_arbel_post_receive;
        } else {
                dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
                dev->ib_dev.post_send     = mthca_tavor_post_send;
                dev->ib_dev.post_recv     = mthca_tavor_post_receive;
        }

        init_MUTEX(&dev->cap_mask_mutex);

        ret = ib_register_device(&dev->ib_dev);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) {
                ret = class_device_create_file(&dev->ib_dev.class_dev,
                                               mthca_class_attributes[i]);
                if (ret) {
                        ib_unregister_device(&dev->ib_dev);
                        return ret;
                }
        }

        return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
        ib_unregister_device(&dev->ib_dev);
}