/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>

#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"
#include "common.h"
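
/*
 * iWARP has no address handles, multicast groups, or MADs; the verbs
 * below exist only to satisfy the ib_device interface and fail with
 * -ENOSYS.
 */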
static int iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}

static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return -ENOSYS;
}
static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}
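
/*
 * A user context carries a list of iwch_mm_entry records, one per
 * queue object whose memory userspace is allowed to map.  Each record
 * pairs an opaque key (handed back in the create_* response) with the
 * physical address and length that a later mmap() of that key should
 * map; see iwch_mmap() below.
 */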
static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}
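
/*
 * CQ creation.  For user CQs the ring is exported to userspace via the
 * mmap rendezvous described above: a fresh per-context key is taken
 * under mmap_lock, returned in the create_cq response, and recorded in
 * an iwch_mm_entry.
 */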
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
						ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Don't allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
				       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
			__func__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
}
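
/*
 * Re-arm the CQ to request a notification.  For user CQs the consumer
 * index lives in user memory, so the current value is fetched with
 * get_user() and folded into the software CQ state before issuing the
 * hardware CQ op.
 */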
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}
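
/*
 * Service mmap() on the iwch device file.  Two classes of address are
 * handled: offsets that fall within the adapter's user doorbell region
 * are mapped uncached (and must not be mapped readable), while
 * everything else is contiguous WQ/CQ queue memory handed out by
 * iwch_create_cq() above and iwch_create_qp() below.
 */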
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}
static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					int acc,
					u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}
static int iwch_reregister_phys_mem(struct ib_mr *mr,
				    int mr_rereg_mask,
				    struct ib_pd *pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf,
				    int acc, u64 * iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
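
/*
 * Register a userspace memory region: the pages are pinned with
 * ib_umem_get() and their DMA addresses streamed into the adapter's
 * PBL via iwch_write_pbl(), batched through a single scratch page of
 * __be64 entries.
 */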
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = -EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	PDBG("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}
static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}
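
/*
 * Allocate a fast-register MR: a PBL of the requested depth and a stag
 * are reserved up front; the page list itself is supplied later by a
 * fastreg work request.
 */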
static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = -ENOMEM;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		goto err;

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}
static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
					struct ib_device *device,
					int page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),
			    GFP_KERNEL);
	if (!page_list)
		return ERR_PTR(-ENOMEM);

	page_list->page_list = (u64 *)(page_list + 1);
	page_list->max_page_list_len = page_list_len;

	return page_list;
}

static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)
{
	kfree(page_list);
}
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}
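
/*
 * Create an RC QP.  T3 sizing rules enforced below: the RQT depth must
 * be a power of two strictly greater than the requested RQ depth (and
 * at least 16), and kernel QPs get extra WQ space because fastreg WRs
 * can take 2 WR fragments.
 */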
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should.
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}
void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}
static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}
static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}
static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}
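
/*
 * The firmware revision is only available as the ethtool drvinfo
 * version string; skip the leading type character, pull out the dotted
 * major/minor/micro fields, and pack them into the u64 that
 * ib_device_attr.fw_ver expects.
 */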
static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}
static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{

	struct iwch_dev *dev;
	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}
static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}
static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}
static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}
static int iwch_get_mib(struct ib_device *ibdev,
			union rdma_protocol_stats *stats)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	memset(stats, 0, sizeof *stats);
	stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
				m.ipInReceive_lo;
	stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
				  m.ipInHdrErrors_lo;
	stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
				   m.ipInAddrErrors_lo;
	stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
				      m.ipInUnknownProtos_lo;
	stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
				 m.ipInDiscards_lo;
	stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
				 m.ipInDelivers_lo;
	stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
				  m.ipOutRequests_lo;
	stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
				  m.ipOutDiscards_lo;
	stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
				  m.ipOutNoRoutes_lo;
	stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
	stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
	stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
	stats->iw.ipReasmFails = (u64) m.ipReasmFails;
	stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
	stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
	stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
	stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
	stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
	stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
	stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
			      m.tcpInSegs_lo;
	stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
			       m.tcpOutSegs_lo;
	stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
				   m.tcpRetransSegs_lo;
	stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
			      m.tcpInErrs_lo;
	stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
	stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
	return 0;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
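
/*
 * Fill in the ib_device and iw_cm_verbs function tables and register
 * with the RDMA core; the sysfs attributes (hw_rev, fw_ver, hca_type,
 * board_id) are created last, and everything unwinds on failure.
 */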
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
	dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
	dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.get_protocol_stats = iwch_get_mib;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}
void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	return;
}