/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"
#include "common.h"
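
/*
 * T3 is an iWARP RNIC: address handles, multicast and MAD processing
 * are InfiniBand-only concepts, so the corresponding verbs below are
 * stubbed out with -ENOSYS.
 */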
static int iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}

static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return -ENOSYS;
}
static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}
static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}
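
/*
 * CQ destroy drops the idr handle and the initial reference, then
 * blocks until every other holder (e.g. the event handlers) has let
 * go before tearing down the hardware CQ.
 */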
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Dont allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
		       __func__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}
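
/*
 * Re-arm the CQ: IB_CQ_SOLICITED arms for solicited events only,
 * anything else arms for the next completion.  For userspace CQs the
 * current read pointer is first pulled from the shared page so the
 * hardware accounts for CQEs the user has already polled.
 */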
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}
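
/*
 * Userspace maps queue memory by key: iwch_create_cq()/iwch_create_qp()
 * hand out per-context keys in their udata responses, and userspace
 * passes the key back as the mmap() offset, where remove_mmap() turns
 * it into the physical address to remap.
 */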
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}
static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					    struct ib_phys_buf *buffer_list,
					    int num_phys_buf,
					    int acc,
					    u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err_pbl;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}
static int iwch_reregister_phys_mem(struct ib_mr *mr,
				    int mr_rereg_mask,
				    struct ib_pd *pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf,
				    int acc, u64 * iova_start)
{
	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret) {
		return ret;
	}
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
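
/*
 * User MR registration: pin the region with ib_umem_get(), convert
 * the DMA-mapped chunks into big-endian PBL entries, and write them
 * to adapter memory one page-sized batch at a time.
 */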
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;

	PDBG("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = 0;
	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		n += chunk->nents;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	list_for_each_entry(chunk, &mhp->umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					mhp->umem->page_size * k);
				if (i == PAGE_SIZE / sizeof *pages) {
					err = iwch_write_pbl(mhp, pages, i, n);
					if (err)
						goto pbl_done;
					n += i;
					i = 0;
				}
			}
		}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			/* iwch_dereg_mr() releases the umem and frees mhp */
			iwch_dereg_mr(&mhp->ibmr);
			return ERR_PTR(-EFAULT);
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	PDBG("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}
static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	return 0;
}
static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, pbl_depth);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		ret = -ENOMEM;
		goto err3;
	}

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}
static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
					struct ib_device *device,
					int page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	page_list = kmalloc(sizeof *page_list + page_list_len * sizeof(u64),
			    GFP_KERNEL);
	if (!page_list)
		return ERR_PTR(-ENOMEM);

	page_list->page_list = (u64 *)(page_list + 1);
	page_list->max_page_list_len = page_list_len;

	return page_list;
}
static void iwch_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list)
{
	kfree(page_list);
}
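
/*
 * QP destroy moves the QP to ERROR, waits for the connection endpoint
 * to release it, then waits for all remaining references to drop
 * before freeing the hardware WQ.
 */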
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}
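
/*
 * A T3 QP is one contiguous work queue holding both SQ and RQ.  The
 * RQ depth must be a power of two strictly greater than max_recv_wr,
 * and kernel QPs get extra SQ headroom because fastreg WRs can occupy
 * two WQ slots each.
 */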
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}
void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}
void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}
static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}
static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}
static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}
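
/*
 * The firmware version is only available as an ethtool string; skip
 * the leading revision letter, parse the three dot-separated numeric
 * fields, and pack them into fw_ver as 16-bit major (bits 47:32),
 * minor (31:16) and micro (15:0) components.
 */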
static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}
static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}
static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}
static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}
static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}
static int iwch_get_mib(struct ib_device *ibdev,
			union rdma_protocol_stats *stats)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	memset(stats, 0, sizeof *stats);
	stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
				 m.ipInReceive_lo;
	stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
				  m.ipInHdrErrors_lo;
	stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
				   m.ipInAddrErrors_lo;
	stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
				      m.ipInUnknownProtos_lo;
	stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
				 m.ipInDiscards_lo;
	stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
				 m.ipInDelivers_lo;
	stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
				  m.ipOutRequests_lo;
	stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
				  m.ipOutDiscards_lo;
	stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
				  m.ipOutNoRoutes_lo;
	stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
	stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
	stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
	stats->iw.ipReasmFails = (u64) m.ipReasmFails;
	stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
	stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
	stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
	stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
	stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
	stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
	stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
			      m.tcpInSegs_lo;
	stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
			       m.tcpOutSegs_lo;
	stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
				   m.tcpRetransSeg_lo;
	stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
			      m.tcpInErrs_lo;
	stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
	stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
	return 0;
}
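
/*
 * sysfs attributes (hw_rev, fw_ver, hca_type, board_id) backed by the
 * show_* routines above; they are registered per device in
 * iwch_register_device() below.
 */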
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id
};
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_fast_reg_mr = iwch_alloc_fast_reg_mr;
	dev->ibdev.alloc_fast_reg_page_list = iwch_alloc_fastreg_pbl;
	dev->ibdev.free_fast_reg_page_list = iwch_free_fastreg_pbl;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.get_protocol_stats = iwch_get_mib;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret) {
			goto bail2;
		}
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}
void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	return;
}