/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>

#include "cxio_hal.h"
#include "iwch.h"
#include "iwch_provider.h"
#include "iwch_cm.h"
#include "iwch_user.h"
static int iwch_modify_port(struct ib_device *ibdev,
			    u8 port, int port_modify_mask,
			    struct ib_port_modify *props)
{
	return -ENOSYS;
}
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}
static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}
static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}
static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}
static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    struct ib_wc *in_wc,
			    struct ib_grh *in_grh,
			    struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	return -ENOSYS;
}
static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __FUNCTION__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}
static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}
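/*
 * Note on the mmap bookkeeping used below: each user context keeps a list
 * of iwch_mm_entry records on ucontext->mmaps.  When a CQ or QP is created
 * for a user process, the create verbs allocate a per-context "key" (a fake
 * file offset, advanced by PAGE_SIZE per object), record the physical
 * address and length under that key, and return the key to userspace in the
 * create response.  iwch_mmap() later looks that key up via remove_mmap()
 * to decide what to map.
 */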
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __FUNCTION__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __FUNCTION__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
						ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
				     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
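/*
 * For user CQs, the response above carries uresp.key; the userspace verbs
 * provider library is then expected to map the CQ memory by handing that
 * key back as the mmap offset on the uverbs device fd, roughly:
 *
 *	buf = mmap(NULL, cq_len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   cmd_fd, uresp.key);
 *
 * (illustrative sketch only; the actual call lives in the userspace
 * library, not in this file.)
 */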
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __FUNCTION__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Dont allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret)
		return ret;

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret)
		return ret;

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret)
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
		       __FUNCTION__, ret);

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
}
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if (notify == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	return err;
}
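/*
 * For user CQs the consumer index lives in user memory; chp->user_rptr_addr
 * points at it, so the read pointer is pulled in with get_user() before the
 * CQ is re-armed.  For kernel CQs chp->cq.rptr is already current and only
 * the lock is taken.
 */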
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __FUNCTION__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}
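/*
 * The pgoff-to-key mapping above mirrors the key handed out at create time:
 * key = vm_pgoff << PAGE_SHIFT, so an object published under key 0x3000
 * (for example) is reached by mmap()ing at offset 0x3000.  The doorbell
 * versus contiguous-queue decision is made purely from the recorded
 * physical address range.
 */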
static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __FUNCTION__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __FUNCTION__, pdid, php);
	return &php->ibpd;
}
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __FUNCTION__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	PDBG("%s mmid 0x%x ptr %p\n", __FUNCTION__, mmid, mhp);
	kfree(mhp);
	return 0;
}
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					    struct ib_phys_buf *buffer_list,
					    int num_phys_buf,
					    int acc,
					    u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift, page_list);
	kfree(page_list);
	if (ret)
		goto err;
	return &mhp->ibmr;
err:
	kfree(mhp);
	return ERR_PTR(ret);
}
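/*
 * In the attribute setup above, page_size is stored as shift - 12, i.e.
 * presumably encoded as log2(page size) relative to a 4KB base (0 == 4KB
 * pages).  The re-register and user-MR paths below use the same encoding.
 */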
static int iwch_reregister_phys_mem(struct ib_mr *mr,
				    int mr_rereg_mask,
				    struct ib_pd *pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf,
				    int acc, u64 * iova_start)
{
	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __FUNCTION__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, page_list, npages);
	kfree(page_list);
	if (ret)
		return ret;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
				      int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, j, k;
	int err = 0;
	struct ib_umem_chunk *chunk;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	shift = ffs(region->page_size) - 1;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	n = 0;
	list_for_each_entry(chunk, &region->chunk_list, list)
		n += chunk->nents;

	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err;
	}

	i = n = 0;

	list_for_each_entry(chunk, &region->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			len = sg_dma_len(&chunk->page_list[j]) >> shift;
			for (k = 0; k < len; ++k) {
				pages[i++] = cpu_to_be64(sg_dma_address(
					&chunk->page_list[j]) +
					region->page_size * k);
			}
		}

	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = region->virt_base;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) region->length;
	mhp->attr.pbl_size = i;
	err = iwch_register_mem(rhp, php, mhp, shift, pages);
	kfree(pages);
	if (err)
		goto err;

	if (udata && t3b_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __FUNCTION__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			err = -EFAULT;
			goto err;
		}
	}

	return &mhp->ibmr;

err:
	kfree(mhp);
	return ERR_PTR(err);
}
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __FUNCTION__, mmid, mhp, stag);
	return &(mhp->ibmw);
}
static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	kfree(mhp);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __FUNCTION__, mw, mmid, mhp);
	return 0;
}
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __FUNCTION__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __FUNCTION__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __FUNCTION__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}
	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);
	insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d\n",
	     __FUNCTION__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2);
	return &qhp->ibqp;
}
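/*
 * A user QP publishes two mmap entries: mm1 covers the work queue memory
 * itself (wqsize * sizeof(union t3_wr), page aligned) under uresp.key, and
 * mm2 covers the page containing the user doorbell (qhp->wq.udb) under
 * uresp.db_key.  Userspace maps both through iwch_mmap() above.
 */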
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __FUNCTION__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}
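/*
 * Only IB_QP_STATE and IB_QP_ACCESS_FLAGS are translated above; iWARP has
 * no RTR state, so an RTR transition is silently dropped from the mask
 * before being handed to iwch_modify_qp().
 */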
void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __FUNCTION__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}
static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}
static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	*pkey = 0;
	return 0;
}
static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	     __FUNCTION__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}
static int iwch_query_device(struct ib_device *ibdev,
			     struct ib_device_attr *props)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->device_cap_flags = dev->device_cap_flags;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = ~0ull;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;

	return 0;
}
static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	PDBG("%s ibdev %p\n", __FUNCTION__, ibdev);
	props->max_mtu = IB_MTU_4096;
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = IB_PORT_ACTIVE;
	props->phys_state = 0;
	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->qkey_viol_cntr = 0;
	props->active_width = 2;
	props->active_speed = 2;
	props->max_msg_sz = -1;

	return 0;
}
static ssize_t show_rev(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	return sprintf(buf, "%d\n", dev->rdev.t3cdev_p->type);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = dev->rdev.t3cdev_p->lldev;

	PDBG("%s class dev 0x%p\n", __FUNCTION__, cdev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct class_device *cdev, char *buf)
{
	struct iwch_dev *dev = container_of(cdev, struct iwch_dev,
					    ibdev.class_dev);
	PDBG("%s class dev 0x%p\n", __FUNCTION__, dev);
	return sprintf(buf, "%x.%x\n", dev->rdev.rnic_info.pdev->vendor,
		       dev->rdev.rnic_info.pdev->device);
}
static CLASS_DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static CLASS_DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct class_device_attribute *iwch_class_attributes[] = {
	&class_device_attr_hw_rev,
	&class_device_attr_fw_ver,
	&class_device_attr_hca_type,
	&class_device_attr_board_id
};
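/*
 * These read-only attributes are created against the ib_device class
 * device in iwch_register_device() below, so they should show up as
 * hw_rev, fw_ver, hca_type and board_id files under the adapter's
 * /sys/class/infiniband/<dev>/ directory (assuming the standard sysfs
 * layout for IB class devices).
 */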
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags =
	    (IB_DEVICE_ZERO_STAG |
	     IB_DEVICE_SEND_W_INV | IB_DEVICE_MEM_WINDOW);

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.class_dev.dev = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.modify_port = iwch_modify_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;

	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;

	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;

	dev->ibdev.iwcm =
	    (struct iw_cm_verbs *) kmalloc(sizeof(struct iw_cm_verbs),
					   GFP_KERNEL);
	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = class_device_create_file(&dev->ibdev.class_dev,
					       iwch_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	return ret;
}
void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __FUNCTION__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		class_device_remove_file(&dev->ibdev.class_dev,
					 iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	return;
}