/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iwch_provider.h"
#include "iwch_user.h"
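/*
 * UD address handles, multicast and MADs are not applicable to an iWARP
 * device, so the verbs below are stubs that simply fail with -ENOSYS.
 */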
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct ib_ah_attr *ah_attr)
{
	return ERR_PTR(-ENOSYS);
}
static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}
static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}
static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}
static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    const struct ib_wc *in_wc,
			    const struct ib_grh *in_grh,
			    const struct ib_mad_hdr *in_mad,
			    size_t in_mad_size,
			    struct ib_mad_hdr *out_mad,
			    size_t *out_mad_size,
			    u16 *out_mad_pkey_index)
{
	return -ENOSYS;
}
static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	PDBG("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}
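/*
 * Each user context carries a list of iwch_mm_entry objects protected by
 * mmap_lock; create_cq/create_qp queue entries here and iwch_mmap() consumes
 * them when userspace maps the corresponding key.
 */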
static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	PDBG("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}
static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}
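/*
 * CQ creation: the size is rounded up to a power of two, and for user CQs a
 * response (cqid, size and an mmap key) is copied back to libcxgb3.  Older
 * userspace that passes a smaller response buffer gets the v0 layout.
 */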
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
						ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof uresp) {
			if (!warned++)
				printk(KERN_WARNING MOD "Warning - "
				       "downlevel libcxgb3 (non-fatal).\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
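/*
 * CQ resize is effectively unsupported here: the body below is kept under
 * "#ifdef notyet" and the verb reports -ENOSYS to callers.
 */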
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Dont allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret)
		return ret;

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret)
		return ret;

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
		       __func__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	PDBG("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
		       chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}
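/*
 * iwch_mmap() looks the offset key up in the ucontext mmap list and either
 * maps the T3 user doorbell register (uncached, not readable) or the
 * physically contiguous WQ/CQ queue memory.
 */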
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
	     key, len);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		return -EINVAL;
	}

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		       rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ) {
			return -EPERM;
		}

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}
static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}
static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof (__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}
static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	PDBG("%s ib_mr %p\n", __func__, ib_mr);
	/* There can be no memory windows */
	if (atomic_read(&ib_mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(ib_mr);
	kfree(mhp->pages);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}
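/*
 * Physical memory registration: build_phys_page_list() flattens the buffer
 * list into a page array, which is then written to the adapter's PBL and
 * registered with a TPT entry via iwch_register_mem().
 */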
static struct ib_mr *iwch_register_phys_mem(struct ib_pd *pd,
					struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					int acc,
					u64 *iova_start)
{
	__be64 *page_list;
	int shift;
	u64 total_size;
	int npages;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	int ret;

	PDBG("%s ib_pd %p\n", __func__, pd);
	php = to_iwch_pd(pd);
	rhp = php->rhp;

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	/* First check that we have enough alignment */
	if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	if (num_phys_buf > 1 &&
	    ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) {
		ret = -EINVAL;
		goto err;
	}

	ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start,
				   &total_size, &npages, &shift, &page_list);
	if (ret)
		goto err;

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;

	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = *iova_start;
	mhp->attr.page_size = shift - 12;

	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	kfree(mhp);
	return ERR_PTR(ret);
}
static int iwch_reregister_phys_mem(struct ib_mr *mr,
				    int mr_rereg_mask,
				    struct ib_pd *pd,
				    struct ib_phys_buf *buffer_list,
				    int num_phys_buf,
				    int acc, u64 * iova_start)
{

	struct iwch_mr mh, *mhp;
	struct iwch_pd *php;
	struct iwch_dev *rhp;
	__be64 *page_list = NULL;
	int shift = 0;
	u64 total_size;
	int npages = 0;
	int ret;

	PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd);

	/* There can be no memory windows */
	if (atomic_read(&mr->usecnt))
		return -EINVAL;

	mhp = to_iwch_mr(mr);
	rhp = mhp->rhp;
	php = to_iwch_pd(mr->pd);

	/* make sure we are on the same adapter */
	if (rhp != php->rhp)
		return -EINVAL;

	memcpy(&mh, mhp, sizeof *mhp);

	if (mr_rereg_mask & IB_MR_REREG_PD)
		php = to_iwch_pd(pd);
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mh.attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		ret = build_phys_page_list(buffer_list, num_phys_buf,
					   iova_start,
					   &total_size, &npages,
					   &shift, &page_list);
		if (ret)
			return ret;
	}

	ret = iwch_reregister_mem(rhp, php, &mh, shift, npages);
	kfree(page_list);
	if (ret)
		return ret;

	if (mr_rereg_mask & IB_MR_REREG_PD)
		mhp->attr.pdid = php->pdid;
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		mhp->attr.zbva = 0;
		mhp->attr.va_fbo = *iova_start;
		mhp->attr.page_size = shift - 12;
		mhp->attr.len = (u32) total_size;
		mhp->attr.pbl_size = npages;
	}

	return 0;
}
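/*
 * User memory registration: the pages pinned by ib_umem_get() are written to
 * the PBL one PAGE_SIZE worth of entries at a time before the MR itself is
 * registered with the adapter.
 */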
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
	struct scatterlist *sg;
	PDBG("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = ffs(mhp->umem->page_size) - 1;

	n = mhp->umem->nmap;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
						 mhp->umem->page_size * k);
			if (i == PAGE_SIZE / sizeof *pages) {
				err = iwch_write_pbl(mhp, pages, i, n);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.zbva = 0;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		PDBG("%s user resp pbl_addr 0x%x\n", __func__,
		     uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			return ERR_PTR(-EFAULT);
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct ib_phys_buf bl;
	u64 kva;
	struct ib_mr *ibmr;

	PDBG("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	if (sizeof(phys_addr_t) > 4) {
		pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
		return ERR_PTR(-ENOTSUPP);
	}
	bl.size = 0xffffffff;
	bl.addr = 0;
	kva = 0;
	ibmr = iwch_register_phys_mem(pd, &bl, 1, acc, &kva);
	return ibmr;
}
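/*
 * Only type-1 memory windows are supported; the window is backed by a TPT
 * entry allocated through cxio_allocate_window().
 */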
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}
static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}
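/*
 * Fast-register MRs: the PBL is sized up front to max_num_sg (bounded by
 * T3_MAX_FASTREG_DEPTH) and a non-shared TPT stag is allocated for it.
 */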
static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
				   enum ib_mr_type mr_type,
				   u32 max_num_sg)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = 0;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > T3_MAX_FASTREG_DEPTH)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp) {
		ret = -ENOMEM;
		goto err;
	}

	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mhp->pages) {
		ret = -ENOMEM;
		goto pl_err;
	}

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
		goto err3;

	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp->pages);
pl_err:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}
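/*
 * iwch_set_page() is the per-page callback used by ib_sg_to_pages() when
 * mapping a scatterlist into the pre-allocated page array of a fastreg MR.
 */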
static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	if (unlikely(mhp->npages == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->pages[mhp->npages++] = addr;

	return 0;
}
static int iwch_map_mr_sg(struct ib_mr *ibmr,
			  struct scatterlist *sg,
			  int sg_nents)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	mhp->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, iwch_set_page);
}
static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
	     ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}
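/*
 * QP creation: SQ/RQ/WQ depths are rounded up to powers of two (the queue
 * accounting macros rely on this), and user QPs get two mmap entries back -
 * one for the work queue memory and one for the doorbell page.
 */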
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	PDBG("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr+1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
				roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	PDBG("%s wqsize %d sqsize %d rqsize %d\n", __func__,
	     wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof (union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	init_timer(&(qhp->timer));
	PDBG("%s sq_num_entries %d, rq_num_entries %d "
	     "qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
	     __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr,
	     1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}
static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
			       IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;


	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}
void iwch_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}
void iwch_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}
static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}
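/*
 * There is no real GID or P_Key table on an iWARP device: query_pkey reports
 * a single pkey of 0 and query_gid returns a GID derived from the port's MAC
 * address.
 */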
static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 * pkey)
{
	PDBG("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}
static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
	       __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}
static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}
static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			     struct ib_udata *uhw)
{

	struct iwch_dev *dev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof *props);
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}
static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	PDBG("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	memset(props, 0, sizeof(struct ib_port_attr));
	props->max_mtu = IB_MTU_4096;
	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = IB_SPEED_DDR;
	props->max_msg_sz = -1;

	return 0;
}
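/*
 * sysfs attributes: hw_rev, fw_ver, hca_type and board_id are exported
 * through the show_* handlers below and registered in
 * iwch_register_device().
 */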
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}
static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.fw_version);
}
static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	PDBG("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}
static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	PDBG("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}
static int iwch_get_mib(struct ib_device *ibdev,
			union rdma_protocol_stats *stats)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	PDBG("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	memset(stats, 0, sizeof *stats);
	stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
				m.ipInReceive_lo;
	stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
				  m.ipInHdrErrors_lo;
	stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
				   m.ipInAddrErrors_lo;
	stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
				      m.ipInUnknownProtos_lo;
	stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
				 m.ipInDiscards_lo;
	stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
				 m.ipInDelivers_lo;
	stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
				  m.ipOutRequests_lo;
	stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
				  m.ipOutDiscards_lo;
	stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
				  m.ipOutNoRoutes_lo;
	stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
	stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
	stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
	stats->iw.ipReasmFails = (u64) m.ipReasmFails;
	stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
	stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
	stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
	stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
	stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
	stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
	stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
			      m.tcpInSegs_lo;
	stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
			       m.tcpOutSegs_lo;
	stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
				   m.tcpRetransSeg_lo;
	stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
			      m.tcpInErrs_lo;
	stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
	stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
	return 0;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};
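/*
 * iwch_port_immutable() seeds the core's per-port data from query_port;
 * iwch_register_device() then fills in the verb table, the iWARP CM
 * callbacks and the sysfs attributes and registers with the RDMA core.
 */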
static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = iwch_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	return 0;
}
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_phys_mr = iwch_register_phys_mem;
	dev->ibdev.rereg_phys_mr = iwch_reregister_phys_mem;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.bind_mw = iwch_bind_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_mr = iwch_alloc_mr;
	dev->ibdev.map_mr_sg = iwch_map_mr_sg;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.get_protocol_stats = iwch_get_mib;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
	dev->ibdev.get_port_immutable = iwch_port_immutable;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}
void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	PDBG("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
	return;
}