/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>

#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iwch_provider.h"
#include <rdma/cxgb3-abi.h>

static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
				    struct rdma_ah_attr *ah_attr,
				    struct ib_udata *udata)
{
	return ERR_PTR(-ENOSYS);
}

static int iwch_ah_destroy(struct ib_ah *ah)
{
	return -ENOSYS;
}

static int iwch_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	return -ENOSYS;
}

static int iwch_process_mad(struct ib_device *ibdev,
			    int mad_flags,
			    u8 port_num,
			    const struct ib_wc *in_wc,
			    const struct ib_grh *in_grh,
			    const struct ib_mad_hdr *in_mad,
			    size_t in_mad_size,
			    struct ib_mad_hdr *out_mad,
			    size_t *out_mad_size,
			    u16 *out_mad_pkey_index)
{
	return -ENOSYS;
}

static int iwch_dealloc_ucontext(struct ib_ucontext *context)
{
	struct iwch_dev *rhp = to_iwch_dev(context->device);
	struct iwch_ucontext *ucontext = to_iwch_ucontext(context);
	struct iwch_mm_entry *mm, *tmp;

	pr_debug("%s context %p\n", __func__, context);
	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
		kfree(mm);
	cxio_release_ucontext(&rhp->rdev, &ucontext->uctx);
	kfree(ucontext);
	return 0;
}

static struct ib_ucontext *iwch_alloc_ucontext(struct ib_device *ibdev,
					       struct ib_udata *udata)
{
	struct iwch_ucontext *context;
	struct iwch_dev *rhp = to_iwch_dev(ibdev);

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);
	cxio_init_ucontext(&rhp->rdev, &context->uctx);
	INIT_LIST_HEAD(&context->mmaps);
	spin_lock_init(&context->mmap_lock);
	return &context->ibucontext;
}

static int iwch_destroy_cq(struct ib_cq *ib_cq)
{
	struct iwch_cq *chp;

	pr_debug("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_iwch_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
	kfree(chp);
	return 0;
}

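/*
 * Create a completion queue.  The requested size is rounded up to a power
 * of two (with extra slack on T3A parts for error CQEs), the CQ is
 * allocated through cxio_hal, and, for userspace callers, the queue memory
 * is exported to libcxgb3 via an iwch_mm_entry / mmap key.
 */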
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev,
				    const struct ib_cq_init_attr *attr,
				    struct ib_ucontext *ib_context,
				    struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)
						ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof(*mm), GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof(uresp)) {
			if (!warned++)
				pr_warn("Warning - downlevel libcxgb3 (non-fatal)\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			resplen = sizeof(uresp);
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	pr_debug("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
		 chp->cq.cqid, chp, (1 << chp->cq.size_log2),
		 (unsigned long long)chp->cq.dma_addr);
	return &chp->ibcq;
}

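/*
 * CQ resize: allocate a larger t3_cq, copy the existing CQEs across under
 * the old cqid, update the HW context and destroy the old queue.  The body
 * is compiled out (notyet), so the verb currently returns -ENOSYS.
 */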
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	pr_debug("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe + 1);
	newcq.size_log2 = ilog2(cqe);

	/* Dont allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr))
		return -ENOMEM;

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret)
		return ret;

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret)
		return ret;

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
	       sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1 << chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret)
		pr_err("%s - cxio_destroy_cq failed %d\n", __func__, ret);

	/* add user hooks here */

	/* resume qps */
	return iwch_resume_qps(chp);
#else
	return -ENOSYS;
#endif
}

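/*
 * Re-arm the CQ for notification.  For user CQs the current read pointer
 * is fetched from the shared user location before issuing the CQ arm
 * operation (solicited or any event) to the HAL.
 */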
static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	enum t3_cq_opcode cq_op;
	int err;
	unsigned long flag;
	u32 rptr;

	chp = to_iwch_cq(ibcq);
	rhp = chp->rhp;
	if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
		cq_op = CQ_ARM_SE;
	else
		cq_op = CQ_ARM_AN;
	if (chp->user_rptr_addr) {
		if (get_user(rptr, chp->user_rptr_addr))
			return -EFAULT;
		spin_lock_irqsave(&chp->lock, flag);
		chp->cq.rptr = rptr;
	} else
		spin_lock_irqsave(&chp->lock, flag);
	pr_debug("%s rptr 0x%x\n", __func__, chp->cq.rptr);
	err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (err < 0)
		pr_err("Error %d rearming CQID 0x%x\n", err, chp->cq.cqid);
	if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		err = 0;
	return err;
}

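/*
 * mmap() handler for userspace verbs.  The pgoff encodes a key handed out
 * at create time; depending on the backing address this maps either the
 * T3 user doorbell region (uncached, write-only) or WQ/CQ contiguous DMA
 * memory.
 */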
static int iwch_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	int len = vma->vm_end - vma->vm_start;
	u32 key = vma->vm_pgoff << PAGE_SHIFT;
	struct cxio_rdev *rdev_p;
	int ret = 0;
	struct iwch_mm_entry *mm;
	struct iwch_ucontext *ucontext;
	u64 addr;

	pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
		 key, len);

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;

	rdev_p = &(to_iwch_dev(context->device)->rdev);
	ucontext = to_iwch_ucontext(context);

	mm = remove_mmap(ucontext, key, len);
	if (!mm)
		return -EINVAL;
	addr = mm->addr;
	kfree(mm);

	if ((addr >= rdev_p->rnic_info.udbell_physbase) &&
	    (addr < (rdev_p->rnic_info.udbell_physbase +
		     rdev_p->rnic_info.udbell_len))) {

		/*
		 * Map T3 DB register.
		 */
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		vma->vm_flags &= ~VM_MAYREAD;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 addr >> PAGE_SHIFT,
					 len, vma->vm_page_prot);
	} else {

		/*
		 * Map WQ or CQ contig dma memory...
		 */
		ret = remap_pfn_range(vma, vma->vm_start,
				      addr >> PAGE_SHIFT,
				      len, vma->vm_page_prot);
	}

	return ret;
}

static int iwch_deallocate_pd(struct ib_pd *pd)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
	cxio_hal_put_pdid(rhp->rdev.rscp, php->pdid);
	kfree(php);
	return 0;
}

static struct ib_pd *iwch_allocate_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct iwch_pd *php;
	u32 pdid;
	struct iwch_dev *rhp;

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	rhp = (struct iwch_dev *) ibdev;
	pdid = cxio_hal_get_pdid(rhp->rdev.rscp);
	if (!pdid)
		return ERR_PTR(-EINVAL);
	php = kzalloc(sizeof(*php), GFP_KERNEL);
	if (!php) {
		cxio_hal_put_pdid(rhp->rdev.rscp, pdid);
		return ERR_PTR(-ENOMEM);
	}
	php->pdid = pdid;
	php->rhp = rhp;
	if (context) {
		if (ib_copy_to_udata(udata, &php->pdid, sizeof(__u32))) {
			iwch_deallocate_pd(&php->ibpd);
			return ERR_PTR(-EFAULT);
		}
	}
	pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
	return &php->ibpd;
}

static int iwch_dereg_mr(struct ib_mr *ib_mr)
{
	struct iwch_dev *rhp;
	struct iwch_mr *mhp;
	u32 mmid;

	pr_debug("%s ib_mr %p\n", __func__, ib_mr);

	mhp = to_iwch_mr(ib_mr);
	kfree(mhp->pages);
	rhp = mhp->rhp;
	mmid = mhp->attr.stag >> 8;
	cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
	iwch_free_pbl(mhp);
	remove_handle(rhp, &rhp->mmidr, mmid);
	if (mhp->kva)
		kfree((void *) (unsigned long) mhp->kva);
	if (mhp->umem)
		ib_umem_release(mhp->umem);
	pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
	kfree(mhp);
	return 0;
}

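/*
 * Allocate a DMA MR covering the full 32-bit address range T3 supports.
 * The PBL is filled with identity-mapped 64MB (1 << 26) chunks and the
 * region is then registered with the adapter.
 */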
static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
{
	const u64 total_size = 0xffffffff;
	const u64 mask = (total_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct iwch_pd *php = to_iwch_pd(pd);
	struct iwch_dev *rhp = php->rhp;
	struct iwch_mr *mhp;
	__be64 *page_list;
	int shift = 26, npages, ret, i;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	/*
	 * T3 only supports 32 bits of size.
	 */
	if (sizeof(phys_addr_t) > 4) {
		pr_warn_once("Cannot support dma_mrs on this platform\n");
		return ERR_PTR(-ENOTSUPP);
	}

	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	npages = (total_size + (1ULL << shift) - 1) >> shift;
	if (!npages) {
		ret = -EINVAL;
		goto err;
	}

	page_list = kmalloc_array(npages, sizeof(u64), GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < npages; i++)
		page_list[i] = cpu_to_be64((u64)i << shift);

	pr_debug("%s mask 0x%llx shift %d len %lld pbl_size %d\n",
		 __func__, mask, shift, total_size, npages);

	ret = iwch_alloc_pbl(mhp, npages);
	if (ret) {
		kfree(page_list);
		goto err;
	}

	ret = iwch_write_pbl(mhp, page_list, npages, 0);
	kfree(page_list);
	if (ret)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = 0;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) total_size;
	mhp->attr.pbl_size = npages;
	ret = iwch_register_mem(rhp, php, mhp, shift);
	if (ret)
		goto err_pbl;

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);
err:
	kfree(mhp);
	return ERR_PTR(ret);
}

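/*
 * Register a user memory region: pin the pages with ib_umem_get(), write
 * the DMA addresses into the PBL one page of entries at a time, register
 * the region, and return the PBL address to userspace when a new enough
 * libcxgb3 is in use.
 */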
static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				      u64 virt, int acc, struct ib_udata *udata)
{
	__be64 *pages;
	int shift, n, len;
	int i, k, entry;
	int err = 0;
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	struct iwch_reg_user_mr_resp uresp;
	struct scatterlist *sg;

	pr_debug("%s ib_pd %p\n", __func__, pd);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);

	mhp->rhp = rhp;

	mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
	if (IS_ERR(mhp->umem)) {
		err = PTR_ERR(mhp->umem);
		kfree(mhp);
		return ERR_PTR(err);
	}

	shift = mhp->umem->page_shift;

	n = mhp->umem->nmap;

	err = iwch_alloc_pbl(mhp, n);
	if (err)
		goto err;

	pages = (__be64 *) __get_free_page(GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto err_pbl;
	}

	i = n = 0;

	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
		len = sg_dma_len(sg) >> shift;
		for (k = 0; k < len; ++k) {
			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
						 (k << shift));
			if (i == PAGE_SIZE / sizeof(*pages)) {
				err = iwch_write_pbl(mhp, pages, i, n);
				if (err)
					goto pbl_done;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		err = iwch_write_pbl(mhp, pages, i, n);

pbl_done:
	free_page((unsigned long) pages);
	if (err)
		goto err_pbl;

	mhp->attr.pdid = php->pdid;
	mhp->attr.perms = iwch_ib_to_tpt_access(acc);
	mhp->attr.va_fbo = virt;
	mhp->attr.page_size = shift - 12;
	mhp->attr.len = (u32) length;

	err = iwch_register_mem(rhp, php, mhp, shift);
	if (err)
		goto err_pbl;

	if (udata && !t3a_device(rhp)) {
		uresp.pbl_addr = (mhp->attr.pbl_addr -
				  rhp->rdev.rnic_info.pbl_base) >> 3;
		pr_debug("%s user resp pbl_addr 0x%x\n", __func__,
			 uresp.pbl_addr);

		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			iwch_dereg_mr(&mhp->ibmr);
			return ERR_PTR(-EFAULT);
		}
	}

	return &mhp->ibmr;

err_pbl:
	iwch_free_pbl(mhp);

err:
	ib_umem_release(mhp->umem);
	kfree(mhp);
	return ERR_PTR(err);
}

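/*
 * Allocate a type 1 memory window: reserve a STag through
 * cxio_allocate_window() and track the window in rhp->mmidr by
 * mmid (stag >> 8).
 */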
static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
				   struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mw *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret;

	if (type != IB_MW_TYPE_1)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		return ERR_PTR(-ENOMEM);
	ret = cxio_allocate_window(&rhp->rdev, &stag, php->pdid);
	if (ret) {
		kfree(mhp);
		return ERR_PTR(ret);
	}
	mhp->rhp = rhp;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_MW;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
		kfree(mhp);
		return ERR_PTR(-ENOMEM);
	}
	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
}

static int iwch_dealloc_mw(struct ib_mw *mw)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	u32 mmid;

	mhp = to_iwch_mw(mw);
	rhp = mhp->rhp;
	mmid = (mw->rkey) >> 8;
	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
	remove_handle(rhp, &rhp->mmidr, mmid);
	pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
	kfree(mhp);
	return 0;
}

static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd,
				   enum ib_mr_type mr_type,
				   u32 max_num_sg)
{
	struct iwch_dev *rhp;
	struct iwch_pd *php;
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
	int ret = -ENOMEM;

	if (mr_type != IB_MR_TYPE_MEM_REG ||
	    max_num_sg > T3_MAX_FASTREG_DEPTH)
		return ERR_PTR(-EINVAL);

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
		goto err;

	mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mhp->pages)
		goto pl_err;

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, max_num_sg);
	if (ret)
		goto err1;
	mhp->attr.pbl_size = max_num_sg;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
	if (ret)
		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
	ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid);
	if (ret)
		goto err3;

	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
err3:
	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
		       mhp->attr.pbl_addr);
err2:
	iwch_free_pbl(mhp);
err1:
	kfree(mhp->pages);
pl_err:
	kfree(mhp);
err:
	return ERR_PTR(ret);
}

static int iwch_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	if (unlikely(mhp->npages == mhp->attr.pbl_size))
		return -ENOMEM;

	mhp->pages[mhp->npages++] = addr;

	return 0;
}

static int iwch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			  int sg_nents, unsigned int *sg_offset)
{
	struct iwch_mr *mhp = to_iwch_mr(ibmr);

	mhp->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, iwch_set_page);
}

static int iwch_destroy_qp(struct ib_qp *ib_qp)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_qp_attributes attrs;
	struct iwch_ucontext *ucontext;

	qhp = to_iwch_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = IWCH_QP_STATE_ERROR;
	iwch_modify_qp(rhp, qhp, IWCH_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid);

	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ? to_iwch_ucontext(ib_qp->uobject->context)
				  : NULL;
	cxio_destroy_qp(&rhp->rdev, &qhp->wq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	pr_debug("%s ib_qp %p qpid 0x%0x qhp %p\n", __func__,
		 ib_qp, qhp->wq.qpid, qhp);
	kfree(qhp);
	return 0;
}

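/*
 * Create an RC QP.  The RQ and total WQ sizes are rounded up to powers of
 * two (kernel QPs get extra space for fastreg WRs), the WQ is allocated
 * through cxio_hal, and for userspace callers both the queue memory and
 * the doorbell page are exported via mmap keys.
 */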
static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
				    struct ib_qp_init_attr *attrs,
				    struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	struct iwch_pd *php;
	struct iwch_cq *schp;
	struct iwch_cq *rchp;
	struct iwch_create_qp_resp uresp;
	int wqsize, sqsize, rqsize;
	struct iwch_ucontext *ucontext;

	pr_debug("%s ib_pd %p\n", __func__, pd);
	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);
	php = to_iwch_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct iwch_cq *) attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct iwch_cq *) attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	/* The RQT size must be # of entries + 1 rounded up to a power of two */
	rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr);
	if (rqsize == attrs->cap.max_recv_wr)
		rqsize = roundup_pow_of_two(attrs->cap.max_recv_wr + 1);

	/* T3 doesn't support RQT depth < 16 */
	if (rqsize < 16)
		rqsize = 16;

	if (rqsize > T3_MAX_RQ_SIZE)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T3_MAX_INLINE)
		return ERR_PTR(-EINVAL);

	/*
	 * NOTE: The SQ and total WQ sizes don't need to be
	 * a power of two.  However, all the code assumes
	 * they are. EG: Q_FREECNT() and friends.
	 */
	sqsize = roundup_pow_of_two(attrs->cap.max_send_wr);
	wqsize = roundup_pow_of_two(rqsize + sqsize);

	/*
	 * Kernel users need more wq space for fastreg WRs which can take
	 * 2 WR fragments.
	 */
	ucontext = pd->uobject ? to_iwch_ucontext(pd->uobject->context) : NULL;
	if (!ucontext && wqsize < (rqsize + (2 * sqsize)))
		wqsize = roundup_pow_of_two(rqsize +
			 roundup_pow_of_two(attrs->cap.max_send_wr * 2));
	pr_debug("%s wqsize %d sqsize %d rqsize %d\n", __func__,
		 wqsize, sqsize, rqsize);
	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.size_log2 = ilog2(wqsize);
	qhp->wq.rq_size_log2 = ilog2(rqsize);
	qhp->wq.sq_size_log2 = ilog2(sqsize);
	if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
			   ucontext ? &ucontext->uctx : &rhp->rdev.uctx)) {
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize;
	attrs->cap.max_inline_data = T3_MAX_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct iwch_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct iwch_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = IWCH_QP_STATE_IDLE;
	qhp->attr.next_state = IWCH_QP_STATE_IDLE;

	/*
	 * XXX - These don't get passed in from the openib user
	 * at create time.  The CM sets them via a QP modify.
	 * Need to fix...  I think the CM should
	 */
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;

	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
				ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
		kfree(qhp);
		return ERR_PTR(-ENOMEM);
	}

	if (udata) {

		struct iwch_mm_entry *mm1, *mm2;

		mm1 = kmalloc(sizeof(*mm1), GFP_KERNEL);
		if (!mm1) {
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		mm2 = kmalloc(sizeof(*mm2), GFP_KERNEL);
		if (!mm2) {
			kfree(mm1);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-ENOMEM);
		}

		uresp.qpid = qhp->wq.qpid;
		uresp.size_log2 = qhp->wq.size_log2;
		uresp.sq_size_log2 = qhp->wq.sq_size_log2;
		uresp.rq_size_log2 = qhp->wq.rq_size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.db_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			kfree(mm1);
			kfree(mm2);
			iwch_destroy_qp(&qhp->ibqp);
			return ERR_PTR(-EFAULT);
		}
		mm1->key = uresp.key;
		mm1->addr = virt_to_phys(qhp->wq.queue);
		mm1->len = PAGE_ALIGN(wqsize * sizeof(union t3_wr));
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.db_key;
		mm2->addr = qhp->wq.udb & PAGE_MASK;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	qhp->ibqp.qp_num = qhp->wq.qpid;
	pr_debug("%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
		 __func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
		 qhp->wq.qpid, qhp, (unsigned long long)qhp->wq.dma_addr,
		 1 << qhp->wq.size_log2, qhp->wq.rq_addr);
	return &qhp->ibqp;
}

static int iwch_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			     int attr_mask, struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_qp *qhp;
	enum iwch_qp_attr_mask mask = 0;
	struct iwch_qp_attributes attrs;

	pr_debug("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof(attrs));
	qhp = to_iwch_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = iwch_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ? 1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? IWCH_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(IWCH_QP_ATTR_ENABLE_RDMA_READ |
			 IWCH_QP_ATTR_ENABLE_RDMA_WRITE |
			 IWCH_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	return iwch_modify_qp(rhp, qhp, mask, &attrs, 0);
}

void iwch_qp_add_ref(struct ib_qp *qp)
{
	pr_debug("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_iwch_qp(qp)->refcnt));
}

void iwch_qp_rem_ref(struct ib_qp *qp)
{
	pr_debug("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_iwch_qp(qp)->refcnt)))
		wake_up(&(to_iwch_qp(qp)->wait));
}

static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
{
	pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
}

static int iwch_query_pkey(struct ib_device *ibdev,
			   u8 port, u16 index, u16 *pkey)
{
	pr_debug("%s ibdev %p\n", __func__, ibdev);
	*pkey = 0;
	return 0;
}

static int iwch_query_gid(struct ib_device *ibdev, u8 port,
			  int index, union ib_gid *gid)
{
	struct iwch_dev *dev;

	pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
		 __func__, ibdev, port, index, gid);
	dev = to_iwch_dev(ibdev);
	BUG_ON(port == 0 || port > 2);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	memcpy(&(gid->raw[0]), dev->rdev.port_info.lldevs[port-1]->dev_addr, 6);
	return 0;
}

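/*
 * Convert the firmware version string reported by the LL driver's
 * ethtool_ops (a leading type character followed by major.minor.micro)
 * into the packed u64 used for ib_device_attr.fw_ver.
 */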
static u64 fw_vers_string_to_u64(struct iwch_dev *iwch_dev)
{
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;
	char *cp, *next;
	unsigned fw_maj, fw_min, fw_mic;

	lldev->ethtool_ops->get_drvinfo(lldev, &info);

	next = info.fw_version + 1;
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_maj);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_min);
	cp = strsep(&next, ".");
	sscanf(cp, "%i", &fw_mic);

	return (((u64)fw_maj & 0xffff) << 32) | ((fw_min & 0xffff) << 16) |
	       (fw_mic & 0xffff);
}

static int iwch_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
			     struct ib_udata *uhw)
{
	struct iwch_dev *dev;

	pr_debug("%s ibdev %p\n", __func__, ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	dev = to_iwch_dev(ibdev);
	memset(props, 0, sizeof(*props));
	memcpy(&props->sys_image_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	props->hw_ver = dev->rdev.t3cdev_p->type;
	props->fw_ver = fw_vers_string_to_u64(dev);
	props->device_cap_flags = dev->device_cap_flags;
	props->page_size_cap = dev->attr.mem_pgsizes_bitmask;
	props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
	props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
	props->max_mr_size = dev->attr.max_mr_size;
	props->max_qp = dev->attr.max_qps;
	props->max_qp_wr = dev->attr.max_wrs;
	props->max_sge = dev->attr.max_sge_per_wr;
	props->max_sge_rd = 1;
	props->max_qp_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_qp_init_rd_atom = dev->attr.max_rdma_reads_per_qp;
	props->max_cq = dev->attr.max_cqs;
	props->max_cqe = dev->attr.max_cqes_per_cq;
	props->max_mr = dev->attr.max_mem_regs;
	props->max_pd = dev->attr.max_pds;
	props->local_ca_ack_delay = 0;
	props->max_fast_reg_page_list_len = T3_MAX_FASTREG_DEPTH;

	return 0;
}

static int iwch_query_port(struct ib_device *ibdev,
			   u8 port, struct ib_port_attr *props)
{
	struct iwch_dev *dev;
	struct net_device *netdev;
	struct in_device *inetdev;

	pr_debug("%s ibdev %p\n", __func__, ibdev);

	dev = to_iwch_dev(ibdev);
	netdev = dev->rdev.port_info.lldevs[port-1];

	/* props being zeroed by the caller, avoid zeroing it here */
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

	if (!netif_carrier_ok(netdev))
		props->state = IB_PORT_DOWN;
	else {
		inetdev = in_dev_get(netdev);
		if (inetdev) {
			if (inetdev->ifa_list)
				props->state = IB_PORT_ACTIVE;
			else
				props->state = IB_PORT_INIT;
			in_dev_put(inetdev);
		} else
			props->state = IB_PORT_INIT;
	}

	props->port_cap_flags =
	    IB_PORT_CM_SUP |
	    IB_PORT_SNMP_TUNNEL_SUP |
	    IB_PORT_REINIT_SUP |
	    IB_PORT_DEVICE_MGMT_SUP |
	    IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = 2;
	props->active_speed = IB_SPEED_DDR;
	props->max_msg_sz = -1;

	return 0;
}

static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	pr_debug("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	pr_debug("%s dev 0x%p\n", __func__, dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
						 ibdev.dev);
	pr_debug("%s dev 0x%p\n", __func__, dev);
	return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
		       iwch_dev->rdev.rnic_info.pdev->device);
}

static const char * const names[] = {
	[IPINRECEIVES] = "ipInReceives",
	[IPINHDRERRORS] = "ipInHdrErrors",
	[IPINADDRERRORS] = "ipInAddrErrors",
	[IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
	[IPINDISCARDS] = "ipInDiscards",
	[IPINDELIVERS] = "ipInDelivers",
	[IPOUTREQUESTS] = "ipOutRequests",
	[IPOUTDISCARDS] = "ipOutDiscards",
	[IPOUTNOROUTES] = "ipOutNoRoutes",
	[IPREASMTIMEOUT] = "ipReasmTimeout",
	[IPREASMREQDS] = "ipReasmReqds",
	[IPREASMOKS] = "ipReasmOKs",
	[IPREASMFAILS] = "ipReasmFails",
	[TCPACTIVEOPENS] = "tcpActiveOpens",
	[TCPPASSIVEOPENS] = "tcpPassiveOpens",
	[TCPATTEMPTFAILS] = "tcpAttemptFails",
	[TCPESTABRESETS] = "tcpEstabResets",
	[TCPCURRESTAB] = "tcpCurrEstab",
	[TCPINSEGS] = "tcpInSegs",
	[TCPOUTSEGS] = "tcpOutSegs",
	[TCPRETRANSSEGS] = "tcpRetransSegs",
	[TCPINERRS] = "tcpInErrs",
	[TCPOUTRSTS] = "tcpOutRsts",
	[TCPRTOMIN] = "tcpRtoMin",
	[TCPRTOMAX] = "tcpRtoMax",
};

static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
					      u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);

	/* Our driver only supports device level stats */
	if (port_num != 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

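/*
 * Fill the rdma_hw_stats array from the adapter's TP MIB.  The 64-bit
 * IP/TCP segment counters are assembled from their _hi/_lo halves; each
 * named counter is copied from the matching tp_mib_stats field.
 */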
static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
			u8 port, int index)
{
	struct iwch_dev *dev;
	struct tp_mib_stats m;
	int ret;

	if (port != 0 || !stats)
		return -ENOSYS;

	pr_debug("%s ibdev %p\n", __func__, ibdev);
	dev = to_iwch_dev(ibdev);
	ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
	if (ret)
		return -ENOSYS;

	stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
	stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
	stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
	stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
	stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
	stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
	stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
	stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
	stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
	stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
	stats->value[IPREASMREQDS] = m.ipReasmReqds;
	stats->value[IPREASMOKS] = m.ipReasmOKs;
	stats->value[IPREASMFAILS] = m.ipReasmFails;
	stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
	stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
	stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
	stats->value[TCPESTABRESETS] = m.tcpEstabResets;
	stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
	stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
	stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
	stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
	stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
	stats->value[TCPOUTRSTS] = m.tcpOutRsts;
	stats->value[TCPRTOMIN] = m.tcpRtoMin;
	stats->value[TCPRTOMAX] = m.tcpRtoMax;

	return stats->num_counters;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *iwch_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type,
	&dev_attr_board_id,
};

static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
{
	struct iwch_dev *iwch_dev = to_iwch_dev(ibdev);
	struct ethtool_drvinfo info;
	struct net_device *lldev = iwch_dev->rdev.t3cdev_p->lldev;

	pr_debug("%s dev 0x%p\n", __func__, iwch_dev);
	lldev->ethtool_ops->get_drvinfo(lldev, &info);
	snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
}

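/*
 * Register the iwch device with the RDMA core: set up the device
 * capabilities, the uverbs command mask, the verbs entry points and the
 * iWARP CM callbacks, then create the sysfs attribute files.
 */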
int iwch_register_device(struct iwch_dev *dev)
{
	int ret;
	int i;

	pr_debug("%s iwch_dev %p\n", __func__, dev);
	strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
	memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
	dev->ibdev.owner = THIS_MODULE;
	dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
				IB_DEVICE_MEM_WINDOW |
				IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* cxgb3 supports STag 0. */
	dev->ibdev.local_dma_lkey = 0;

	dev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV);
	dev->ibdev.node_type = RDMA_NODE_RNIC;
	BUILD_BUG_ON(sizeof(IWCH_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
	memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
	dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dev.parent = &dev->rdev.rnic_info.pdev->dev;
	dev->ibdev.query_device = iwch_query_device;
	dev->ibdev.query_port = iwch_query_port;
	dev->ibdev.query_pkey = iwch_query_pkey;
	dev->ibdev.query_gid = iwch_query_gid;
	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = iwch_dealloc_ucontext;
	dev->ibdev.mmap = iwch_mmap;
	dev->ibdev.alloc_pd = iwch_allocate_pd;
	dev->ibdev.dealloc_pd = iwch_deallocate_pd;
	dev->ibdev.create_ah = iwch_ah_create;
	dev->ibdev.destroy_ah = iwch_ah_destroy;
	dev->ibdev.create_qp = iwch_create_qp;
	dev->ibdev.modify_qp = iwch_ib_modify_qp;
	dev->ibdev.destroy_qp = iwch_destroy_qp;
	dev->ibdev.create_cq = iwch_create_cq;
	dev->ibdev.destroy_cq = iwch_destroy_cq;
	dev->ibdev.resize_cq = iwch_resize_cq;
	dev->ibdev.poll_cq = iwch_poll_cq;
	dev->ibdev.get_dma_mr = iwch_get_dma_mr;
	dev->ibdev.reg_user_mr = iwch_reg_user_mr;
	dev->ibdev.dereg_mr = iwch_dereg_mr;
	dev->ibdev.alloc_mw = iwch_alloc_mw;
	dev->ibdev.dealloc_mw = iwch_dealloc_mw;
	dev->ibdev.alloc_mr = iwch_alloc_mr;
	dev->ibdev.map_mr_sg = iwch_map_mr_sg;
	dev->ibdev.attach_mcast = iwch_multicast_attach;
	dev->ibdev.detach_mcast = iwch_multicast_detach;
	dev->ibdev.process_mad = iwch_process_mad;
	dev->ibdev.req_notify_cq = iwch_arm_cq;
	dev->ibdev.post_send = iwch_post_send;
	dev->ibdev.post_recv = iwch_post_receive;
	dev->ibdev.alloc_hw_stats = iwch_alloc_stats;
	dev->ibdev.get_hw_stats = iwch_get_mib;
	dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
	dev->ibdev.get_port_immutable = iwch_port_immutable;
	dev->ibdev.get_dev_fw_str = get_dev_fw_ver_str;

	dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
	if (!dev->ibdev.iwcm)
		return -ENOMEM;

	dev->ibdev.iwcm->connect = iwch_connect;
	dev->ibdev.iwcm->accept = iwch_accept_cr;
	dev->ibdev.iwcm->reject = iwch_reject_cr;
	dev->ibdev.iwcm->create_listen = iwch_create_listen;
	dev->ibdev.iwcm->destroy_listen = iwch_destroy_listen;
	dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
	dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
	dev->ibdev.iwcm->get_qp = iwch_get_qp;
	memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
	       sizeof(dev->ibdev.iwcm->ifname));

	ret = ib_register_device(&dev->ibdev, NULL);
	if (ret)
		goto bail1;

	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					 iwch_class_attributes[i]);
		if (ret)
			goto bail2;
	}
	return 0;
bail2:
	ib_unregister_device(&dev->ibdev);
bail1:
	kfree(dev->ibdev.iwcm);
	return ret;
}

void iwch_unregister_device(struct iwch_dev *dev)
{
	int i;

	pr_debug("%s iwch_dev %p\n", __func__, dev);
	for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
	kfree(dev->ibdev.iwcm);
}