/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/hugetlb.h>
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "i40iw.h"
/**
 * i40iw_query_device - get device attributes
 * @ibdev: device pointer from stack
 * @props: returning device attributes
 * @udata: user data
 */
static int i40iw_query_device(struct ib_device *ibdev,
			      struct ib_device_attr *props,
			      struct ib_udata *udata)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	if (udata->inlen || udata->outlen)
		return -EINVAL;
	memset(props, 0, sizeof(*props));
	ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
	props->fw_ver = I40IW_FW_VERSION;
	props->device_cap_flags = iwdev->device_cap_flags;
	props->vendor_id = iwdev->ldev->pcidev->vendor;
	props->vendor_part_id = iwdev->ldev->pcidev->device;
	props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
	props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
	props->max_qp = iwdev->max_qp - iwdev->used_qps;
	props->max_qp_wr = I40IW_MAX_QP_WRS;
	props->max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	props->max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	props->max_cq = iwdev->max_cq - iwdev->used_cqs;
	props->max_cqe = iwdev->max_cqe;
	props->max_mr = iwdev->max_mr - iwdev->used_mrs;
	props->max_pd = iwdev->max_pd - iwdev->used_pds;
	props->max_sge_rd = I40IW_MAX_SGE_RD;
	props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->max_map_per_fmr = 1;
	props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;

	return 0;
}
/**
 * i40iw_query_port - get port attributes
 * @ibdev: device pointer from stack
 * @port: port number for query
 * @props: returning device attributes
 */
static int i40iw_query_port(struct ib_device *ibdev,
			    u8 port,
			    struct ib_port_attr *props)
{
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;
	props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;

	return 0;
}
/**
 * i40iw_alloc_ucontext - Allocate the user context data structure
 * @uctx: Uverbs context pointer from stack
 * @udata: user data
 *
 * This keeps track of all objects associated with a particular
 * user-mode client.
 */
static int i40iw_alloc_ucontext(struct ib_ucontext *uctx,
				struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_alloc_ucontext_req req;
	struct i40iw_alloc_ucontext_resp uresp = {};
	struct i40iw_ucontext *ucontext = to_ucontext(uctx);

	if (ib_copy_from_udata(&req, udata, sizeof(req)))
		return -EINVAL;

	if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) {
		i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver);
		return -EINVAL;
	}

	uresp.max_qps = iwdev->max_qp;
	uresp.max_pds = iwdev->max_pd;
	uresp.wq_size = iwdev->max_qp_wr * 2;
	uresp.kernel_ver = req.userspace_ver;

	ucontext->iwdev = iwdev;
	ucontext->abi_ver = req.userspace_ver;

	if (ib_copy_to_udata(udata, &uresp, sizeof(uresp)))
		return -EFAULT;

	INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
	spin_lock_init(&ucontext->cq_reg_mem_list_lock);
	INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
	spin_lock_init(&ucontext->qp_reg_mem_list_lock);

	return 0;
}
/**
 * i40iw_dealloc_ucontext - deallocate the user context data structure
 * @context: user context created during alloc
 */
static void i40iw_dealloc_ucontext(struct ib_ucontext *context)
{
	return;
}
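
/*
 * Mapping layout note (matches the offset math in i40iw_mmap() below):
 * page offset 0 of the mmap space is the doorbell page, mapped
 * non-cached; later page offsets select push pages, which alternate
 * between non-cached and write-combine mappings, hence the
 * (pgoff - push_offset) % 2 test.
 */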
/**
 * i40iw_mmap - user memory map
 * @context: context created during alloc
 * @vma: kernel info for user memory map
 */
static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct i40iw_ucontext *ucontext;
	u64 db_addr_offset, push_offset, pfn;

	ucontext = to_ucontext(context);
	if (ucontext->iwdev->sc_dev.is_pf) {
		db_addr_offset = I40IW_DB_ADDR_OFFSET;
		push_offset = I40IW_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
	} else {
		db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
		push_offset = I40IW_VF_PUSH_OFFSET;
		if (vma->vm_pgoff)
			vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
	}

	vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;

	if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	} else {
		if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		else
			vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	}

	pfn = vma->vm_pgoff +
	      (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >>
	       PAGE_SHIFT);

	return rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
				 vma->vm_page_prot, NULL);
}
/**
 * i40iw_alloc_push_page - allocate a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 0;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Push page fail");
	i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
}
/**
 * i40iw_dealloc_push_page - free a push page for qp
 * @iwdev: iwarp device
 * @qp: hardware control qp
 */
static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status;

	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
		return;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
	cqp_info->post_sq = 1;

	cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
	cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle;
	cqp_info->in.u.manage_push_page.info.free_page = 1;
	cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
	cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
	else
		i40iw_pr_err("CQP-OP Push page fail");
}
/**
 * i40iw_alloc_pd - allocate protection domain
 * @pd: PD pointer
 * @udata: user data
 */
static int i40iw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_alloc_pd_resp uresp;
	struct i40iw_sc_pd *sc_pd;
	u32 pd_id = 0;
	int err;

	err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
				   iwdev->max_pd, &pd_id, &iwdev->next_pd);
	if (err) {
		i40iw_pr_err("alloc resource failed\n");
		return err;
	}

	sc_pd = &iwpd->sc_pd;

	if (udata) {
		struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
			udata, struct i40iw_ucontext, ibucontext);
		dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
		memset(&uresp, 0, sizeof(uresp));
		uresp.pd_id = pd_id;
		if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
			err = -EFAULT;
			goto error;
		}
	} else {
		dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1);
	}

	i40iw_add_pdusecount(iwpd);
	return 0;

error:
	i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
	return err;
}
/**
 * i40iw_dealloc_pd - deallocate pd
 * @ibpd: ptr of pd to be deallocated
 * @udata: user data or null for kernel object
 */
static void i40iw_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);

	i40iw_rem_pdusecount(iwpd, iwdev);
}
/**
 * i40iw_get_pbl - Retrieve pbl from a list given a virtual
 * address
 * @va: user virtual address
 * @pbl_list: pbl list to search in (QP's or CQ's)
 */
static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
				       struct list_head *pbl_list)
{
	struct i40iw_pbl *iwpbl;

	list_for_each_entry(iwpbl, pbl_list, list) {
		if (iwpbl->user_base == va) {
			iwpbl->on_list = false;
			list_del(&iwpbl->list);
			return iwpbl;
		}
	}
	return NULL;
}
/**
 * i40iw_free_qp_resources - free up memory resources for qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @qp_num: qp number assigned
 */
void i40iw_free_qp_resources(struct i40iw_device *iwdev,
			     struct i40iw_qp *iwqp,
			     u32 qp_num)
{
	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;

	i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
	i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
	if (qp_num)
		i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
	if (iwpbl->pbl_allocated)
		i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
	i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
	kfree(iwqp->kqp.wrid_mem);
	iwqp->kqp.wrid_mem = NULL;
	kfree(iwqp->allocated_buffer);
}
/**
 * i40iw_clean_cqes - clean cq entries for qp
 * @iwqp: qp ptr (user or kernel)
 * @iwcq: cq ptr
 */
static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
{
	struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;

	ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
}
/**
 * i40iw_destroy_qp - destroy qp
 * @ibqp: qp's ib pointer also to get to device's qp address
 * @udata: user data
 */
static int i40iw_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);

	iwqp->destroyed = 1;

	if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
		i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);

	if (!iwqp->user_mode) {
		if (iwqp->iwscq) {
			i40iw_clean_cqes(iwqp, iwqp->iwscq);
			if (iwqp->iwrcq != iwqp->iwscq)
				i40iw_clean_cqes(iwqp, iwqp->iwrcq);
		}
	}

	i40iw_rem_ref(&iwqp->ibqp);
	return 0;
}
/**
 * i40iw_setup_virt_qp - setup for allocation of virtual qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr
 * @init_info: initialize info to return
 */
static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
			       struct i40iw_qp *iwqp,
			       struct i40iw_qp_init_info *init_info)
{
	struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;

	iwqp->page = qpmr->sq_page;
	init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
	if (iwpbl->pbl_allocated) {
		init_info->virtual_map = true;
		init_info->sq_pa = qpmr->sq_pbl.idx;
		init_info->rq_pa = qpmr->rq_pbl.idx;
	} else {
		init_info->sq_pa = qpmr->sq_pbl.addr;
		init_info->rq_pa = qpmr->rq_pbl.addr;
	}
	return 0;
}
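
/*
 * Kernel-mode QP memory layout produced below: one DMA allocation holds
 * the SQ WQEs, then the RQ WQEs, then the shadow area, while a separate
 * kzalloc'd buffer carries the work-request tracking data (the SQ
 * wr-track array followed by the RQ wrid array).
 */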
/**
 * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: initialize info to return
 */
static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
				struct i40iw_qp *iwqp,
				struct i40iw_qp_init_info *info)
{
	struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
	u32 sqdepth, rqdepth;
	u8 sqshift;
	u32 size;
	enum i40iw_status_code status;
	struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;

	i40iw_get_wqe_shift(ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift);
	status = i40iw_get_sqdepth(ukinfo->sq_size, sqshift, &sqdepth);
	if (status)
		return -ENOMEM;

	status = i40iw_get_rqdepth(ukinfo->rq_size, I40IW_MAX_RQ_WQE_SHIFT, &rqdepth);
	if (status)
		return -ENOMEM;

	size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
	iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);

	ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
	if (!ukinfo->sq_wrtrk_array)
		return -ENOMEM;

	ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];

	size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
	size += (I40IW_SHADOW_AREA_SIZE << 3);

	status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
	if (status) {
		kfree(ukinfo->sq_wrtrk_array);
		ukinfo->sq_wrtrk_array = NULL;
		return -ENOMEM;
	}

	ukinfo->sq = mem->va;
	info->sq_pa = mem->pa;

	ukinfo->rq = &ukinfo->sq[sqdepth];
	info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
	info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);

	ukinfo->sq_size = sqdepth >> sqshift;
	ukinfo->rq_size = rqdepth >> I40IW_MAX_RQ_WQE_SHIFT;
	ukinfo->qp_id = iwqp->ibqp.qp_num;
	return 0;
}
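
/*
 * QP creation sequence implemented below: clamp the requested caps,
 * allocate the iwqp and its Q2/context DMA memory, reserve a QP number,
 * set up either a user-mode (virtual) or kernel-mode QP, program the
 * host context, and finally issue an OP_QP_CREATE command on the CQP.
 */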
/**
 * i40iw_create_qp - create qp
 * @ibpd: ptr of pd
 * @init_attr: attributes for qp
 * @udata: user data for create qp
 */
static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_device *iwdev = to_iwdev(ibpd->device);
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_qp *iwqp;
	struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct i40iw_ucontext, ibucontext);
	struct i40iw_create_qp_req req;
	struct i40iw_create_qp_resp uresp;
	u32 qp_num = 0;
	enum i40iw_status_code ret;
	int err_code;
	int sq_size;
	int rq_size;
	struct i40iw_sc_qp *qp;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_qp_init_info init_info;
	struct i40iw_create_qp_info *qp_info;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	void *mem;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	unsigned long flags;

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);
	if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
		init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;

	if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
		init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT)
		init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	memset(&init_info, 0, sizeof(init_info));

	sq_size = init_attr->cap.max_send_wr;
	rq_size = init_attr->cap.max_recv_wr;

	init_info.vsi = &iwdev->vsi;
	init_info.qp_uk_init_info.sq_size = sq_size;
	init_info.qp_uk_init_info.rq_size = rq_size;
	init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
	init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
	init_info.qp_uk_init_info.max_inline_data = init_attr->cap.max_inline_data;

	mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	iwqp = (struct i40iw_qp *)mem;
	iwqp->allocated_buffer = mem;
	qp = &iwqp->sc_qp;
	qp->back_qp = (void *)iwqp;
	qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;

	iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;

	if (i40iw_allocate_dma_mem(dev->hw,
				   &iwqp->q2_ctx_mem,
				   I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
				   256)) {
		i40iw_pr_err("dma_mem failed\n");
		err_code = -ENOMEM;
		goto error;
	}

	init_info.q2 = iwqp->q2_ctx_mem.va;
	init_info.q2_pa = iwqp->q2_ctx_mem.pa;

	init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
	init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
					&qp_num, &iwdev->next_qp);
	if (err_code) {
		i40iw_pr_err("qp resource\n");
		goto error;
	}

	iwqp->iwdev = iwdev;
	iwqp->iwpd = iwpd;
	iwqp->ibqp.qp_num = qp_num;

	iwqp->iwscq = to_iwcq(init_attr->send_cq);
	iwqp->iwrcq = to_iwcq(init_attr->recv_cq);

	iwqp->host_ctx.va = init_info.host_ctx;
	iwqp->host_ctx.pa = init_info.host_ctx_pa;
	iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;

	init_info.pd = &iwpd->sc_pd;
	init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;

	if (init_attr->qp_type != IB_QPT_RC) {
		err_code = -EINVAL;
		goto error;
	}
	if (iwdev->push_mode)
		i40iw_alloc_push_page(iwdev, qp);

	if (udata) {
		err_code = ib_copy_from_udata(&req, udata, sizeof(req));
		if (err_code) {
			i40iw_pr_err("ib_copy_from_data\n");
			goto error;
		}
		iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
		iwqp->user_mode = 1;

		if (req.user_wqe_buffers) {
			struct i40iw_pbl *iwpbl;

			spin_lock_irqsave(
				&ucontext->qp_reg_mem_list_lock, flags);
			iwpbl = i40iw_get_pbl(
				(unsigned long)req.user_wqe_buffers,
				&ucontext->qp_reg_mem_list);
			spin_unlock_irqrestore(
				&ucontext->qp_reg_mem_list_lock, flags);

			if (!iwpbl) {
				err_code = -ENODATA;
				i40iw_pr_err("no pbl info\n");
				goto error;
			}
			memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl));
		}
		err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
	} else {
		err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
	}

	if (err_code) {
		i40iw_pr_err("setup qp failed\n");
		goto error;
	}

	init_info.type = I40IW_QP_TYPE_IWARP;
	ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
	if (ret) {
		err_code = -EPROTO;
		i40iw_pr_err("qp_init fail\n");
		goto error;
	}

	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;
	iwarp_info->rd_enable = true;
	iwarp_info->wr_rdresp_en = true;
	if (!iwqp->user_mode) {
		iwarp_info->fast_reg_en = true;
		iwarp_info->priv_mode_en = true;
	}
	iwarp_info->ddp_ver = 1;
	iwarp_info->rdmap_ver = 1;

	ctx_info->iwarp_info_valid = true;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
		ctx_info->push_mode_en = false;
	} else {
		ctx_info->push_mode_en = true;
		ctx_info->push_idx = qp->push_idx;
	}

	ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
					     (u64 *)iwqp->host_ctx.va,
					     ctx_info);
	ctx_info->iwarp_info_valid = false;
	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto error;
	}
	cqp_info = &cqp_request->info;
	qp_info = &cqp_request->info.in.u.qp_create.info;

	memset(qp_info, 0, sizeof(*qp_info));

	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;

	cqp_info->cqp_cmd = OP_QP_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_create.qp = qp;
	cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
	ret = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (ret) {
		i40iw_pr_err("CQP-OP QP create fail");
		err_code = -EACCES;
		goto error;
	}

	i40iw_add_ref(&iwqp->ibqp);
	spin_lock_init(&iwqp->lock);
	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
	iwdev->qp_table[qp_num] = iwqp;
	i40iw_add_pdusecount(iwqp->iwpd);
	i40iw_add_devusecount(iwdev);
	if (udata) {
		memset(&uresp, 0, sizeof(uresp));
		uresp.actual_sq_size = sq_size;
		uresp.actual_rq_size = rq_size;
		uresp.qp_id = qp_num;
		uresp.push_idx = qp->push_idx;
		err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
		if (err_code) {
			i40iw_pr_err("copy_to_udata failed\n");
			i40iw_destroy_qp(&iwqp->ibqp, udata);
			/* let the completion of the qp destroy free the qp */
			return ERR_PTR(err_code);
		}
	}
	init_completion(&iwqp->sq_drained);
	init_completion(&iwqp->rq_drained);

	return &iwqp->ibqp;
error:
	i40iw_free_qp_resources(iwdev, iwqp, qp_num);
	return ERR_PTR(err_code);
}
/**
 * i40iw_query_qp - query qp attributes
 * @ibqp: qp pointer
 * @attr: attributes pointer
 * @attr_mask: Not used
 * @init_attr: qp attributes to return
 */
static int i40iw_query_qp(struct ib_qp *ibqp,
			  struct ib_qp_attr *attr,
			  int attr_mask,
			  struct ib_qp_init_attr *init_attr)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	attr->qp_state = iwqp->ibqp_state;
	attr->cur_qp_state = attr->qp_state;
	attr->qp_access_flags = 0;
	attr->cap.max_send_wr = qp->qp_uk.sq_size;
	attr->cap.max_recv_wr = qp->qp_uk.rq_size;
	attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
	attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
	attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;

	init_attr->event_handler = iwqp->ibqp.event_handler;
	init_attr->qp_context = iwqp->ibqp.qp_context;
	init_attr->send_cq = iwqp->ibqp.send_cq;
	init_attr->recv_cq = iwqp->ibqp.recv_cq;
	init_attr->srq = iwqp->ibqp.srq;
	init_attr->cap = attr->cap;
	init_attr->port_num = 1;
	return 0;
}
/**
 * i40iw_hw_modify_qp - setup cqp for modify qp
 * @iwdev: iwarp device
 * @iwqp: qp ptr (user or kernel)
 * @info: info for modify qp
 * @wait: flag to wait or not for modify qp completion
 */
void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
			struct i40iw_modify_qp_info *info, bool wait)
{
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_modify_qp_info *m_info;
	struct i40iw_gen_ae_info ae_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	m_info = &cqp_info->in.u.qp_modify.info;
	memcpy(m_info, info, sizeof(*m_info));
	cqp_info->cqp_cmd = OP_QP_MODIFY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
	cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
	if (!i40iw_handle_cqp_op(iwdev, cqp_request))
		return;

	switch (m_info->next_iwarp_state) {
	case I40IW_QP_STATE_RTS:
		if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE)
			i40iw_send_reset(iwqp->cm_node);
		/* fall through */
	case I40IW_QP_STATE_IDLE:
	case I40IW_QP_STATE_TERMINATE:
	case I40IW_QP_STATE_CLOSING:
		ae_info.ae_code = I40IW_AE_BAD_CLOSE;
		ae_info.ae_source = 0;
		i40iw_gen_ae(iwdev, &iwqp->sc_qp, &ae_info, false);
		break;
	case I40IW_QP_STATE_ERROR:
	default:
		break;
	}
}
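
/*
 * i40iw_modify_qp() translates IB verb states to iWARP hardware states
 * under the QP lock and only records what has to change; the actual
 * OP_QP_MODIFY command is issued after the lock is dropped, via
 * i40iw_hw_modify_qp() above.
 */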
/**
 * i40iw_modify_qp - modify qp request
 * @ibqp: qp's pointer for modify
 * @attr: access attributes
 * @attr_mask: state mask
 * @udata: user data
 */
int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iwarp_offload_info *iwarp_info;
	struct i40iw_modify_qp_info info;
	u8 issue_modify_qp = 0;
	u8 dont_wait = 0;
	int err;
	unsigned long flags;

	memset(&info, 0, sizeof(info));
	ctx_info = &iwqp->ctx_info;
	iwarp_info = &iwqp->iwarp_info;

	spin_lock_irqsave(&iwqp->lock, flags);

	if (attr_mask & IB_QP_STATE) {
		if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
			err = -EINVAL;
			goto exit;
		}

		switch (attr->qp_state) {
		case IB_QPS_INIT:
		case IB_QPS_RTR:
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
				info.next_iwarp_state = I40IW_QP_STATE_IDLE;
				issue_modify_qp = 1;
			}
			break;
		case IB_QPS_RTS:
			if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
			    (!iwqp->cm_id)) {
				err = -EINVAL;
				goto exit;
			}

			issue_modify_qp = 1;
			iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
			iwqp->hte_added = 1;
			info.next_iwarp_state = I40IW_QP_STATE_RTS;
			info.tcp_ctx_valid = true;
			info.ord_valid = true;
			info.arp_cache_idx_valid = true;
			info.cq_num_valid = true;
			break;
		case IB_QPS_SQD:
			if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
				err = 0;
				goto exit;
			}
			if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
			    (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
				err = 0;
				goto exit;
			}
			if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
			issue_modify_qp = 1;
			break;
		case IB_QPS_SQE:
			if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
				err = -EINVAL;
				goto exit;
			}
			info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
			issue_modify_qp = 1;
			break;
		case IB_QPS_ERR:
		case IB_QPS_RESET:
			if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
				err = -EINVAL;
				goto exit;
			}
			if (iwqp->sc_qp.term_flags)
				i40iw_terminate_del_timer(&iwqp->sc_qp);
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
			    iwdev->iw_status &&
			    (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
				info.reset_tcp_conn = true;
			else
				dont_wait = 1;
			issue_modify_qp = 1;
			info.next_iwarp_state = I40IW_QP_STATE_ERROR;
			break;
		default:
			err = -EINVAL;
			goto exit;
		}

		iwqp->ibqp_state = attr->qp_state;
	}
	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		ctx_info->iwarp_info_valid = true;
		if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
			iwarp_info->wr_rdresp_en = true;
		if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
			iwarp_info->rd_enable = true;
		if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
			iwarp_info->bind_en = true;

		if (iwqp->user_mode) {
			iwarp_info->rd_enable = true;
			iwarp_info->wr_rdresp_en = true;
			iwarp_info->priv_mode_en = false;
		}
	}

	if (ctx_info->iwarp_info_valid) {
		struct i40iw_sc_dev *dev = &iwdev->sc_dev;
		int ret;

		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
		ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
						     (u64 *)iwqp->host_ctx.va,
						     ctx_info);
		if (ret) {
			i40iw_pr_err("setting QP context\n");
			err = -EINVAL;
			goto exit;
		}
	}

	spin_unlock_irqrestore(&iwqp->lock, flags);

	if (issue_modify_qp) {
		i40iw_hw_modify_qp(iwdev, iwqp, &info, true);

		spin_lock_irqsave(&iwqp->lock, flags);
		iwqp->iwarp_state = info.next_iwarp_state;
		spin_unlock_irqrestore(&iwqp->lock, flags);
	}

	if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
		if (dont_wait) {
			if (iwqp->cm_id && iwqp->hw_tcp_state) {
				spin_lock_irqsave(&iwqp->lock, flags);
				iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
				iwqp->last_aeq = I40IW_AE_RESET_SENT;
				spin_unlock_irqrestore(&iwqp->lock, flags);
				i40iw_cm_disconn(iwqp);
			}
		} else {
			spin_lock_irqsave(&iwqp->lock, flags);
			if (iwqp->cm_id) {
				if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
					iwqp->cm_id->add_ref(iwqp->cm_id);
					i40iw_schedule_cm_timer(iwqp->cm_node,
								(struct i40iw_puda_buf *)iwqp,
								I40IW_TIMER_TYPE_CLOSE, 1, 0);
				}
			}
			spin_unlock_irqrestore(&iwqp->lock, flags);
		}
	}
	return 0;
exit:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}
/**
 * cq_free_resources - free up resources for cq
 * @iwdev: iwarp device
 * @iwcq: cq ptr
 */
static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
{
	struct i40iw_sc_cq *cq = &iwcq->sc_cq;

	if (!iwcq->user_mode)
		i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
	i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
}
/**
 * i40iw_cq_wq_destroy - send cq destroy cqp
 * @iwdev: iwarp device
 * @cq: hardware control cq
 */
void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
{
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;

	cqp_info->cqp_cmd = OP_CQ_DESTROY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_destroy.cq = cq;
	cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP Destroy CQ fail");
}
/**
 * i40iw_destroy_cq - destroy cq
 * @ib_cq: cq pointer
 * @udata: user data or NULL for kernel object
 */
static void i40iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct i40iw_cq *iwcq;
	struct i40iw_device *iwdev;
	struct i40iw_sc_cq *cq;

	iwcq = to_iwcq(ib_cq);
	iwdev = to_iwdev(ib_cq->device);
	cq = &iwcq->sc_cq;
	i40iw_cq_wq_destroy(iwdev, cq);
	cq_free_resources(iwdev, iwcq);
	i40iw_rem_devusecount(iwdev);
}
/**
 * i40iw_create_cq - create cq
 * @ibcq: CQ allocated
 * @attr: attributes for cq
 * @udata: user data
 */
static int i40iw_create_cq(struct ib_cq *ibcq,
			   const struct ib_cq_init_attr *attr,
			   struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_cq *iwcq = to_iwcq(ibcq);
	struct i40iw_pbl *iwpbl;
	u32 cq_num = 0;
	struct i40iw_sc_cq *cq;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cq_init_info info = {};
	enum i40iw_status_code status;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
	unsigned long flags;
	int err_code;
	int entries = attr->cqe;

	if (iwdev->closing)
		return -ENODEV;

	if (entries > iwdev->max_cqe)
		return -EINVAL;

	err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
					iwdev->max_cq, &cq_num,
					&iwdev->next_cq);
	if (err_code)
		return err_code;

	cq = &iwcq->sc_cq;
	cq->back_cq = (void *)iwcq;
	spin_lock_init(&iwcq->lock);

	ukinfo->cq_size = max(entries, 4);
	ukinfo->cq_id = cq_num;
	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
	if (attr->comp_vector < iwdev->ceqs_count)
		info.ceq_id = attr->comp_vector;
	info.ceq_id_valid = true;
	info.type = I40IW_CQ_TYPE_IWARP;
	if (udata) {
		struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
			udata, struct i40iw_ucontext, ibucontext);
		struct i40iw_create_cq_req req;
		struct i40iw_cq_mr *cqmr;

		memset(&req, 0, sizeof(req));
		iwcq->user_mode = true;
		if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
			err_code = -EFAULT;
			goto cq_free_resources;
		}

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
				      &ucontext->cq_reg_mem_list);
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		if (!iwpbl) {
			err_code = -EPROTO;
			goto cq_free_resources;
		}

		iwcq->iwpbl = iwpbl;
		iwcq->cq_mem_size = 0;
		cqmr = &iwpbl->cq_mr;
		info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
		if (iwpbl->pbl_allocated) {
			info.virtual_map = true;
			info.pbl_chunk_size = 1;
			info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
		} else {
			info.cq_base_pa = cqmr->cq_pbl.addr;
		}
	} else {
		/* Kmode allocations */
		int rsize;
		int shadow;

		rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
		rsize = round_up(rsize, 256);
		shadow = I40IW_SHADOW_AREA_SIZE << 3;
		status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
						rsize + shadow, 256);
		if (status) {
			err_code = -ENOMEM;
			goto cq_free_resources;
		}
		ukinfo->cq_base = iwcq->kmem.va;
		info.cq_base_pa = iwcq->kmem.pa;
		info.shadow_area_pa = info.cq_base_pa + rsize;
		ukinfo->shadow_area = iwcq->kmem.va + rsize;
	}

	if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
		i40iw_pr_err("init cq fail\n");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request) {
		err_code = -ENOMEM;
		goto cq_free_resources;
	}

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_CQ_CREATE;
	cqp_info->post_sq = 1;
	cqp_info->in.u.cq_create.cq = cq;
	cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		i40iw_pr_err("CQP-OP Create CQ fail");
		err_code = -EPROTO;
		goto cq_free_resources;
	}

	if (udata) {
		struct i40iw_create_cq_resp resp;

		memset(&resp, 0, sizeof(resp));
		resp.cq_id = info.cq_uk_init_info.cq_id;
		resp.cq_size = info.cq_uk_init_info.cq_size;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			i40iw_pr_err("copy to user data\n");
			err_code = -EPROTO;
			goto cq_destroy;
		}
	}

	i40iw_add_devusecount(iwdev);
	return 0;

cq_destroy:
	i40iw_cq_wq_destroy(iwdev, cq);
cq_free_resources:
	cq_free_resources(iwdev, iwcq);
	return err_code;
}
/**
 * i40iw_get_user_access - get hw access from IB access
 * @acc: IB access to return hw access
 */
static inline u16 i40iw_get_user_access(int acc)
{
	u16 access = 0;

	access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
	access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
	access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
	return access;
}
/**
 * i40iw_free_stag - free stag resource
 * @iwdev: iwarp device
 * @stag: stag to free
 */
static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
{
	u32 stag_idx;

	stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
	i40iw_rem_devusecount(iwdev);
}
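
/*
 * STag layout used below: the MR resource index sits above
 * I40IW_CQPSQ_STAG_IDX_SHIFT, random driver-key bits fill the space
 * outside mr_stagmask, and the low byte is a random consumer key.
 * i40iw_free_stag() above recovers the index by masking and shifting.
 */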
/**
 * i40iw_create_stag - create random stag
 * @iwdev: iwarp device
 */
static u32 i40iw_create_stag(struct i40iw_device *iwdev)
{
	u32 stag = 0;
	u32 stag_index = 0;
	u32 next_stag_index;
	u32 driver_key;
	u32 random;
	u8 consumer_key;
	int ret;

	get_random_bytes(&random, sizeof(random));
	consumer_key = (u8)random;

	driver_key = random & ~iwdev->mr_stagmask;
	next_stag_index = (random & iwdev->mr_stagmask) >> 8;
	next_stag_index %= iwdev->max_mr;

	ret = i40iw_alloc_resource(iwdev,
				   iwdev->allocated_mrs, iwdev->max_mr,
				   &stag_index, &next_stag_index);
	if (!ret) {
		stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
		stag |= driver_key;
		stag += (u32)consumer_key;
		i40iw_add_devusecount(iwdev);
	}
	return stag;
}
/**
 * i40iw_next_pbl_addr - Get next pbl address
 * @pbl: pointer to a pble
 * @pinfo: info pointer
 * @idx: index
 */
static inline u64 *i40iw_next_pbl_addr(u64 *pbl,
				       struct i40iw_pble_info **pinfo,
				       u32 *idx)
{
	*idx += 1;
	if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
		return ++pbl;
	*idx = 0;
	(*pinfo)++;
	return (u64 *)(*pinfo)->addr;
}
/**
 * i40iw_copy_user_pgaddrs - copy user page address to pble's os locally
 * @iwmr: iwmr for IB's user page addresses
 * @pbl: ple pointer to save 1 level or 0 level pble
 * @level: indicated level 0, 1 or 2
 */
static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
				    u64 *pbl,
				    enum i40iw_pble_level level)
{
	struct ib_umem *region = iwmr->region;
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	struct ib_block_iter biter;
	u32 idx = 0;

	pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;

	if (iwmr->type == IW_MEMREG_TYPE_QP)
		iwpbl->qp_mr.sq_page = sg_page(region->sg_head.sgl);

	rdma_for_each_block(region->sg_head.sgl, &biter, region->nmap,
			    iwmr->page_size) {
		*pbl = rdma_block_iter_dma_address(&biter);
		pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx);
	}
}
/**
 * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous
 * @arr: lvl1 pbl array
 * @npages: page count
 * @pg_size: page size
 */
static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size)
{
	u32 pg_idx;

	for (pg_idx = 0; pg_idx < npages; pg_idx++) {
		if ((*arr + (pg_size * pg_idx)) != arr[pg_idx])
			return false;
	}
	return true;
}
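
/*
 * For a level-1 allocation a single PBLE array covers the whole MR, so
 * one scan suffices. For level-2, each leaf is checked internally and
 * each leaf's first entry must also continue where the previous leaf
 * left off (PBLE_PER_PAGE entries per leaf).
 */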
/**
 * i40iw_check_mr_contiguous - check if MR is physically contiguous
 * @palloc: pbl allocation struct
 * @pg_size: page size
 */
static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size)
{
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *leaf = lvl2->leaf;
	u64 *arr = NULL;
	u64 *start_addr = NULL;
	int i;
	bool ret;

	if (palloc->level == I40IW_LEVEL_1) {
		arr = (u64 *)palloc->level1.addr;
		ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size);
		return ret;
	}

	start_addr = (u64 *)leaf->addr;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		arr = (u64 *)leaf->addr;
		if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr)
			return false;
		ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size);
		if (!ret)
			return false;
	}

	return true;
}
/**
 * i40iw_setup_pbles - copy user pg address to pble's
 * @iwdev: iwarp device
 * @iwmr: mr pointer for this memory registration
 * @use_pbles: flag if to use pble's
 */
static int i40iw_setup_pbles(struct i40iw_device *iwdev,
			     struct i40iw_mr *iwmr,
			     bool use_pbles)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_pble_info *pinfo;
	u64 *pbl;
	enum i40iw_status_code status;
	enum i40iw_pble_level level = I40IW_LEVEL_1;

	if (use_pbles) {
		mutex_lock(&iwdev->pbl_mutex);
		status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
		mutex_unlock(&iwdev->pbl_mutex);
		if (status)
			return -ENOMEM;

		iwpbl->pbl_allocated = true;
		level = palloc->level;
		pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
		pbl = (u64 *)pinfo->addr;
	} else {
		pbl = iwmr->pgaddrmem;
	}

	i40iw_copy_user_pgaddrs(iwmr, pbl, level);

	if (use_pbles)
		iwmr->pgaddrmem[0] = *pbl;

	return 0;
}
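
/*
 * Queue memory for user QPs/CQs: the page addresses are copied into
 * PBLEs first; if every page then turns out to be physically
 * contiguous, the base address is programmed directly and the PBLEs are
 * released again (the use_pbles && ret case at the end of
 * i40iw_handle_q_mem()).
 */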
/**
 * i40iw_handle_q_mem - handle memory for qp and cq
 * @iwdev: iwarp device
 * @req: information for q memory management
 * @iwpbl: pble struct
 * @use_pbles: flag to use pble
 */
static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
			      struct i40iw_mem_reg_req *req,
			      struct i40iw_pbl *iwpbl,
			      bool use_pbles)
{
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_mr *iwmr = iwpbl->iwmr;
	struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
	struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
	struct i40iw_hmc_pble *hmc_p;
	u64 *arr = iwmr->pgaddrmem;
	u32 pg_size;
	int err;
	int total;
	bool ret = true;

	total = req->sq_pages + req->rq_pages + req->cq_pages;
	pg_size = iwmr->page_size;

	err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
	if (err)
		return err;

	if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
		iwpbl->pbl_allocated = false;
		return -ENOMEM;
	}

	if (use_pbles)
		arr = (u64 *)palloc->level1.addr;

	if (iwmr->type == IW_MEMREG_TYPE_QP) {
		hmc_p = &qpmr->sq_pbl;
		qpmr->shadow = (dma_addr_t)arr[total];

		if (use_pbles) {
			ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size);
			if (ret)
				ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size);
		}

		if (!ret) {
			hmc_p->idx = palloc->level1.idx;
			hmc_p = &qpmr->rq_pbl;
			hmc_p->idx = palloc->level1.idx + req->sq_pages;
		} else {
			hmc_p->addr = arr[0];
			hmc_p = &qpmr->rq_pbl;
			hmc_p->addr = arr[req->sq_pages];
		}
	} else {		/* CQ */
		hmc_p = &cqmr->cq_pbl;
		cqmr->shadow = (dma_addr_t)arr[total];

		if (use_pbles)
			ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size);

		if (!ret)
			hmc_p->idx = palloc->level1.idx;
		else
			hmc_p->addr = arr[0];
	}

	if (use_pbles && ret) {
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
		iwpbl->pbl_allocated = false;
	}

	return err;
}
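
/*
 * CQP command pattern used by the MR paths below: acquire a
 * cqp_request, fill in the command-specific info union, post it to the
 * control QP with i40iw_handle_cqp_op(), and map any failure to
 * -ENOMEM.
 */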
/**
 * i40iw_hw_alloc_stag - cqp command to allocate stag
 * @iwdev: iwarp device
 * @iwmr: iwarp mr pointer
 */
static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr)
{
	struct i40iw_allocate_stag_info *info;
	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
	enum i40iw_status_code status;
	int err = 0;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.alloc_stag.info;
	memset(info, 0, sizeof(*info));
	info->page_size = PAGE_SIZE;
	info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	info->pd_id = iwpd->sc_pd.pd_id;
	info->total_len = iwmr->length;
	info->remote_access = true;
	cqp_info->cqp_cmd = OP_ALLOC_STAG;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
	cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		err = -ENOMEM;
		i40iw_pr_err("CQP-OP MR Reg fail");
	}
	return err;
}
/**
 * i40iw_alloc_mr - register stag for fast memory registration
 * @pd: ibpd pointer
 * @mr_type: memory for stag registration
 * @max_num_sg: max number of pages
 * @udata: user data or NULL for kernel objects
 */
static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				    u32 max_num_sg, struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_pble_alloc *palloc;
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	enum i40iw_status_code status;
	u32 stag;
	int err_code = -ENOMEM;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);

	stag = i40iw_create_stag(iwdev);
	if (!stag) {
		err_code = -EOVERFLOW;
		goto err;
	}
	stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->type = IW_MEMREG_TYPE_MEM;
	palloc = &iwpbl->pble_alloc;
	iwmr->page_cnt = max_num_sg;
	mutex_lock(&iwdev->pbl_mutex);
	status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
	mutex_unlock(&iwdev->pbl_mutex);
	if (status)
		goto err1;

	if (palloc->level != I40IW_LEVEL_1)
		goto err2;
	err_code = i40iw_hw_alloc_stag(iwdev, iwmr);
	if (err_code)
		goto err2;
	iwpbl->pbl_allocated = true;
	i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;
err2:
	i40iw_free_pble(iwdev->pble_rsrc, palloc);
err1:
	i40iw_free_stag(iwdev, stag);
err:
	kfree(iwmr);
	return ERR_PTR(err_code);
}
/**
 * i40iw_set_page - populate pbl list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @addr: page dma address for pbl list
 */
static int i40iw_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct i40iw_mr *iwmr = to_iwmr(ibmr);
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	u64 *pbl;

	if (unlikely(iwmr->npages == iwmr->page_cnt))
		return -ENOMEM;

	pbl = (u64 *)palloc->level1.addr;
	pbl[iwmr->npages++] = cpu_to_le64(addr);
	return 0;
}
/**
 * i40iw_map_mr_sg - map of sg list for fmr
 * @ibmr: ib mem to access iwarp mr pointer
 * @sg: scatter gather list for fmr
 * @sg_nents: number of sg pages
 * @sg_offset: offset into the first sg entry
 */
static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			   int sg_nents, unsigned int *sg_offset)
{
	struct i40iw_mr *iwmr = to_iwmr(ibmr);

	iwmr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page);
}
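
/*
 * The drain helpers below block until the matching completion object is
 * signaled; the completions are completed elsewhere in the driver once
 * the SQ/RQ ring is observed to be empty.
 */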
/**
 * i40iw_drain_sq - drain the send queue
 * @ibqp: ib qp pointer
 */
static void i40iw_drain_sq(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
		wait_for_completion(&iwqp->sq_drained);
}

/**
 * i40iw_drain_rq - drain the receive queue
 * @ibqp: ib qp pointer
 */
static void i40iw_drain_rq(struct ib_qp *ibqp)
{
	struct i40iw_qp *iwqp = to_iwqp(ibqp);
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;

	if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
		wait_for_completion(&iwqp->rq_drained);
}
/**
 * i40iw_hwreg_mr - send cqp command for memory registration
 * @iwdev: iwarp device
 * @iwmr: iwarp mr pointer
 * @access: access for MR
 */
static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
			  struct i40iw_mr *iwmr,
			  u16 access)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_reg_ns_stag_info *stag_info;
	struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	enum i40iw_status_code status;
	int err = 0;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
	memset(stag_info, 0, sizeof(*stag_info));
	stag_info->va = (void *)(unsigned long)iwpbl->user_base;
	stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
	stag_info->stag_key = (u8)iwmr->stag;
	stag_info->total_len = iwmr->length;
	stag_info->access_rights = access;
	stag_info->pd_id = iwpd->sc_pd.pd_id;
	stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
	stag_info->page_size = iwmr->page_size;

	if (iwpbl->pbl_allocated) {
		if (palloc->level == I40IW_LEVEL_1) {
			stag_info->first_pm_pbl_index = palloc->level1.idx;
			stag_info->chunk_size = 1;
		} else {
			stag_info->first_pm_pbl_index = palloc->level2.root.idx;
			stag_info->chunk_size = 3;
		}
	} else {
		stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
	}

	cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
	cqp_info->post_sq = 1;
	cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
	cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;

	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status) {
		err = -ENOMEM;
		i40iw_pr_err("CQP-OP MR Reg fail");
	}
	return err;
}
/**
 * i40iw_reg_user_mr - Register a user memory region
 * @pd: ptr of pd
 * @start: virtual start address
 * @length: length of mr
 * @virt: virtual address
 * @acc: access of mr
 * @udata: user data
 */
static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
				       u64 start,
				       u64 length,
				       u64 virt,
				       int acc,
				       struct ib_udata *udata)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct i40iw_ucontext, ibucontext);
	struct i40iw_pble_alloc *palloc;
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	struct ib_umem *region;
	struct i40iw_mem_reg_req req;
	u64 pbl_depth = 0;
	u32 stag = 0;
	u16 access;
	u64 region_length;
	bool use_pbles = false;
	unsigned long flags;
	int err = -ENOSYS;
	int ret;
	int pg_shift;

	if (!udata)
		return ERR_PTR(-EOPNOTSUPP);

	if (iwdev->closing)
		return ERR_PTR(-ENODEV);

	if (length > I40IW_MAX_MR_SIZE)
		return ERR_PTR(-EINVAL);
	region = ib_umem_get(pd->device, start, length, acc);
	if (IS_ERR(region))
		return (struct ib_mr *)region;

	if (ib_copy_from_udata(&req, udata, sizeof(req))) {
		ib_umem_release(region);
		return ERR_PTR(-EFAULT);
	}

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr) {
		ib_umem_release(region);
		return ERR_PTR(-ENOMEM);
	}

	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->region = region;
	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;

	iwmr->page_size = PAGE_SIZE;
	if (req.reg_type == IW_MEMREG_TYPE_MEM)
		iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
							 virt);

	region_length = region->length + (start & (iwmr->page_size - 1));
	pg_shift = ffs(iwmr->page_size) - 1;
	pbl_depth = region_length >> pg_shift;
	pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
	iwmr->length = region->length;

	iwpbl->user_base = virt;
	palloc = &iwpbl->pble_alloc;

	iwmr->type = req.reg_type;
	iwmr->page_cnt = (u32)pbl_depth;

	switch (req.reg_type) {
	case IW_MEMREG_TYPE_QP:
		use_pbles = ((req.sq_pages + req.rq_pages) > 2);
		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
		if (err)
			goto error;
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
		iwpbl->on_list = true;
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_CQ:
		use_pbles = (req.cq_pages > 1);
		err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
		if (err)
			goto error;

		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
		iwpbl->on_list = true;
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_MEM:
		use_pbles = (iwmr->page_cnt != 1);
		access = I40IW_ACCESS_FLAGS_LOCALREAD;

		err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
		if (err)
			goto error;

		if (use_pbles) {
			ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size);
			if (ret) {
				i40iw_free_pble(iwdev->pble_rsrc, palloc);
				iwpbl->pbl_allocated = false;
			}
		}

		access |= i40iw_get_user_access(acc);
		stag = i40iw_create_stag(iwdev);
		if (!stag) {
			err = -ENOMEM;
			goto error;
		}

		iwmr->stag = stag;
		iwmr->ibmr.rkey = stag;
		iwmr->ibmr.lkey = stag;

		err = i40iw_hwreg_mr(iwdev, iwmr, access);
		if (err) {
			i40iw_free_stag(iwdev, stag);
			goto error;
		}

		break;
	default:
		goto error;
	}

	iwmr->type = req.reg_type;
	if (req.reg_type == IW_MEMREG_TYPE_MEM)
		i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;

error:
	if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated)
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
	ib_umem_release(region);
	kfree(iwmr);
	return ERR_PTR(err);
}
/**
 * i40iw_reg_phys_mr - register kernel physical memory
 * @pd: ibpd pointer
 * @addr: physical address of memory to register
 * @size: size of memory to register
 * @acc: Access rights
 * @iova_start: start of virtual address for physical buffers
 */
struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
				u64 addr,
				u64 size,
				int acc,
				u64 *iova_start)
{
	struct i40iw_pd *iwpd = to_iwpd(pd);
	struct i40iw_device *iwdev = to_iwdev(pd->device);
	struct i40iw_pbl *iwpbl;
	struct i40iw_mr *iwmr;
	enum i40iw_status_code status;
	u32 stag;
	u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
	int ret;

	iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
	if (!iwmr)
		return ERR_PTR(-ENOMEM);

	iwmr->ibmr.pd = pd;
	iwmr->ibmr.device = pd->device;
	iwpbl = &iwmr->iwpbl;
	iwpbl->iwmr = iwmr;
	iwmr->type = IW_MEMREG_TYPE_MEM;
	iwpbl->user_base = *iova_start;
	stag = i40iw_create_stag(iwdev);
	if (!stag) {
		ret = -EOVERFLOW;
		goto err;
	}
	access |= i40iw_get_user_access(acc);
	iwmr->stag = stag;
	iwmr->ibmr.rkey = stag;
	iwmr->ibmr.lkey = stag;
	iwmr->page_cnt = 1;
	iwmr->pgaddrmem[0] = addr;
	iwmr->length = size;
	status = i40iw_hwreg_mr(iwdev, iwmr, access);
	if (status) {
		i40iw_free_stag(iwdev, stag);
		ret = -ENOMEM;
		goto err;
	}

	i40iw_add_pdusecount(iwpd);
	return &iwmr->ibmr;
 err:
	kfree(iwmr);
	return ERR_PTR(ret);
}
/**
 * i40iw_get_dma_mr - register physical mem
 * @pd: ibpd pointer
 * @acc: access for memory
 */
static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
{
	u64 kva = 0;

	return i40iw_reg_phys_mr(pd, 0, 0, acc, &kva);
}
/**
 * i40iw_del_memlist - Deleting pbl list entries for CQ/QP
 * @iwmr: iwmr for IB's user page addresses
 * @ucontext: ptr to user context
 */
static void i40iw_del_memlist(struct i40iw_mr *iwmr,
			      struct i40iw_ucontext *ucontext)
{
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	unsigned long flags;

	switch (iwmr->type) {
	case IW_MEMREG_TYPE_CQ:
		spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
		if (iwpbl->on_list) {
			iwpbl->on_list = false;
			list_del(&iwpbl->list);
		}
		spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
		break;
	case IW_MEMREG_TYPE_QP:
		spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
		if (iwpbl->on_list) {
			iwpbl->on_list = false;
			list_del(&iwpbl->list);
		}
		spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
		break;
	default:
		break;
	}
}
/**
 * i40iw_dereg_mr - deregister mr
 * @ib_mr: mr ptr for dereg
 * @udata: user data
 */
static int i40iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct ib_pd *ibpd = ib_mr->pd;
	struct i40iw_pd *iwpd = to_iwpd(ibpd);
	struct i40iw_mr *iwmr = to_iwmr(ib_mr);
	struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
	enum i40iw_status_code status;
	struct i40iw_dealloc_stag_info *info;
	struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
	struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	u32 stag_idx;

	ib_umem_release(iwmr->region);

	if (iwmr->type != IW_MEMREG_TYPE_MEM) {
		/* region is released. only test for userness. */
		if (iwmr->region) {
			struct i40iw_ucontext *ucontext =
				rdma_udata_to_drv_context(
					udata,
					struct i40iw_ucontext,
					ibucontext);

			i40iw_del_memlist(iwmr, ucontext);
		}
		if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP)
			i40iw_free_pble(iwdev->pble_rsrc, palloc);
		kfree(iwmr);
		return 0;
	}

	cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
	if (!cqp_request)
		return -ENOMEM;

	cqp_info = &cqp_request->info;
	info = &cqp_info->in.u.dealloc_stag.info;
	memset(info, 0, sizeof(*info));

	info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
	info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
	stag_idx = info->stag_idx;
	info->mr = true;
	if (iwpbl->pbl_allocated)
		info->dealloc_pbl = true;

	cqp_info->cqp_cmd = OP_DEALLOC_STAG;
	cqp_info->post_sq = 1;
	cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
	cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (status)
		i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
	i40iw_rem_pdusecount(iwpd, iwdev);
	i40iw_free_stag(iwdev, iwmr->stag);
	if (iwpbl->pbl_allocated)
		i40iw_free_pble(iwdev->pble_rsrc, palloc);
	kfree(iwmr);
	return 0;
}
static ssize_t hw_rev_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct i40iw_ib_device *iwibdev =
		rdma_device_to_drv_device(dev, struct i40iw_ib_device, ibdev);
	u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;

	return sprintf(buf, "%x\n", hw_rev);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "I40IW\n");
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *i40iw_dev_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL
};

static const struct attribute_group i40iw_attr_group = {
	.attrs = i40iw_dev_attributes,
};
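
/*
 * Field mapping between the stack's struct ib_sge and the hardware's
 * struct i40iw_sge, as copied below: addr -> tag_off, length -> len,
 * lkey -> stag.
 */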
/**
 * i40iw_copy_sg_list - copy sg list for qp
 * @sg_list: copied into sg_list
 * @sgl: copy from sgl
 * @num_sges: count of sg entries
 */
static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
{
	unsigned int i;

	for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
		sg_list[i].tag_off = sgl[i].addr;
		sg_list[i].len = sgl[i].length;
		sg_list[i].stag = sgl[i].lkey;
	}
}
/**
 * i40iw_post_send - kernel application wr
 * @ibqp: qp ptr for wr
 * @ib_wr: work request ptr
 * @bad_wr: return of bad wr if err
 */
static int i40iw_post_send(struct ib_qp *ibqp,
			   const struct ib_send_wr *ib_wr,
			   const struct ib_send_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_sq_info info;
	enum i40iw_status_code ret;
	int err = 0;
	unsigned long flags;
	bool inv_stag;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	spin_lock_irqsave(&iwqp->lock, flags);

	if (iwqp->flush_issued) {
		err = -EINVAL;
		goto out;
	}

	while (ib_wr) {
		inv_stag = false;
		memset(&info, 0, sizeof(info));
		info.wr_id = (u64)(ib_wr->wr_id);
		if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
			info.signaled = true;
		if (ib_wr->send_flags & IB_SEND_FENCE)
			info.read_fence = true;

		switch (ib_wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (ib_wr->opcode == IB_WR_SEND) {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					info.op_type = I40IW_OP_TYPE_SEND_SOL;
				else
					info.op_type = I40IW_OP_TYPE_SEND;
			} else {
				if (ib_wr->send_flags & IB_SEND_SOLICITED)
					info.op_type = I40IW_OP_TYPE_SEND_SOL_INV;
				else
					info.op_type = I40IW_OP_TYPE_SEND_INV;
			}

			if (ib_wr->send_flags & IB_SEND_INLINE) {
				info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
				info.op.inline_send.len = ib_wr->sg_list[0].length;
				ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
			} else {
				info.op.send.num_sges = ib_wr->num_sge;
				info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
				ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false);
			}

			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_RDMA_WRITE:
			info.op_type = I40IW_OP_TYPE_RDMA_WRITE;

			if (ib_wr->send_flags & IB_SEND_INLINE) {
				info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
				info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
				info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
				info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
				ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
			} else {
				info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
				info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
				info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
				info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
				ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
			}

			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_RDMA_READ_WITH_INV:
			inv_stag = true;
			/* fall through */
		case IB_WR_RDMA_READ:
			if (ib_wr->num_sge > I40IW_MAX_SGE_RD) {
				err = -EINVAL;
				break;
			}
			info.op_type = I40IW_OP_TYPE_RDMA_READ;
			info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
			info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
			info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
			info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
			info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
			ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false);
			if (ret) {
				if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
					err = -ENOMEM;
				else
					err = -EINVAL;
			}
			break;
		case IB_WR_LOCAL_INV:
			info.op_type = I40IW_OP_TYPE_INV_STAG;
			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
			ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true);
			if (ret)
				err = -ENOMEM;
			break;
		case IB_WR_REG_MR:
		{
			struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr);
			int flags = reg_wr(ib_wr)->access;
			struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc;
			struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
			struct i40iw_fast_reg_stag_info info;

			memset(&info, 0, sizeof(info));
			info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
			info.access_rights |= i40iw_get_user_access(flags);
			info.stag_key = reg_wr(ib_wr)->key & 0xff;
			info.stag_idx = reg_wr(ib_wr)->key >> 8;
			info.page_size = reg_wr(ib_wr)->mr->page_size;
			info.wr_id = ib_wr->wr_id;

			info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
			info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
			info.total_len = iwmr->ibmr.length;
			info.reg_addr_pa = *(u64 *)palloc->level1.addr;
			info.first_pm_pbl_index = palloc->level1.idx;
			info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
			info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;

			if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
				info.chunk_size = 1;

			ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true);
			if (ret)
				err = -ENOMEM;
			break;
		}
		default:
			err = -EINVAL;
			i40iw_pr_err(" post_send bad opcode = 0x%x\n",
				     ib_wr->opcode);
			break;
		}

		if (err)
			break;
		ib_wr = ib_wr->next;
	}

out:
	if (err)
		*bad_wr = ib_wr;
	else
		ukqp->ops.iw_qp_post_wr(ukqp);
	spin_unlock_irqrestore(&iwqp->lock, flags);

	return err;
}
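/*
 * Illustrative usage sketch (not from the original source): a kernel ULP
 * posting a small inline send that exercises the IB_SEND_INLINE branch
 * above. The payload_buf, payload_len, mr, qp and MY_COOKIE names are
 * assumptions for the example.
 *
 *	struct ib_sge sge = {
 *		.addr = (uintptr_t)payload_buf,
 *		.length = payload_len,
 *		.lkey = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id = MY_COOKIE,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED | IB_SEND_INLINE,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int err = ib_post_send(qp, &wr, &bad_wr);
 */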
/**
 * i40iw_post_recv - post receive wr for kernel application
 * @ibqp: ib qp pointer
 * @ib_wr: work request for receive
 * @bad_wr: bad wr caused an error
 */
static int i40iw_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr,
			   const struct ib_recv_wr **bad_wr)
{
	struct i40iw_qp *iwqp;
	struct i40iw_qp_uk *ukqp;
	struct i40iw_post_rq_info post_recv;
	struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
	enum i40iw_status_code ret = 0;
	unsigned long flags;
	int err = 0;

	iwqp = (struct i40iw_qp *)ibqp;
	ukqp = &iwqp->sc_qp.qp_uk;

	memset(&post_recv, 0, sizeof(post_recv));
	spin_lock_irqsave(&iwqp->lock, flags);

	if (iwqp->flush_issued) {
		err = -EINVAL;
		goto out;
	}

	while (ib_wr) {
		post_recv.num_sges = ib_wr->num_sge;
		post_recv.wr_id = ib_wr->wr_id;
		i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
		post_recv.sg_list = sg_list;
		ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
		if (ret) {
			i40iw_pr_err(" post_recv err %d\n", ret);
			if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED)
				err = -ENOMEM;
			else
				err = -EINVAL;
			*bad_wr = ib_wr;
			goto out;
		}
		ib_wr = ib_wr->next;
	}

out:
	spin_unlock_irqrestore(&iwqp->lock, flags);
	return err;
}
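/*
 * Illustrative note (not from the original source): the receive path
 * copies each WR's scatter list into the on-stack sg_list[] via
 * i40iw_copy_sg_list() because the hardware descriptor uses the
 * i40iw_sge layout (tag_off/len/stag) rather than ib_sge's
 * (addr/length/lkey). The copy is consumed by iw_post_receive() before
 * the loop advances, so one stack buffer can be reused for every WR.
 */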
/**
 * i40iw_poll_cq - poll cq for completion (kernel apps)
 * @ibcq: cq to poll
 * @num_entries: number of entries to poll
 * @entry: wr of entry completed
 */
static int i40iw_poll_cq(struct ib_cq *ibcq,
			 int num_entries,
			 struct ib_wc *entry)
{
	struct i40iw_cq *iwcq;
	int cqe_count = 0;
	struct i40iw_cq_poll_info cq_poll_info;
	enum i40iw_status_code ret;
	struct i40iw_cq_uk *ukcq;
	struct i40iw_sc_qp *qp;
	struct i40iw_qp *iwqp;
	unsigned long flags;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;

	spin_lock_irqsave(&iwcq->lock, flags);
	while (cqe_count < num_entries) {
		ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info);
		if (ret == I40IW_ERR_QUEUE_EMPTY) {
			break;
		} else if (ret == I40IW_ERR_QUEUE_DESTROYED) {
			continue;
		} else if (ret) {
			if (!cqe_count)
				cqe_count = -1;
			break;
		}
		entry->wc_flags = 0;
		entry->wr_id = cq_poll_info.wr_id;
		if (cq_poll_info.error) {
			entry->status = IB_WC_WR_FLUSH_ERR;
			entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
		} else {
			entry->status = IB_WC_SUCCESS;
		}

		switch (cq_poll_info.op_type) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			entry->opcode = IB_WC_RDMA_WRITE;
			break;
		case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
		case I40IW_OP_TYPE_RDMA_READ:
			entry->opcode = IB_WC_RDMA_READ;
			break;
		case I40IW_OP_TYPE_SEND_SOL:
		case I40IW_OP_TYPE_SEND_SOL_INV:
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND:
			entry->opcode = IB_WC_SEND;
			break;
		case I40IW_OP_TYPE_REC:
			entry->opcode = IB_WC_RECV;
			break;
		default:
			entry->opcode = IB_WC_RECV;
			break;
		}

		entry->ex.imm_data = 0;
		qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
		entry->qp = (struct ib_qp *)qp->back_qp;
		entry->src_qp = cq_poll_info.qp_id;
		iwqp = (struct i40iw_qp *)qp->back_qp;
		if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) {
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring))
				complete(&iwqp->sq_drained);
			if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring))
				complete(&iwqp->rq_drained);
		}
		entry->byte_len = cq_poll_info.bytes_xfered;
		entry++;
		cqe_count++;
	}
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return cqe_count;
}
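/*
 * Illustrative note (not from the original source): the iwarp_state check
 * above is the back half of the drain machinery behind .drain_sq/.drain_rq.
 * Once the QP has left RTS, each ring that has no more pending work
 * completes the matching sq_drained/rq_drained completion, which is what
 * i40iw_drain_sq()/i40iw_drain_rq() block on.
 */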
/**
 * i40iw_req_notify_cq - arm cq kernel application
 * @ibcq: cq to arm
 * @notify_flags: notification flags
 */
static int i40iw_req_notify_cq(struct ib_cq *ibcq,
			       enum ib_cq_notify_flags notify_flags)
{
	struct i40iw_cq *iwcq;
	struct i40iw_cq_uk *ukcq;
	unsigned long flags;
	enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;

	iwcq = (struct i40iw_cq *)ibcq;
	ukcq = &iwcq->sc_cq.cq_uk;
	if (notify_flags == IB_CQ_SOLICITED)
		cq_notify = IW_CQ_COMPL_SOLICITED;
	spin_lock_irqsave(&iwcq->lock, flags);
	ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
	spin_unlock_irqrestore(&iwcq->lock, flags);
	return 0;
}
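/*
 * Illustrative usage sketch (not from the original source): a consumer
 * typically re-arms the CQ after draining it, e.g.
 *
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *
 * Any flag other than IB_CQ_SOLICITED falls through to the default
 * IW_CQ_COMPL_EVENT arming above, so both solicited-only and
 * next-completion requests are honored.
 */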
/**
 * i40iw_port_immutable - return port's immutable data
 * @ibdev: ib dev struct
 * @port_num: port number
 * @immutable: immutable data for the port return
 */
static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}
static const char * const i40iw_hw_stat_names[] = {
	/* 32-bit names */
	[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
	[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
	[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
	[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
	[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
	[I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
	[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
	[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
	/* 64-bit names */
	[I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InOctets",
	[I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InPkts",
	[I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutOctets",
	[I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutPkts",
	[I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip4OutMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InOctets",
	[I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InPkts",
	[I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InReasmRqd",
	[I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6InMcastPkts",
	[I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutOctets",
	[I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutPkts",
	[I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutSegRqd",
	[I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
		"ip6OutMcastPkts",
	[I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpInSegs",
	[I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
		"tcpOutSegs",
	[I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwInRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaReads",
	[I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaSends",
	[I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwOutRdmaWrites",
	[I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaBnd",
	[I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
		"iwRdmaInv"
};
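/*
 * Illustrative note (not from the original source): the name table mirrors
 * the hardware stats layout -- 32-bit counters occupy indices
 * [0, I40IW_HW_STAT_INDEX_MAX_32) and every 64-bit counter is shifted up
 * by I40IW_HW_STAT_INDEX_MAX_32, which is why each 64-bit entry adds that
 * constant to its hardware index. i40iw_get_hw_stats() depends on this
 * ordering when it copies the whole stats struct into stats->value[].
 */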
static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str)
{
	u32 firmware_version = I40IW_FW_VERSION;

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", firmware_version,
		 (firmware_version & 0x000000ff));
}
/**
 * i40iw_alloc_hw_stats - Allocate a hw stats structure
 * @ibdev: device pointer from stack
 * @port_num: port number
 */
static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
						  u8 port_num)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
			   I40IW_HW_STAT_INDEX_MAX_64;
	unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;

	BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
		     (I40IW_HW_STAT_INDEX_MAX_32 +
		      I40IW_HW_STAT_INDEX_MAX_64));

	/*
	 * PFs get the default update lifespan, but VFs only update once
	 * per second
	 */
	if (!dev->is_pf)
		lifespan = 1000;
	return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
					  lifespan);
}
/**
 * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
 * @ibdev: device pointer from stack
 * @stats: stats pointer from stack
 * @port_num: port number
 * @index: which hw counter the stack is requesting we update
 */
static int i40iw_get_hw_stats(struct ib_device *ibdev,
			      struct rdma_hw_stats *stats,
			      u8 port_num, int index)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat;
	struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;

	if (dev->is_pf) {
		i40iw_hw_stats_read_all(devstat, &devstat->hw_stats);
	} else {
		if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
			return -ENOSYS;
	}

	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));

	return stats->num_counters;
}
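/*
 * Illustrative note (not from the original source): the single memcpy()
 * works only because the hardware stats struct stores every counter as a
 * u64 in the same order as i40iw_hw_stat_names, so counter i lands in
 * stats->value[i] under the matching name. The BUILD_BUG_ON() in
 * i40iw_alloc_hw_stats() catches a size mismatch but not a reordering,
 * so the two definitions must be kept in sync by hand.
 */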
/**
 * i40iw_query_gid - Query port GID
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: Entry index
 * @gid: Global ID
 */
static int i40iw_query_gid(struct ib_device *ibdev,
			   u8 port,
			   int index,
			   union ib_gid *gid)
{
	struct i40iw_device *iwdev = to_iwdev(ibdev);

	memset(gid->raw, 0, sizeof(gid->raw));
	ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
	return 0;
}
/**
 * i40iw_query_pkey - Query partition key
 * @ibdev: device pointer from stack
 * @port: port number
 * @index: index of pkey
 * @pkey: pointer to store the pkey
 */
static int i40iw_query_pkey(struct ib_device *ibdev,
			    u8 port,
			    u16 index,
			    u16 *pkey)
{
	*pkey = 0;
	return 0;
}
static const struct ib_device_ops i40iw_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_I40IW,
	/* NOTE: Older kernels wrongly use 0 for the uverbs_abi_ver */
	.uverbs_abi_ver = I40IW_ABI_VER,

	.alloc_hw_stats = i40iw_alloc_hw_stats,
	.alloc_mr = i40iw_alloc_mr,
	.alloc_pd = i40iw_alloc_pd,
	.alloc_ucontext = i40iw_alloc_ucontext,
	.create_cq = i40iw_create_cq,
	.create_qp = i40iw_create_qp,
	.dealloc_pd = i40iw_dealloc_pd,
	.dealloc_ucontext = i40iw_dealloc_ucontext,
	.dereg_mr = i40iw_dereg_mr,
	.destroy_cq = i40iw_destroy_cq,
	.destroy_qp = i40iw_destroy_qp,
	.drain_rq = i40iw_drain_rq,
	.drain_sq = i40iw_drain_sq,
	.get_dev_fw_str = i40iw_get_dev_fw_str,
	.get_dma_mr = i40iw_get_dma_mr,
	.get_hw_stats = i40iw_get_hw_stats,
	.get_port_immutable = i40iw_port_immutable,
	.iw_accept = i40iw_accept,
	.iw_add_ref = i40iw_add_ref,
	.iw_connect = i40iw_connect,
	.iw_create_listen = i40iw_create_listen,
	.iw_destroy_listen = i40iw_destroy_listen,
	.iw_get_qp = i40iw_get_qp,
	.iw_reject = i40iw_reject,
	.iw_rem_ref = i40iw_rem_ref,
	.map_mr_sg = i40iw_map_mr_sg,
	.mmap = i40iw_mmap,
	.modify_qp = i40iw_modify_qp,
	.poll_cq = i40iw_poll_cq,
	.post_recv = i40iw_post_recv,
	.post_send = i40iw_post_send,
	.query_device = i40iw_query_device,
	.query_gid = i40iw_query_gid,
	.query_pkey = i40iw_query_pkey,
	.query_port = i40iw_query_port,
	.query_qp = i40iw_query_qp,
	.reg_user_mr = i40iw_reg_user_mr,
	.req_notify_cq = i40iw_req_notify_cq,
	INIT_RDMA_OBJ_SIZE(ib_pd, i40iw_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_cq, i40iw_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, i40iw_ucontext, ibucontext),
};
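/*
 * Illustrative note (not from the original source): ib_set_device_ops()
 * copies this table into the ib_device at registration time, and the
 * INIT_RDMA_OBJ_SIZE() entries let the RDMA core allocate the driver's
 * container structs (e.g. struct i40iw_pd embedding ibpd) itself, so the
 * .alloc_pd/.alloc_ucontext/.create_cq callbacks receive pre-allocated
 * objects rather than allocating their own.
 */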
/**
 * i40iw_init_rdma_device - initialization of iwarp device
 * @iwdev: iwarp device
 */
static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev;
	struct net_device *netdev = iwdev->netdev;
	struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;

	iwibdev = ib_alloc_device(i40iw_ib_device, ibdev);
	if (!iwibdev) {
		i40iw_pr_err("iwibdev == NULL\n");
		return NULL;
	}
	iwdev->iwibdev = iwibdev;
	iwibdev->iwdev = iwdev;

	iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
	ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);

	iwibdev->ibdev.uverbs_cmd_mask =
	    (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
	    (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
	    (1ull << IB_USER_VERBS_CMD_REG_MR) |
	    (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
	    (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
	    (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
	    (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
	    (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
	    (1ull << IB_USER_VERBS_CMD_POST_RECV) |
	    (1ull << IB_USER_VERBS_CMD_POST_SEND);
	iwibdev->ibdev.phys_port_cnt = 1;
	iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count;
	iwibdev->ibdev.dev.parent = &pcidev->dev;
	memcpy(iwibdev->ibdev.iw_ifname, netdev->name,
	       sizeof(iwibdev->ibdev.iw_ifname));
	ib_set_device_ops(&iwibdev->ibdev, &i40iw_dev_ops);

	return iwibdev;
}
/**
 * i40iw_port_ibevent - indicate port event
 * @iwdev: iwarp device
 */
void i40iw_port_ibevent(struct i40iw_device *iwdev)
{
	struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
	struct ib_event event;

	event.device = &iwibdev->ibdev;
	event.element.port_num = 1;
	event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
	ib_dispatch_event(&event);
}
/**
 * i40iw_destroy_rdma_device - destroy rdma device and free resources
 * @iwibdev: IB device ptr
 */
void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
{
	ib_unregister_device(&iwibdev->ibdev);
	wait_event_timeout(iwibdev->iwdev->close_wq,
			   !atomic64_read(&iwibdev->iwdev->use_count),
			   I40IW_EVENT_TIMEOUT);
	ib_dealloc_device(&iwibdev->ibdev);
}
/**
 * i40iw_register_rdma_device - register iwarp device to IB
 * @iwdev: iwarp device
 */
int i40iw_register_rdma_device(struct i40iw_device *iwdev)
{
	int ret;
	struct i40iw_ib_device *iwibdev;

	iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
	if (!iwdev->iwibdev)
		return -ENOMEM;
	iwibdev = iwdev->iwibdev;
	rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
	ret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1);
	if (ret)
		goto error;

	ret = ib_register_device(&iwibdev->ibdev, "i40iw%d");
	if (ret)
		goto error;

	return 0;
error:
	ib_dealloc_device(&iwdev->iwibdev->ibdev);
	return ret;
}
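/*
 * Illustrative note (not from the original source): registration order
 * matters here -- the sysfs group and netdev binding are attached before
 * ib_register_device() makes the device visible to consumers, and on any
 * failure the ib_device allocated by i40iw_init_rdma_device() is released
 * with ib_dealloc_device() rather than kfree(), since the RDMA core owns
 * that allocation.
 */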