/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include <rdma/hns-abi.h>
#include "hns_roce_hem.h"

/**
 * hns_get_gid_index - Get gid index.
 * @hr_dev: pointer to structure hns_roce_dev.
 * @port: port, value range: 0 ~ MAX
 * @gid_index: gid_index, value range: 0 ~ MAX
 *
 * N ports share one GID table; entries are allocated as follows:
 *		GID[0][0], GID[1][0], ..... GID[N - 1][0],
 *		GID[0][1], GID[1][1], ..... GID[N - 1][1],
 *		And so on
 */
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
	return gid_index * hr_dev->caps.num_ports + port;
}

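/*
 * Worked example (illustrative values, not from the original source): with
 * caps.num_ports == 2, gid_index 3 on port 1 resolves to table entry
 * 3 * 2 + 1 == 7, i.e. the per-port entries are interleaved row by row.
 */
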
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
	u8 phy_port;
	u32 i;

	if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
		return 0;

	for (i = 0; i < ETH_ALEN; i++)
		hr_dev->dev_addr[port][i] = addr[i];

	phy_port = hr_dev->iboe.phy_port[port];
	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}

static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u8 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);

	return ret;
}

static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	struct ib_gid_attr zattr = {};
	u8 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);

	return ret;
}

static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
			   unsigned long event)
{
	struct device *dev = hr_dev->dev;
	struct net_device *netdev;
	int ret = 0;

	netdev = hr_dev->iboe.netdevs[port];
	if (!netdev) {
		dev_err(dev, "Can't find netdev on port(%u)!\n", port);
		return -ENODEV;
	}

	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		break;
	case NETDEV_DOWN:
		/*
		 * In v1 engine, only support all ports closed together.
		 */
		break;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
		break;
	}

	return ret;
}

static int hns_roce_netdev_event(struct notifier_block *self,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct hns_roce_ib_iboe *iboe = NULL;
	struct hns_roce_dev *hr_dev = NULL;
	int ret;
	u8 port;

	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
	iboe = &hr_dev->iboe;

	for (port = 0; port < hr_dev->caps.num_ports; port++) {
		if (dev == iboe->netdevs[port]) {
			ret = handle_en_event(hr_dev, port, event);
			if (ret)
				return NOTIFY_DONE;
			break;
		}
	}

	return NOTIFY_DONE;
}

static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
	int ret;
	u8 i;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (hr_dev->hw->set_mtu)
			hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
					    hr_dev->caps.max_mtu);
		ret = hns_roce_set_mac(hr_dev, i,
				       hr_dev->iboe.netdevs[i]->dev_addr);
		if (ret)
			return ret;
	}

	return 0;
}

static int hns_roce_query_device(struct ib_device *ib_dev,
				 struct ib_device_attr *props,
				 struct ib_udata *uhw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	memset(props, 0, sizeof(*props));

	props->fw_ver = hr_dev->caps.fw_ver;
	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
	props->max_mr_size = (u64)(~(0ULL));
	props->page_size_cap = hr_dev->caps.page_size_cap;
	props->vendor_id = hr_dev->vendor_id;
	props->vendor_part_id = hr_dev->vendor_part_id;
	props->hw_ver = hr_dev->hw_rev;
	props->max_qp = hr_dev->caps.num_qps;
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = hr_dev->caps.max_sq_sg;
	props->max_recv_sge = hr_dev->caps.max_rq_sg;
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
	props->max_mr = hr_dev->caps.num_mtpts;
	props->max_pd = hr_dev->caps.num_pds;
	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
	props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		props->max_srq = hr_dev->caps.num_srqs;
		props->max_srq_wr = hr_dev->caps.max_srq_wrs;
		props->max_srq_sge = hr_dev->caps.max_srq_sges;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
		props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
	}

	return 0;
}

static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct net_device *net_dev;
	unsigned long flags;
	enum ib_mtu mtu;
	u8 port;

	assert(port_num > 0);
	port = port_num - 1;

	/* props being zeroed by the caller, avoid zeroing it here */

	props->max_mtu = hr_dev->caps.max_mtu;
	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	net_dev = hr_dev->iboe.netdevs[port];
	if (!net_dev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		dev_err(dev, "Find netdev %u failed!\n", port);
		return -EINVAL;
	}

	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
	props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
		       IB_PORT_ACTIVE :
		       IB_PORT_DOWN;
	props->phys_state = props->state == IB_PORT_ACTIVE ?
			    IB_PORT_PHYS_STATE_LINK_UP :
			    IB_PORT_PHYS_STATE_DISABLED;

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}

static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
						    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
			       u16 *pkey)
{
	*pkey = PKEY_ID;

	return 0;
}

static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
				  struct ib_device_modify *props)
{
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
	}

	return 0;
}

static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
				   struct ib_udata *udata)
{
	int ret;
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	struct hns_roce_ib_alloc_ucontext_resp resp = {};
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);

	if (!hr_dev->active)
		return -EAGAIN;

	resp.qp_tab_size = hr_dev->caps.num_qps;

	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
	if (ret)
		goto error_fail_uar_alloc;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		INIT_LIST_HEAD(&context->page_list);
		mutex_init(&context->page_mutex);
	}

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret)
		goto error_fail_copy_to_udata;

	return 0;

error_fail_copy_to_udata:
	hns_roce_uar_free(hr_dev, &context->uar);

error_fail_uar_alloc:
	return ret;
}

static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);

	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
}

static int hns_roce_mmap(struct ib_ucontext *context,
			 struct vm_area_struct *vma)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(context->device);

	switch (vma->vm_pgoff) {
	case 0:
		return rdma_user_mmap_io(context, vma,
					 to_hr_ucontext(context)->uar.pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);

	/* vm_pgoff: 1 -- TPTR */
	case 1:
		if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
			return -EINVAL;
		/*
		 * FIXME: using io_remap_pfn_range on the dma address returned
		 * by dma_alloc_coherent is totally wrong.
		 */
		return rdma_user_mmap_io(context, vma,
					 hr_dev->tptr_dma_addr >> PAGE_SHIFT,
					 hr_dev->tptr_size,
					 vma->vm_page_prot,
					 NULL);

	default:
		return -EINVAL;
	}
}

static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}

static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

	hr_dev->active = false;
	unregister_netdevice_notifier(&iboe->nb);
	ib_unregister_device(&hr_dev->ib_dev);
}

static const struct ib_device_ops hns_roce_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_HNS,
	.uverbs_abi_ver = 1,
	.uverbs_no_driver_id_binding = 1,

	.add_gid = hns_roce_add_gid,
	.alloc_pd = hns_roce_alloc_pd,
	.alloc_ucontext = hns_roce_alloc_ucontext,
	.create_ah = hns_roce_create_ah,
	.create_cq = hns_roce_create_cq,
	.create_qp = hns_roce_create_qp,
	.dealloc_pd = hns_roce_dealloc_pd,
	.dealloc_ucontext = hns_roce_dealloc_ucontext,
	.del_gid = hns_roce_del_gid,
	.dereg_mr = hns_roce_dereg_mr,
	.destroy_ah = hns_roce_destroy_ah,
	.destroy_cq = hns_roce_destroy_cq,
	.disassociate_ucontext = hns_roce_disassociate_ucontext,
	.fill_res_entry = hns_roce_fill_res_entry,
	.get_dma_mr = hns_roce_get_dma_mr,
	.get_link_layer = hns_roce_get_link_layer,
	.get_port_immutable = hns_roce_port_immutable,
	.mmap = hns_roce_mmap,
	.modify_device = hns_roce_modify_device,
	.modify_qp = hns_roce_modify_qp,
	.query_ah = hns_roce_query_ah,
	.query_device = hns_roce_query_device,
	.query_pkey = hns_roce_query_pkey,
	.query_port = hns_roce_query_port,
	.reg_user_mr = hns_roce_reg_user_mr,

	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};

static const struct ib_device_ops hns_roce_dev_mr_ops = {
	.rereg_user_mr = hns_roce_rereg_user_mr,
};

static const struct ib_device_ops hns_roce_dev_mw_ops = {
	.alloc_mw = hns_roce_alloc_mw,
	.dealloc_mw = hns_roce_dealloc_mw,
};

static const struct ib_device_ops hns_roce_dev_frmr_ops = {
	.alloc_mr = hns_roce_alloc_mr,
	.map_mr_sg = hns_roce_map_mr_sg,
};

static const struct ib_device_ops hns_roce_dev_srq_ops = {
	.create_srq = hns_roce_create_srq,
	.destroy_srq = hns_roce_destroy_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};

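/*
 * The op tables above are optional extensions: hns_roce_register_device()
 * below always installs the base hns_roce_dev_ops and merges in the
 * MR-rereg, MW, FRMR and SRQ tables only when the corresponding capability
 * flag is advertised by the hardware.
 */
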
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = hr_dev->dev;
	unsigned int i;

	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;

	ib_dev->node_type = RDMA_NODE_IB_CA;
	ib_dev->dev.parent = dev;

	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
	ib_dev->uverbs_cmd_mask =
		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_REG_MR) |
		(1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);

	ib_dev->uverbs_ex_cmd_mask |= (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
		ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
	}

	/* MW */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
		ib_dev->uverbs_cmd_mask |=
					(1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
					(1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
		ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
	}

	/* FRMR */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

	/* SRQ */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ib_dev->uverbs_cmd_mask |=
				(1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV);
		ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
		ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
	}

	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (!hr_dev->iboe.netdevs[i])
			continue;

		ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
					   i + 1);
		if (ret)
			return ret;
	}
	ret = ib_register_device(ib_dev, "hns_%d");
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_mac(hr_dev);
	if (ret) {
		dev_err(dev, "setup_mtu_mac failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	hr_dev->active = true;
	return 0;

error_failed_setup_mtu_mac:
	ib_unregister_device(ib_dev);

	return ret;
}

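/*
 * hns_roce_init_hem() below allocates the hardware entry memory (HEM)
 * context tables: MTT/MTPT for memory translation, QPC/IRRL/TRRL for QPs,
 * CQC for CQs, plus the optional SRQ, SCC and timer tables when the
 * corresponding capabilities are reported.  On failure, the tables that
 * were already set up are released again in reverse order.
 */
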
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
				      HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
				      hr_dev->caps.num_mtt_segs, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTT context memory, aborting.\n");
		return ret;
	}

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->mr_table.mtt_cqe_table,
					      HEM_TYPE_CQE,
					      hr_dev->caps.mtt_entry_sz,
					      hr_dev->caps.num_cqe_segs, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init CQE context memory, aborting.\n");
			goto err_unmap_cqe;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.trrl_table,
					      HEM_TYPE_TRRL,
					      hr_dev->caps.trrl_entry_sz *
					      hr_dev->caps.max_qp_dest_rdma,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init trrl_table memory, aborting.\n");
			goto err_unmap_irrl;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs, 1);
	if (ret) {
		dev_err(dev, "Failed to init CQ context memory, aborting.\n");
		goto err_unmap_trrl;
	}

	if (hr_dev->caps.srqc_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
					      HEM_TYPE_SRQC,
					      hr_dev->caps.srqc_entry_sz,
					      hr_dev->caps.num_srqs, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SRQ context memory, aborting.\n");
			goto err_unmap_cq;
		}
	}

	if (hr_dev->caps.num_srqwqe_segs) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->mr_table.mtt_srqwqe_table,
					      HEM_TYPE_SRQWQE,
					      hr_dev->caps.mtt_entry_sz,
					      hr_dev->caps.num_srqwqe_segs, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init MTT srqwqe memory, aborting.\n");
			goto err_unmap_srq;
		}
	}

	if (hr_dev->caps.num_idx_segs) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->mr_table.mtt_idx_table,
					      HEM_TYPE_IDX,
					      hr_dev->caps.idx_entry_sz,
					      hr_dev->caps.num_idx_segs, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init MTT idx memory, aborting.\n");
			goto err_unmap_srqwqe;
		}
	}

	if (hr_dev->caps.sccc_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.sccc_table,
					      HEM_TYPE_SCCC,
					      hr_dev->caps.sccc_entry_sz,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SCC context memory, aborting.\n");
			goto err_unmap_idx;
		}
	}

	if (hr_dev->caps.qpc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
					      HEM_TYPE_QPC_TIMER,
					      hr_dev->caps.qpc_timer_entry_sz,
					      hr_dev->caps.num_qpc_timer, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init QPC timer memory, aborting.\n");
			goto err_unmap_ctx;
		}
	}

	if (hr_dev->caps.cqc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
					      HEM_TYPE_CQC_TIMER,
					      hr_dev->caps.cqc_timer_entry_sz,
					      hr_dev->caps.num_cqc_timer, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init CQC timer memory, aborting.\n");
			goto err_unmap_qpc_timer;
		}
	}

	return 0;

err_unmap_qpc_timer:
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);

err_unmap_ctx:
	if (hr_dev->caps.sccc_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);

err_unmap_idx:
	if (hr_dev->caps.num_idx_segs)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_idx_table);

err_unmap_srqwqe:
	if (hr_dev->caps.num_srqwqe_segs)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_srqwqe_table);

err_unmap_srq:
	if (hr_dev->caps.srqc_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);

err_unmap_cq:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

err_unmap_mtt:
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_cqe_table);

err_unmap_cqe:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);

	return ret;
}

/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	spin_lock_init(&hr_dev->sm_lock);
	spin_lock_init(&hr_dev->bt_cmd_lock);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		INIT_LIST_HEAD(&hr_dev->pgdir_list);
		mutex_init(&hr_dev->pgdir_mutex);
	}

	ret = hns_roce_init_uar_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to initialize uar table. aborting\n");
		return ret;
	}

	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "Failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_pd_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init protected domain table.\n");
		goto err_uar_alloc_free;
	}

	ret = hns_roce_init_mr_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init memory region table.\n");
		goto err_pd_table_free;
	}

	ret = hns_roce_init_cq_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init completion queue table.\n");
		goto err_mr_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init queue pair table.\n");
		goto err_cq_table_free;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_srq_table(hr_dev);
		if (ret) {
			dev_err(dev,
				"Failed to init share receive queue table.\n");
			goto err_qp_table_free;
		}
	}

	return 0;

err_qp_table_free:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_qp_table(hr_dev);

err_cq_table_free:
	hns_roce_cleanup_cq_table(hr_dev);

err_mr_table_free:
	hns_roce_cleanup_mr_table(hr_dev);

err_pd_table_free:
	hns_roce_cleanup_pd_table(hr_dev);

err_uar_alloc_free:
	hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);

err_uar_table_free:
	hns_roce_cleanup_uar_table(hr_dev);
	return ret;
}

static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	unsigned long flags;

	spin_lock_irqsave(&hr_cq->lock, flags);
	if (cq->comp_handler) {
		if (!hr_cq->is_armed) {
			hr_cq->is_armed = 1;
			list_add_tail(&hr_cq->node, cq_list);
		}
	}
	spin_unlock_irqrestore(&hr_cq->lock, flags);
}

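/*
 * hns_roce_handle_device_err() below collects the CQs of every QP that
 * still has unfinished work and generates a completion event on each of
 * them, so that consumers waiting on those CQs are notified rather than
 * left hanging when the device runs into an unrecoverable error.
 */
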
void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp *hr_qp;
	struct hns_roce_cq *hr_cq;
	struct list_head cq_list;
	unsigned long flags_qp;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_list);

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
		if (hr_qp->sq.tail != hr_qp->sq.head)
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);

		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
		if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
	}

	list_for_each_entry(hr_cq, &cq_list, node)
		hns_roce_cq_completion(hr_dev, hr_cq->cqn);

	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

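/*
 * Bring-up order used by hns_roce_init(): optional engine reset, command
 * queue init, hardware profiling, mailbox command init, EQ init (event
 * mode is only enabled once EQs exist), HEM tables, software resource
 * tables, engine-specific hw_init, and finally registration with the RDMA
 * core.  The error labels unwind these steps in reverse order.
 */
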
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	if (hr_dev->hw->reset) {
		ret = hr_dev->hw->reset(hr_dev, true);
		if (ret) {
			dev_err(dev, "Reset RoCE engine failed!\n");
			return ret;
		}
	}
	hr_dev->is_reset = false;

	if (hr_dev->hw->cmq_init) {
		ret = hr_dev->hw->cmq_init(hr_dev);
		if (ret) {
			dev_err(dev, "Init RoCE Command Queue failed!\n");
			goto error_failed_cmq_init;
		}
	}

	ret = hr_dev->hw->hw_profile(hr_dev);
	if (ret) {
		dev_err(dev, "Get RoCE engine profile failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	/* EQ depends on poll mode, event mode depends on EQ */
	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret) {
			dev_warn(dev,
				 "Cmd event mode failed, set back to poll!\n");
			hns_roce_cmd_use_polling(hr_dev);
		}
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	if (hr_dev->hw->hw_init) {
		ret = hr_dev->hw->hw_init(hr_dev);
		if (ret) {
			dev_err(dev, "hw_init failed!\n");
			goto error_failed_engine_init;
		}
	}

	INIT_LIST_HEAD(&hr_dev->qp_list);
	spin_lock_init(&hr_dev->qp_list_lock);

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	return 0;

error_failed_register_device:
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);
	hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);

error_failed_cmq_init:
	if (hr_dev->hw->reset) {
		if (hr_dev->hw->reset(hr_dev, false))
			dev_err(dev, "Dereset RoCE engine failed!\n");
	}

	return ret;
}

void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_device(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hr_dev->hw->cleanup_eq(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);
	if (hr_dev->hw->reset)
		hr_dev->hw->reset(hr_dev, false);
}

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");