/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include <rdma/hns-abi.h>
#include "hns_roce_hem.h"

/**
 * hns_get_gid_index - Get gid index.
 * @hr_dev: pointer to structure hns_roce_dev.
 * @port: port, value range: 0 ~ MAX
 * @gid_index: gid_index, value range: 0 ~ MAX
 * Description:
 *    N ports shared gids, allocation method as follow:
 *		GID[0][0], GID[1][0],.....GID[N - 1][0],
 *		GID[0][1], GID[1][1],.....GID[N - 1][1],
 *		And so on
 */
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
	return gid_index * hr_dev->caps.num_ports + port;
}

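/*
 * Update the driver's shadow copy of the port MAC address and program the
 * new address into the hardware through the hw->set_mac() hook. Unchanged
 * addresses are skipped.
 */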
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
	u8 phy_port;
	u32 i = 0;

	if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
		return 0;

	for (i = 0; i < ETH_ALEN; i++)
		hr_dev->dev_addr[port][i] = addr[i];

	phy_port = hr_dev->iboe.phy_port[port];
	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}

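/*
 * add_gid/del_gid callbacks: validate the port number, then program the
 * GID table entry through the hw->set_gid() hook (del_gid clears the
 * entry by writing zgid).
 */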
static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u8 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);

	return ret;
}

static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	struct ib_gid_attr zattr = {};
	u8 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);

	return ret;
}

static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
			   unsigned long event)
{
	struct device *dev = hr_dev->dev;
	struct net_device *netdev;
	int ret = 0;

	netdev = hr_dev->iboe.netdevs[port];
	if (!netdev) {
		dev_err(dev, "Can't find netdev on port(%u)!\n", port);
		return -ENODEV;
	}

	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		break;
	case NETDEV_DOWN:
		/*
		 * In v1 engine, only support all ports closed together.
		 */
		break;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
		break;
	}

	return ret;
}

static int hns_roce_netdev_event(struct notifier_block *self,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct hns_roce_ib_iboe *iboe = NULL;
	struct hns_roce_dev *hr_dev = NULL;
	int ret = 0;
	u8 port;

	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
	iboe = &hr_dev->iboe;

	for (port = 0; port < hr_dev->caps.num_ports; port++) {
		if (dev == iboe->netdevs[port]) {
			ret = handle_en_event(hr_dev, port, event);
			break;
		}
	}

	return ret ? NOTIFY_BAD : NOTIFY_DONE;
}

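/*
 * Program every port with the maximum supported MTU and with the MAC
 * address of its backing netdev.
 */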
static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
	int ret;
	u8 i;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (hr_dev->hw->set_mtu)
			hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
					    hr_dev->caps.max_mtu);
		ret = hns_roce_set_mac(hr_dev, i,
				       hr_dev->iboe.netdevs[i]->dev_addr);
		if (ret)
			return ret;
	}

	return 0;
}

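/*
 * query_device verb: report the QP/CQ/MR/PD limits and capability flags
 * discovered during hardware profiling to the RDMA core.
 */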
static int hns_roce_query_device(struct ib_device *ib_dev,
				 struct ib_device_attr *props,
				 struct ib_udata *uhw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	memset(props, 0, sizeof(*props));

	props->fw_ver = hr_dev->caps.fw_ver;
	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
	props->max_mr_size = (u64)(~(0ULL));
	props->page_size_cap = hr_dev->caps.page_size_cap;
	props->vendor_id = hr_dev->vendor_id;
	props->vendor_part_id = hr_dev->vendor_part_id;
	props->hw_ver = hr_dev->hw_rev;
	props->max_qp = hr_dev->caps.num_qps;
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = hr_dev->caps.max_sq_sg;
	props->max_recv_sge = hr_dev->caps.max_rq_sg;
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
	props->max_mr = hr_dev->caps.num_mtpts;
	props->max_pd = hr_dev->caps.num_pds;
	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
	props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		props->max_srq = hr_dev->caps.max_srqs;
		props->max_srq_wr = hr_dev->caps.max_srq_wrs;
		props->max_srq_sge = hr_dev->caps.max_srq_sges;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
		props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
	}

	return 0;
}

static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct net_device *net_dev;
	unsigned long flags;
	enum ib_mtu mtu;
	u8 port;

	assert(port_num > 0);
	port = port_num - 1;

	/* props being zeroed by the caller, avoid zeroing it here */

	props->max_mtu = hr_dev->caps.max_mtu;
	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	net_dev = hr_dev->iboe.netdevs[port];
	if (!net_dev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		dev_err(dev, "Find netdev %u failed!\n", port);
		return -EINVAL;
	}

	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
	props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = (props->state == IB_PORT_ACTIVE) ?
			     IB_PORT_PHYS_STATE_LINK_UP :
			     IB_PORT_PHYS_STATE_DISABLED;

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}

static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
						    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
			       u16 *pkey)
{
	*pkey = PKEY_ID;

	return 0;
}

static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
				  struct ib_device_modify *props)
{
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
	}

	return 0;
}

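/*
 * Set up a user context: allocate a UAR for the caller and return the QP
 * table size through the alloc_ucontext response.
 */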
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
				   struct ib_udata *udata)
{
	int ret;
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	struct hns_roce_ib_alloc_ucontext_resp resp = {};
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);

	if (!hr_dev->active)
		return -EAGAIN;

	resp.qp_tab_size = hr_dev->caps.num_qps;

	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
	if (ret)
		goto error_fail_uar_alloc;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		INIT_LIST_HEAD(&context->page_list);
		mutex_init(&context->page_mutex);
	}

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret)
		goto error_fail_copy_to_udata;

	return 0;

error_fail_copy_to_udata:
	hns_roce_uar_free(hr_dev, &context->uar);

error_fail_uar_alloc:
	return ret;
}

static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);

	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
}

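/*
 * Map device memory to userspace: vm_pgoff 0 is the UAR (doorbell) page,
 * vm_pgoff 1 the TPTR area; any other offset is rejected.
 */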
static int hns_roce_mmap(struct ib_ucontext *context,
			 struct vm_area_struct *vma)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(context->device);

	switch (vma->vm_pgoff) {
	case 0:
		return rdma_user_mmap_io(context, vma,
					 to_hr_ucontext(context)->uar.pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);

	/* vm_pgoff: 1 -- TPTR */
	case 1:
		if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
			return -EINVAL;
		/*
		 * FIXME: using io_remap_pfn_range on the dma address returned
		 * by dma_alloc_coherent is totally wrong.
		 */
		return rdma_user_mmap_io(context, vma,
					 hr_dev->tptr_dma_addr >> PAGE_SHIFT,
					 hr_dev->tptr_size,
					 vma->vm_page_prot,
					 NULL);

	default:
		return -EINVAL;
	}
}

static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}

static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

	hr_dev->active = false;
	unregister_netdevice_notifier(&iboe->nb);
	ib_unregister_device(&hr_dev->ib_dev);
}

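/* Verbs ops shared by all hns hardware versions. */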
static const struct ib_device_ops hns_roce_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_HNS,
	.uverbs_abi_ver = 1,
	.uverbs_no_driver_id_binding = 1,

	.add_gid = hns_roce_add_gid,
	.alloc_pd = hns_roce_alloc_pd,
	.alloc_ucontext = hns_roce_alloc_ucontext,
	.create_ah = hns_roce_create_ah,
	.create_cq = hns_roce_create_cq,
	.create_qp = hns_roce_create_qp,
	.dealloc_pd = hns_roce_dealloc_pd,
	.dealloc_ucontext = hns_roce_dealloc_ucontext,
	.del_gid = hns_roce_del_gid,
	.dereg_mr = hns_roce_dereg_mr,
	.destroy_ah = hns_roce_destroy_ah,
	.destroy_cq = hns_roce_destroy_cq,
	.disassociate_ucontext = hns_roce_disassociate_ucontext,
	.fill_res_entry = hns_roce_fill_res_entry,
	.get_dma_mr = hns_roce_get_dma_mr,
	.get_link_layer = hns_roce_get_link_layer,
	.get_port_immutable = hns_roce_port_immutable,
	.mmap = hns_roce_mmap,
	.modify_device = hns_roce_modify_device,
	.modify_qp = hns_roce_modify_qp,
	.query_ah = hns_roce_query_ah,
	.query_device = hns_roce_query_device,
	.query_pkey = hns_roce_query_pkey,
	.query_port = hns_roce_query_port,
	.reg_user_mr = hns_roce_reg_user_mr,

	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};

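/* Optional op tables, merged in only when the matching capability flag is set. */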
static const struct ib_device_ops hns_roce_dev_mr_ops = {
	.rereg_user_mr = hns_roce_rereg_user_mr,
};

static const struct ib_device_ops hns_roce_dev_mw_ops = {
	.alloc_mw = hns_roce_alloc_mw,
	.dealloc_mw = hns_roce_dealloc_mw,
};

static const struct ib_device_ops hns_roce_dev_frmr_ops = {
	.alloc_mr = hns_roce_alloc_mr,
	.map_mr_sg = hns_roce_map_mr_sg,
};

static const struct ib_device_ops hns_roce_dev_srq_ops = {
	.create_srq = hns_roce_create_srq,
	.destroy_srq = hns_roce_destroy_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};

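/*
 * Register the device with the RDMA core: fill in the ib_device fields and
 * the supported uverbs command masks, merge the common, hw-specific and
 * optional op tables, bind the port netdevs, then set up MTU/MAC and the
 * netdev notifier.
 */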
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = hr_dev->dev;
	unsigned int i;

	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;

	ib_dev->node_type = RDMA_NODE_IB_CA;
	ib_dev->dev.parent = dev;

	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
	ib_dev->uverbs_cmd_mask =
		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_REG_MR) |
		(1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);

	ib_dev->uverbs_ex_cmd_mask |=
		(1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
		ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
	}

	/* MW */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
		ib_dev->uverbs_cmd_mask |=
					(1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
					(1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
		ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
	}

	/* FRMR */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

	/* SRQ */
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ib_dev->uverbs_cmd_mask |=
				(1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
				(1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV);
		ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
		ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
	}

	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (!hr_dev->iboe.netdevs[i])
			continue;

		ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
					   i + 1);
		if (ret)
			return ret;
	}
	ret = ib_register_device(ib_dev, "hns_%d");
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_mac(hr_dev);
	if (ret) {
		dev_err(dev, "setup_mtu_mac failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	hr_dev->active = true;
	return 0;

error_failed_setup_mtu_mac:
	ib_unregister_device(ib_dev);

	return ret;
}

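/*
 * Allocate the HEM (Hardware Entry Memory) tables backing the hardware
 * contexts: MTT/MTPT, QPC/IRRL/TRRL, CQC and the optional SRQC, SCC and
 * timer tables. On failure the labels below unwind in reverse order.
 */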
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
				      HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
				      hr_dev->caps.num_mtt_segs, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTT context memory, aborting.\n");
		return ret;
	}

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->mr_table.mtt_cqe_table,
					      HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
					      hr_dev->caps.num_cqe_segs, 1);
		if (ret) {
			dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
			goto err_unmap_cqe;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.trrl_table,
					      HEM_TYPE_TRRL,
					      hr_dev->caps.trrl_entry_sz *
					      hr_dev->caps.max_qp_dest_rdma,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init trrl_table memory, aborting.\n");
			goto err_unmap_irrl;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs, 1);
	if (ret) {
		dev_err(dev, "Failed to init CQ context memory, aborting.\n");
		goto err_unmap_trrl;
	}

	if (hr_dev->caps.srqc_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
					      HEM_TYPE_SRQC,
					      hr_dev->caps.srqc_entry_sz,
					      hr_dev->caps.num_srqs, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SRQ context memory, aborting.\n");
			goto err_unmap_cq;
		}
	}

	if (hr_dev->caps.num_srqwqe_segs) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->mr_table.mtt_srqwqe_table,
					      HEM_TYPE_SRQWQE,
					      hr_dev->caps.mtt_entry_sz,
					      hr_dev->caps.num_srqwqe_segs, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init MTT srqwqe memory, aborting.\n");
			goto err_unmap_srq;
		}
	}

	if (hr_dev->caps.num_idx_segs) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->mr_table.mtt_idx_table,
					      HEM_TYPE_IDX,
					      hr_dev->caps.idx_entry_sz,
					      hr_dev->caps.num_idx_segs, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init MTT idx memory, aborting.\n");
			goto err_unmap_srqwqe;
		}
	}

	if (hr_dev->caps.sccc_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.sccc_table,
					      HEM_TYPE_SCCC,
					      hr_dev->caps.sccc_entry_sz,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SCC context memory, aborting.\n");
			goto err_unmap_idx;
		}
	}

	if (hr_dev->caps.qpc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qpc_timer_table,
					      HEM_TYPE_QPC_TIMER,
					      hr_dev->caps.qpc_timer_entry_sz,
					      hr_dev->caps.num_qpc_timer, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init QPC timer memory, aborting.\n");
			goto err_unmap_ctx;
		}
	}

	if (hr_dev->caps.cqc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->cqc_timer_table,
					      HEM_TYPE_CQC_TIMER,
					      hr_dev->caps.cqc_timer_entry_sz,
					      hr_dev->caps.num_cqc_timer, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init CQC timer memory, aborting.\n");
			goto err_unmap_qpc_timer;
		}
	}

	return 0;

err_unmap_qpc_timer:
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qpc_timer_table);

err_unmap_ctx:
	if (hr_dev->caps.sccc_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);

err_unmap_idx:
	if (hr_dev->caps.num_idx_segs)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_idx_table);

err_unmap_srqwqe:
	if (hr_dev->caps.num_srqwqe_segs)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_srqwqe_table);

err_unmap_srq:
	if (hr_dev->caps.srqc_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);

err_unmap_cq:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

err_unmap_mtt:
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_cqe_table);

err_unmap_cqe:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);

	return ret;
}

/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 * Return : int
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	spin_lock_init(&hr_dev->sm_lock);
	spin_lock_init(&hr_dev->bt_cmd_lock);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		INIT_LIST_HEAD(&hr_dev->pgdir_list);
		mutex_init(&hr_dev->pgdir_mutex);
	}

	ret = hns_roce_init_uar_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to initialize uar table. aborting\n");
		return ret;
	}

	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "Failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_pd_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init protected domain table.\n");
		goto err_uar_alloc_free;
	}

	ret = hns_roce_init_mr_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init memory region table.\n");
		goto err_pd_table_free;
	}

	ret = hns_roce_init_cq_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init completion queue table.\n");
		goto err_mr_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init queue pair table.\n");
		goto err_cq_table_free;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_srq_table(hr_dev);
		if (ret) {
			dev_err(dev,
				"Failed to init share receive queue table.\n");
			goto err_qp_table_free;
		}
	}

	return 0;

err_qp_table_free:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_qp_table(hr_dev);

err_cq_table_free:
	hns_roce_cleanup_cq_table(hr_dev);

err_mr_table_free:
	hns_roce_cleanup_mr_table(hr_dev);

err_pd_table_free:
	hns_roce_cleanup_pd_table(hr_dev);

err_uar_alloc_free:
	hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);

err_uar_table_free:
	hns_roce_cleanup_uar_table(hr_dev);
	return ret;
}

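/*
 * Bring the device up: reset the engine, init the command queue, probe the
 * hardware profile, init the command interface and EQs, allocate HEM and
 * software resources, run hw-specific init and finally register with the
 * RDMA core. Each failure unwinds through the error labels below.
 */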
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	if (hr_dev->hw->reset) {
		ret = hr_dev->hw->reset(hr_dev, true);
		if (ret) {
			dev_err(dev, "Reset RoCE engine failed!\n");
			return ret;
		}
	}
	hr_dev->is_reset = false;

	if (hr_dev->hw->cmq_init) {
		ret = hr_dev->hw->cmq_init(hr_dev);
		if (ret) {
			dev_err(dev, "Init RoCE Command Queue failed!\n");
			goto error_failed_cmq_init;
		}
	}

	ret = hr_dev->hw->hw_profile(hr_dev);
	if (ret) {
		dev_err(dev, "Get RoCE engine profile failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	/* EQ depends on poll mode, event mode depends on EQ */
	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret) {
			dev_warn(dev,
				 "Cmd event mode failed, set back to poll!\n");
			hns_roce_cmd_use_polling(hr_dev);
		}
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	if (hr_dev->hw->hw_init) {
		ret = hr_dev->hw->hw_init(hr_dev);
		if (ret) {
			dev_err(dev, "hw_init failed!\n");
			goto error_failed_engine_init;
		}
	}

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	return 0;

error_failed_register_device:
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);
	hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);

error_failed_cmq_init:
	if (hr_dev->hw->reset) {
		if (hr_dev->hw->reset(hr_dev, false))
			dev_err(dev, "Dereset RoCE engine failed!\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hns_roce_init);

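/* Tear the device down in the reverse order of hns_roce_init(). */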
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_device(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hr_dev->hw->cleanup_eq(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);
	if (hr_dev->hw->reset)
		hr_dev->hw->reset(hr_dev, false);
}
EXPORT_SYMBOL_GPL(hns_roce_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");