/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include <rdma/hns-abi.h>
#include "hns_roce_hem.h"
/**
 * hns_get_gid_index - Get gid index.
 * @hr_dev: pointer to structure hns_roce_dev.
 * @port: port, value range: 0 ~ MAX
 * @gid_index: gid_index, value range: 0 ~ MAX
 * Description:
 *    N ports share one GID table, allocated round-robin by port:
 *		GID[0][0], GID[1][0],.....GID[N - 1][0],
 *		GID[0][1], GID[1][1],.....GID[N - 1][1],
 *		And so on
 */
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
	return gid_index * hr_dev->caps.num_ports + port;
}
EXPORT_SYMBOL_GPL(hns_get_gid_index);
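/*
 * Illustrative example (not in the original source; it follows directly
 * from the formula above): with caps.num_ports == 2, gid_index 3 of
 * port 1 lands in shared table slot 3 * 2 + 1 = 7.
 */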
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
	u8 phy_port;
	u32 i;

	/* Skip the hardware update when the cached MAC is unchanged. */
	if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
		return 0;

	for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
		hr_dev->dev_addr[port][i] = addr[i];

	phy_port = hr_dev->iboe.phy_port[port];
	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}
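/*
 * GID table updates are pushed straight to hardware through hw->set_gid();
 * the iboe.lock spinlock serializes them with other users of the iboe
 * state, such as hns_roce_query_port().
 */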
static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
			    unsigned int index, const union ib_gid *gid,
			    const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(device);
	u8 port = port_num - 1;
	unsigned long flags;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	ret = hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid,
				  attr);

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return ret;
}
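/*
 * Deleting a GID is implemented by writing an all-zero GID (zgid) back
 * into the same table slot; there is no separate "clear" hardware op.
 */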
static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
			    unsigned int index, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(device);
	union ib_gid zgid = { {0} };
	u8 port = port_num - 1;
	unsigned long flags;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	ret = hr_dev->hw->set_gid(hr_dev, port, index, &zgid, NULL);

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return ret;
}
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
			   unsigned long event)
{
	struct device *dev = hr_dev->dev;
	struct net_device *netdev;
	int ret = 0;

	netdev = hr_dev->iboe.netdevs[port];
	if (!netdev) {
		dev_err(dev, "port(%d) can't find netdev\n", port);
		return -ENODEV;
	}

	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		break;
	case NETDEV_DOWN:
		/*
		 * The v1 engine only supports closing all ports together,
		 * so nothing is done here for a single port going down.
		 */
		break;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
		break;
	}

	return ret;
}
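/*
 * Notifier callback registered in hns_roce_register_device(): match the
 * triggering net_device against each port's bound netdev and, on a hit,
 * let handle_en_event() refresh that port's MAC address.
 */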
static int hns_roce_netdev_event(struct notifier_block *self,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct hns_roce_ib_iboe *iboe = NULL;
	struct hns_roce_dev *hr_dev = NULL;
	u8 port = 0;
	int ret = 0;

	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
	iboe = &hr_dev->iboe;

	for (port = 0; port < hr_dev->caps.num_ports; port++) {
		if (dev == iboe->netdevs[port]) {
			ret = handle_en_event(hr_dev, port, event);
			if (ret)
				return NOTIFY_DONE;
			break;
		}
	}

	return NOTIFY_DONE;
}
static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
	int ret;
	u8 i;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (hr_dev->hw->set_mtu)
			hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
					    hr_dev->caps.max_mtu);
		ret = hns_roce_set_mac(hr_dev, i,
				       hr_dev->iboe.netdevs[i]->dev_addr);
		if (ret)
			return ret;
	}

	return 0;
}
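/*
 * Report device attributes to the IB core. Everything here is read from
 * hr_dev->caps, which the hardware layer fills in during the
 * hw->hw_profile() step of hns_roce_init().
 */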
static int hns_roce_query_device(struct ib_device *ib_dev,
				 struct ib_device_attr *props,
				 struct ib_udata *uhw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	memset(props, 0, sizeof(*props));

	props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid);
	props->max_mr_size = (u64)(~(0ULL));
	props->page_size_cap = hr_dev->caps.page_size_cap;
	props->vendor_id = hr_dev->vendor_id;
	props->vendor_part_id = hr_dev->vendor_part_id;
	props->hw_ver = hr_dev->hw_rev;
	props->max_qp = hr_dev->caps.num_qps;
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_sge = max(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg);
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
	props->max_mr = hr_dev->caps.num_mtpts;
	props->max_pd = hr_dev->caps.num_pds;
	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
	props->atomic_cap = IB_ATOMIC_NONE;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;

	return 0;
}
static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
					      u8 port_num)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct net_device *ndev;

	if (port_num < 1 || port_num > hr_dev->caps.num_ports)
		return NULL;

	rcu_read_lock();

	ndev = hr_dev->iboe.netdevs[port_num - 1];
	if (ndev)
		/* The caller is responsible for dropping this reference. */
		dev_hold(ndev);

	rcu_read_unlock();
	return ndev;
}
static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct net_device *net_dev;
	unsigned long flags;
	enum ib_mtu mtu;
	u8 port;

	assert(port_num > 0);
	port = port_num - 1;

	/* props being zeroed by the caller, avoid zeroing it here */

	props->max_mtu = hr_dev->caps.max_mtu;
	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	net_dev = hr_dev->iboe.netdevs[port];
	if (!net_dev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		dev_err(dev, "find netdev %d failed!\n", port);
		return -EINVAL;
	}

	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
	props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
			IB_PORT_ACTIVE : IB_PORT_DOWN;
	/* IB physical port state: 5 is LinkUp, 3 is Disabled. */
	props->phys_state = (props->state == IB_PORT_ACTIVE) ? 5 : 3;

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}
static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
						    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
			      union ib_gid *gid)
{
	return 0;
}
static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
			       u16 *pkey)
{
	*pkey = PKEY_ID;

	return 0;
}
static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
				  struct ib_device_modify *props)
{
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
	}

	return 0;
}
static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
				struct ib_port_modify *props)
{
	return 0;
}
static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
						   struct ib_udata *udata)
{
	int ret;
	struct hns_roce_ucontext *context;
	struct hns_roce_ib_alloc_ucontext_resp resp;
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	resp.qp_tab_size = hr_dev->caps.num_qps;

	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
	if (ret)
		goto error_fail_uar_alloc;

	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (ret)
		goto error_fail_copy_to_udata;

	return &context->ibucontext;

error_fail_copy_to_udata:
	hns_roce_uar_free(hr_dev, &context->uar);

error_fail_uar_alloc:
	kfree(context);

	return ERR_PTR(ret);
}
static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);

	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
	kfree(context);

	return 0;
}
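/*
 * Userspace mmap()s two fixed offsets: page offset 0 maps the context's
 * UAR doorbell page, and page offset 1 maps the TPTR page when the
 * hardware provides one (hr_dev->tptr_dma_addr is non-zero).
 */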
static int hns_roce_mmap(struct ib_ucontext *context,
			 struct vm_area_struct *vma)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(context->device);

	if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
		return -EINVAL;

	if (vma->vm_pgoff == 0) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       to_hr_ucontext(context)->uar.pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;
	} else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
		   hr_dev->tptr_size) {
		/* vm_pgoff: 1 -- TPTR */
		if (io_remap_pfn_range(vma, vma->vm_start,
				       hr_dev->tptr_dma_addr >> PAGE_SHIFT,
				       hr_dev->tptr_size, vma->vm_page_prot))
			return -EAGAIN;
	} else
		return -EINVAL;

	return 0;
}
static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

	unregister_netdevice_notifier(&iboe->nb);
	ib_unregister_device(&hr_dev->ib_dev);
}
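/*
 * Populate the ib_device verbs table and register it with the IB core,
 * then bring up per-port MTU/MAC state and hook the netdev notifier so
 * later address changes are tracked.
 */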
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = hr_dev->dev;

	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;
	strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);

	ib_dev->owner			= THIS_MODULE;
	ib_dev->node_type		= RDMA_NODE_IB_CA;
	ib_dev->dev.parent		= dev;

	ib_dev->phys_port_cnt		= hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey		= hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors	= hr_dev->caps.num_comp_vectors;
	ib_dev->uverbs_abi_ver		= 1;
	ib_dev->uverbs_cmd_mask		=
		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ULL << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ULL << IB_USER_VERBS_CMD_REG_MR) |
		(1ULL << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ULL << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ULL << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ULL << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ULL << IB_USER_VERBS_CMD_DESTROY_QP);

	/* HCA||device||port */
	ib_dev->modify_device		= hns_roce_modify_device;
	ib_dev->query_device		= hns_roce_query_device;
	ib_dev->query_port		= hns_roce_query_port;
	ib_dev->modify_port		= hns_roce_modify_port;
	ib_dev->get_link_layer		= hns_roce_get_link_layer;
	ib_dev->get_netdev		= hns_roce_get_netdev;
	ib_dev->query_gid		= hns_roce_query_gid;
	ib_dev->add_gid			= hns_roce_add_gid;
	ib_dev->del_gid			= hns_roce_del_gid;
	ib_dev->query_pkey		= hns_roce_query_pkey;
	ib_dev->alloc_ucontext		= hns_roce_alloc_ucontext;
	ib_dev->dealloc_ucontext	= hns_roce_dealloc_ucontext;
	ib_dev->mmap			= hns_roce_mmap;

	/* PD */
	ib_dev->alloc_pd		= hns_roce_alloc_pd;
	ib_dev->dealloc_pd		= hns_roce_dealloc_pd;

	/* AH */
	ib_dev->create_ah		= hns_roce_create_ah;
	ib_dev->query_ah		= hns_roce_query_ah;
	ib_dev->destroy_ah		= hns_roce_destroy_ah;

	/* QP */
	ib_dev->create_qp		= hns_roce_create_qp;
	ib_dev->modify_qp		= hns_roce_modify_qp;
	ib_dev->query_qp		= hr_dev->hw->query_qp;
	ib_dev->destroy_qp		= hr_dev->hw->destroy_qp;
	ib_dev->post_send		= hr_dev->hw->post_send;
	ib_dev->post_recv		= hr_dev->hw->post_recv;

	/* CQ */
	ib_dev->create_cq		= hns_roce_ib_create_cq;
	ib_dev->modify_cq		= hr_dev->hw->modify_cq;
	ib_dev->destroy_cq		= hns_roce_ib_destroy_cq;
	ib_dev->req_notify_cq		= hr_dev->hw->req_notify_cq;
	ib_dev->poll_cq			= hr_dev->hw->poll_cq;

	/* MR */
	ib_dev->get_dma_mr		= hns_roce_get_dma_mr;
	ib_dev->reg_user_mr		= hns_roce_reg_user_mr;
	ib_dev->dereg_mr		= hns_roce_dereg_mr;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
		ib_dev->rereg_user_mr	= hns_roce_rereg_user_mr;
		ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
	}

	/* OTHERS */
	ib_dev->get_port_immutable	= hns_roce_port_immutable;

	ret = ib_register_device(ib_dev, NULL);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_mac(hr_dev);
	if (ret) {
		dev_err(dev, "setup_mtu_mac failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	return 0;

error_failed_setup_mtu_mac:
	ib_unregister_device(ib_dev);

	return ret;
}
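/*
 * Allocate the HEM (Hardware Entry Memory) context tables in dependency
 * order: MTT (plus MTT-CQE when multi-hop addressing is in use), MTPT,
 * QPC, IRRL, optional TRRL, and finally CQC. On any failure, the tables
 * already set up are torn down in reverse via the err_unmap_* labels.
 */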
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
				      HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
				      hr_dev->caps.num_mtt_segs, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTT context memory, aborting.\n");
		return ret;
	}

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->mr_table.mtt_cqe_table,
					      HEM_TYPE_CQE,
					      hr_dev->caps.mtt_entry_sz,
					      hr_dev->caps.num_cqe_segs, 1);
		if (ret) {
			dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
			goto err_unmap_cqe;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_entry_sz,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.trrl_table,
					      HEM_TYPE_TRRL,
					      hr_dev->caps.trrl_entry_sz *
					      hr_dev->caps.max_qp_dest_rdma,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init trrl_table memory, aborting.\n");
			goto err_unmap_irrl;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs, 1);
	if (ret) {
		dev_err(dev, "Failed to init CQ context memory, aborting.\n");
		goto err_unmap_trrl;
	}

	return 0;

err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

err_unmap_mtt:
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_cqe_table);

err_unmap_cqe:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);

	return ret;
}
/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 * Return : int
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	spin_lock_init(&hr_dev->sm_lock);
	spin_lock_init(&hr_dev->bt_cmd_lock);

	ret = hns_roce_init_uar_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to initialize uar table. aborting\n");
		return ret;
	}

	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "Failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_pd_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init protected domain table.\n");
		goto err_uar_alloc_free;
	}

	ret = hns_roce_init_mr_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init memory region table.\n");
		goto err_pd_table_free;
	}

	ret = hns_roce_init_cq_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init completion queue table.\n");
		goto err_mr_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init queue pair table.\n");
		goto err_cq_table_free;
	}

	return 0;

err_cq_table_free:
	hns_roce_cleanup_cq_table(hr_dev);

err_mr_table_free:
	hns_roce_cleanup_mr_table(hr_dev);

err_pd_table_free:
	hns_roce_cleanup_pd_table(hr_dev);

err_uar_alloc_free:
	hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);

err_uar_table_free:
	hns_roce_cleanup_uar_table(hr_dev);
	return ret;
}
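/*
 * Device bring-up: reset the engine, probe its profile, initialize the
 * command path and event queues, map HEM context memory, set up the HCA
 * software tables, run the engine-specific hw_init(), and finally expose
 * the device through the IB core. Each step unwinds through the
 * error_failed_* labels below in reverse order on failure.
 */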
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct device *dev = hr_dev->dev;

	if (hr_dev->hw->reset) {
		ret = hr_dev->hw->reset(hr_dev, true);
		if (ret) {
			dev_err(dev, "Reset RoCE engine failed!\n");
			return ret;
		}
	}

	if (hr_dev->hw->cmq_init) {
		ret = hr_dev->hw->cmq_init(hr_dev);
		if (ret) {
			dev_err(dev, "Init RoCE Command Queue failed!\n");
			goto error_failed_cmq_init;
		}
	}

	ret = hr_dev->hw->hw_profile(hr_dev);
	if (ret) {
		dev_err(dev, "Get RoCE engine profile failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret) {
			dev_err(dev, "Switch to event-driven cmd failed!\n");
			goto error_failed_use_event;
		}
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	if (hr_dev->hw->hw_init) {
		ret = hr_dev->hw->hw_init(hr_dev);
		if (ret) {
			dev_err(dev, "hw_init failed!\n");
			goto error_failed_engine_init;
		}
	}

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	return 0;

error_failed_register_device:
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

error_failed_use_event:
	hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);

error_failed_cmq_init:
	if (hr_dev->hw->reset) {
		ret = hr_dev->hw->reset(hr_dev, false);
		if (ret)
			dev_err(dev, "Dereset RoCE engine failed!\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hns_roce_init);
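/*
 * Teardown mirrors hns_roce_init() in reverse: unregister from the IB
 * core first, then release engine, bitmap, HEM, command, and reset state.
 */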
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_device(hr_dev);
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hr_dev->hw->cleanup_eq(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);
	if (hr_dev->hw->reset)
		hr_dev->hw->reset(hr_dev, false);
}
EXPORT_SYMBOL_GPL(hns_roce_exit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");