/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

/**
 * hns_get_gid_index - Get gid index.
 * @hr_dev: pointer to structure hns_roce_dev.
 * @port: port number, value range: 0 ~ MAX
 * @gid_index: gid_index, value range: 0 ~ MAX
 *
 * N ports share one GID table, allocated as follows:
 *		GID[0][0], GID[1][0], ..... GID[N - 1][0],
 *		GID[0][1], GID[1][1], ..... GID[N - 1][1],
 *		And so on.
 * e.g. on a 2-port device, gid_index 1 of port 0 maps to entry 2.
 */
u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
{
	return gid_index * hr_dev->caps.num_ports + port;
}

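/*
 * Cache the netdev MAC address of a port and program it into hardware
 * through the hw->set_mac hook. HIP09 and later revisions return early
 * and skip the update.
 */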
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
{
	u8 phy_port;
	u32 i;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
		return 0;

	if (!memcmp(hr_dev->dev_addr[port], addr, ETH_ALEN))
		return 0;

	for (i = 0; i < ETH_ALEN; i++)
		hr_dev->dev_addr[port][i] = addr[i];

	phy_port = hr_dev->iboe.phy_port[port];
	return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
}

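/*
 * GID table callbacks: both add and delete are forwarded to the
 * hardware-specific hw->set_gid hook (delete passes a NULL GID).
 */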
static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u8 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);

	return ret;
}

static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
	u8 port = attr->port_num - 1;
	int ret;

	if (port >= hr_dev->caps.num_ports)
		return -EINVAL;

	ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, NULL, NULL);

	return ret;
}

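/*
 * Handle a netdev notifier event for one RoCE port, refreshing the port
 * MAC on address changes and (re)registration.
 */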
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
			   unsigned long event)
{
	struct device *dev = hr_dev->dev;
	struct net_device *netdev;
	int ret = 0;

	netdev = hr_dev->iboe.netdevs[port];
	if (!netdev) {
		dev_err(dev, "Can't find netdev on port(%u)!\n", port);
		return -ENODEV;
	}

	switch (event) {
	case NETDEV_UP:
	case NETDEV_CHANGE:
	case NETDEV_REGISTER:
	case NETDEV_CHANGEADDR:
		ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
		break;
	case NETDEV_DOWN:
		/*
		 * In the v1 engine, ports can only be closed all together.
		 */
		break;
	default:
		dev_dbg(dev, "NETDEV event = 0x%x!\n", (u32)(event));
		break;
	}

	return ret;
}

static int hns_roce_netdev_event(struct notifier_block *self,
				 unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct hns_roce_ib_iboe *iboe = NULL;
	struct hns_roce_dev *hr_dev = NULL;
	int ret;
	u8 port;

	hr_dev = container_of(self, struct hns_roce_dev, iboe.nb);
	iboe = &hr_dev->iboe;

	for (port = 0; port < hr_dev->caps.num_ports; port++) {
		if (dev == iboe->netdevs[port]) {
			ret = handle_en_event(hr_dev, port, event);
			if (ret)
				return NOTIFY_DONE;
			break;
		}
	}

	return NOTIFY_DONE;
}

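/* Program the initial MTU and MAC for every port at registration time. */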
static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
{
	int ret;
	u8 i;

	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (hr_dev->hw->set_mtu)
			hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
					    hr_dev->caps.max_mtu);
		ret = hns_roce_set_mac(hr_dev, i,
				       hr_dev->iboe.netdevs[i]->dev_addr);
		if (ret)
			return ret;
	}

	return 0;
}

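/* Report device attributes from the probed hardware capabilities (hr_dev->caps). */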
static int hns_roce_query_device(struct ib_device *ib_dev,
				 struct ib_device_attr *props,
				 struct ib_udata *uhw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);

	memset(props, 0, sizeof(*props));

	props->fw_ver = hr_dev->caps.fw_ver;
	props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
	props->max_mr_size = (u64)(~(0ULL));
	props->page_size_cap = hr_dev->caps.page_size_cap;
	props->vendor_id = hr_dev->vendor_id;
	props->vendor_part_id = hr_dev->vendor_part_id;
	props->hw_ver = hr_dev->hw_rev;
	props->max_qp = hr_dev->caps.num_qps;
	props->max_qp_wr = hr_dev->caps.max_wqes;
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
				  IB_DEVICE_RC_RNR_NAK_GEN;
	props->max_send_sge = hr_dev->caps.max_sq_sg;
	props->max_recv_sge = hr_dev->caps.max_rq_sg;
	props->max_sge_rd = 1;
	props->max_cq = hr_dev->caps.num_cqs;
	props->max_cqe = hr_dev->caps.max_cqes;
	props->max_mr = hr_dev->caps.num_mtpts;
	props->max_pd = hr_dev->caps.num_pds;
	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
	props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
			    IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->max_pkeys = 1;
	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		props->max_srq = hr_dev->caps.num_srqs;
		props->max_srq_wr = hr_dev->caps.max_srq_wrs;
		props->max_srq_sge = hr_dev->caps.max_srq_sges;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
		props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
	}

	return 0;
}

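/*
 * Port attributes are derived from the bound netdev: its MTU, running
 * state and carrier determine active_mtu and the reported port state.
 */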
static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
			       struct ib_port_attr *props)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
	struct device *dev = hr_dev->dev;
	struct net_device *net_dev;
	unsigned long flags;
	enum ib_mtu mtu;
	u8 port;

	port = port_num - 1;

	/* props being zeroed by the caller, avoid zeroing it here */

	props->max_mtu = hr_dev->caps.max_mtu;
	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_VENDOR_CLASS_SUP |
				IB_PORT_BOOT_MGMT_SUP;
	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
	props->pkey_tbl_len = 1;
	props->active_width = IB_WIDTH_4X;
	props->active_speed = 1;

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	net_dev = hr_dev->iboe.netdevs[port];
	if (!net_dev) {
		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
		dev_err(dev, "Find netdev %u failed!\n", port);
		return -EINVAL;
	}

	mtu = iboe_get_mtu(net_dev->mtu);
	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
	props->state = netif_running(net_dev) && netif_carrier_ok(net_dev) ?
		       IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state = props->state == IB_PORT_ACTIVE ?
			    IB_PORT_PHYS_STATE_LINK_UP :
			    IB_PORT_PHYS_STATE_DISABLED;

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}

static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
						    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
			       u16 *pkey)
{
	*pkey = PKEY_ID;

	return 0;
}

static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
				  struct ib_device_modify *props)
{
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
	}

	return 0;
}

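/*
 * Allocate a UAR for the new user context and return ABI information
 * (QP table size, CQE size) to userspace through the udata response.
 */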
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
				   struct ib_udata *udata)
{
	int ret;
	struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
	struct hns_roce_ib_alloc_ucontext_resp resp = {};
	struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);

	if (!hr_dev->active)
		return -EAGAIN;

	resp.qp_tab_size = hr_dev->caps.num_qps;

	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
	if (ret)
		goto error_fail_uar_alloc;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		INIT_LIST_HEAD(&context->page_list);
		mutex_init(&context->page_mutex);
	}

	resp.cqe_size = hr_dev->caps.cqe_sz;

	ret = ib_copy_to_udata(udata, &resp,
			       min(udata->outlen, sizeof(resp)));
	if (ret)
		goto error_fail_copy_to_udata;

	return 0;

error_fail_copy_to_udata:
	hns_roce_uar_free(hr_dev, &context->uar);

error_fail_uar_alloc:
	return ret;
}

static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);

	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
}

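/*
 * Map driver resources into userspace: vm_pgoff 0 maps the context's UAR
 * page, vm_pgoff 1 maps the TPTR area when the hardware provides one.
 */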
static int hns_roce_mmap(struct ib_ucontext *context,
			 struct vm_area_struct *vma)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(context->device);

	switch (vma->vm_pgoff) {
	case 0:
		return rdma_user_mmap_io(context, vma,
					 to_hr_ucontext(context)->uar.pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);

	/* vm_pgoff: 1 -- TPTR */
	case 1:
		if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
			return -EINVAL;
		/*
		 * FIXME: using io_remap_pfn_range on the dma address returned
		 * by dma_alloc_coherent is totally wrong.
		 */
		return rdma_user_mmap_io(context, vma,
					 hr_dev->tptr_dma_addr >> PAGE_SHIFT,
					 hr_dev->tptr_size,
					 vma->vm_page_prot,
					 NULL);

	default:
		return -EINVAL;
	}
}

static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
				   struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(ib_dev, port_num, &attr);
	if (ret)
		return ret;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;

	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
	if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return 0;
}

static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;

	hr_dev->active = false;
	unregister_netdevice_notifier(&iboe->nb);
	ib_unregister_device(&hr_dev->ib_dev);
}

static const struct ib_device_ops hns_roce_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_HNS,
	.uverbs_no_driver_id_binding = 1,

	.add_gid = hns_roce_add_gid,
	.alloc_pd = hns_roce_alloc_pd,
	.alloc_ucontext = hns_roce_alloc_ucontext,
	.create_ah = hns_roce_create_ah,
	.create_user_ah = hns_roce_create_ah,
	.create_cq = hns_roce_create_cq,
	.create_qp = hns_roce_create_qp,
	.dealloc_pd = hns_roce_dealloc_pd,
	.dealloc_ucontext = hns_roce_dealloc_ucontext,
	.del_gid = hns_roce_del_gid,
	.dereg_mr = hns_roce_dereg_mr,
	.destroy_ah = hns_roce_destroy_ah,
	.destroy_cq = hns_roce_destroy_cq,
	.disassociate_ucontext = hns_roce_disassociate_ucontext,
	.fill_res_cq_entry = hns_roce_fill_res_cq_entry,
	.get_dma_mr = hns_roce_get_dma_mr,
	.get_link_layer = hns_roce_get_link_layer,
	.get_port_immutable = hns_roce_port_immutable,
	.mmap = hns_roce_mmap,
	.modify_device = hns_roce_modify_device,
	.modify_qp = hns_roce_modify_qp,
	.query_ah = hns_roce_query_ah,
	.query_device = hns_roce_query_device,
	.query_pkey = hns_roce_query_pkey,
	.query_port = hns_roce_query_port,
	.reg_user_mr = hns_roce_reg_user_mr,

	INIT_RDMA_OBJ_SIZE(ib_ah, hns_roce_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, hns_roce_cq, ib_cq),
	INIT_RDMA_OBJ_SIZE(ib_pd, hns_roce_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, hns_roce_ucontext, ibucontext),
};

static const struct ib_device_ops hns_roce_dev_mr_ops = {
	.rereg_user_mr = hns_roce_rereg_user_mr,
};

static const struct ib_device_ops hns_roce_dev_mw_ops = {
	.alloc_mw = hns_roce_alloc_mw,
	.dealloc_mw = hns_roce_dealloc_mw,

	INIT_RDMA_OBJ_SIZE(ib_mw, hns_roce_mw, ibmw),
};

static const struct ib_device_ops hns_roce_dev_frmr_ops = {
	.alloc_mr = hns_roce_alloc_mr,
	.map_mr_sg = hns_roce_map_mr_sg,
};

static const struct ib_device_ops hns_roce_dev_srq_ops = {
	.create_srq = hns_roce_create_srq,
	.destroy_srq = hns_roce_destroy_srq,

	INIT_RDMA_OBJ_SIZE(ib_srq, hns_roce_srq, ibsrq),
};

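/*
 * Populate the ib_device, merge the generic and hardware-specific op
 * tables, bind the port netdevs and register with the RDMA core.
 */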
static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{
	int ret;
	struct hns_roce_ib_iboe *iboe = NULL;
	struct ib_device *ib_dev = NULL;
	struct device *dev = hr_dev->dev;
	unsigned int i;

	iboe = &hr_dev->iboe;
	spin_lock_init(&iboe->lock);

	ib_dev = &hr_dev->ib_dev;

	ib_dev->node_type = RDMA_NODE_IB_CA;
	ib_dev->dev.parent = dev;

	ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW)
		ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
		ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
		ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
	}

	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
	for (i = 0; i < hr_dev->caps.num_ports; i++) {
		if (!hr_dev->iboe.netdevs[i])
			continue;

		ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
					   i + 1);
		if (ret)
			return ret;
	}
	dma_set_max_seg_size(dev, UINT_MAX);
	ret = ib_register_device(ib_dev, "hns_%d", dev);
	if (ret) {
		dev_err(dev, "ib_register_device failed!\n");
		return ret;
	}

	ret = hns_roce_setup_mtu_mac(hr_dev);
	if (ret) {
		dev_err(dev, "setup_mtu_mac failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	iboe->nb.notifier_call = hns_roce_netdev_event;
	ret = register_netdevice_notifier(&iboe->nb);
	if (ret) {
		dev_err(dev, "register_netdevice_notifier failed!\n");
		goto error_failed_setup_mtu_mac;
	}

	hr_dev->active = true;

	return 0;

error_failed_setup_mtu_mac:
	ib_unregister_device(ib_dev);

	return ret;
}

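/*
 * Create the HEM (Hardware Entry Memory) tables backing the various
 * hardware contexts (MTPT, QPC, CQC, ...), tearing them down in reverse
 * order on failure.
 */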
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
				      HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
				      hr_dev->caps.num_mtpts, 1);
	if (ret) {
		dev_err(dev, "Failed to init MTPT context memory, aborting.\n");
		return ret;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table,
				      HEM_TYPE_QPC, hr_dev->caps.qpc_sz,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table,
				      HEM_TYPE_IRRL,
				      hr_dev->caps.irrl_entry_sz *
				      hr_dev->caps.max_qp_init_rdma,
				      hr_dev->caps.num_qps, 1);
	if (ret) {
		dev_err(dev, "Failed to init irrl_table memory, aborting.\n");
		goto err_unmap_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.trrl_table,
					      HEM_TYPE_TRRL,
					      hr_dev->caps.trrl_entry_sz *
					      hr_dev->caps.max_qp_dest_rdma,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init trrl_table memory, aborting.\n");
			goto err_unmap_irrl;
		}
	}

	ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
				      HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
				      hr_dev->caps.num_cqs, 1);
	if (ret) {
		dev_err(dev, "Failed to init CQ context memory, aborting.\n");
		goto err_unmap_trrl;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->srq_table.table,
					      HEM_TYPE_SRQC,
					      hr_dev->caps.srqc_entry_sz,
					      hr_dev->caps.num_srqs, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SRQ context memory, aborting.\n");
			goto err_unmap_cq;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hns_roce_init_hem_table(hr_dev,
					      &hr_dev->qp_table.sccc_table,
					      HEM_TYPE_SCCC,
					      hr_dev->caps.sccc_sz,
					      hr_dev->caps.num_qps, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init SCC context memory, aborting.\n");
			goto err_unmap_srq;
		}
	}

	if (hr_dev->caps.qpc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
					      HEM_TYPE_QPC_TIMER,
					      hr_dev->caps.qpc_timer_entry_sz,
					      hr_dev->caps.num_qpc_timer, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init QPC timer memory, aborting.\n");
			goto err_unmap_ctx;
		}
	}

	if (hr_dev->caps.cqc_timer_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cqc_timer_table,
					      HEM_TYPE_CQC_TIMER,
					      hr_dev->caps.cqc_timer_entry_sz,
					      hr_dev->caps.num_cqc_timer, 1);
		if (ret) {
			dev_err(dev,
				"Failed to init CQC timer memory, aborting.\n");
			goto err_unmap_qpc_timer;
		}
	}

	if (hr_dev->caps.gmv_entry_sz) {
		ret = hns_roce_init_hem_table(hr_dev, &hr_dev->gmv_table,
					      HEM_TYPE_GMV,
					      hr_dev->caps.gmv_entry_sz,
					      hr_dev->caps.gmv_entry_num, 1);
		if (ret) {
			dev_err(dev,
				"failed to init gmv table memory, ret = %d\n",
				ret);
			goto err_unmap_cqc_timer;
		}
	}

	return 0;

err_unmap_cqc_timer:
	if (hr_dev->caps.cqc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cqc_timer_table);

err_unmap_qpc_timer:
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qpc_timer_table);

err_unmap_ctx:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);

err_unmap_srq:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table);

err_unmap_cq:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);

err_unmap_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

err_unmap_irrl:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);

err_unmap_qp:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);

err_unmap_dmpt:
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);

	return ret;
}

/**
 * hns_roce_setup_hca - setup host channel adapter
 * @hr_dev: pointer to hns roce device
 */
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	spin_lock_init(&hr_dev->sm_lock);
	spin_lock_init(&hr_dev->bt_cmd_lock);

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
		INIT_LIST_HEAD(&hr_dev->pgdir_list);
		mutex_init(&hr_dev->pgdir_mutex);
	}

	ret = hns_roce_init_uar_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to initialize uar table. aborting\n");
		return ret;
	}

	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
	if (ret) {
		dev_err(dev, "Failed to allocate priv_uar.\n");
		goto err_uar_table_free;
	}

	ret = hns_roce_init_pd_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init protected domain table.\n");
		goto err_uar_alloc_free;
	}

	ret = hns_roce_init_mr_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init memory region table.\n");
		goto err_pd_table_free;
	}

	ret = hns_roce_init_cq_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init completion queue table.\n");
		goto err_mr_table_free;
	}

	ret = hns_roce_init_qp_table(hr_dev);
	if (ret) {
		dev_err(dev, "Failed to init queue pair table.\n");
		goto err_cq_table_free;
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
		ret = hns_roce_init_srq_table(hr_dev);
		if (ret) {
			dev_err(dev,
				"Failed to init share receive queue table.\n");
			goto err_qp_table_free;
		}
	}

	return 0;

err_qp_table_free:
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_qp_table(hr_dev);

err_cq_table_free:
	hns_roce_cleanup_cq_table(hr_dev);

err_mr_table_free:
	hns_roce_cleanup_mr_table(hr_dev);

err_pd_table_free:
	hns_roce_cleanup_pd_table(hr_dev);

err_uar_alloc_free:
	hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);

err_uar_table_free:
	hns_roce_cleanup_uar_table(hr_dev);
	return ret;
}

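/*
 * Collect a CQ that has a completion handler and is not yet armed, so
 * that hns_roce_handle_device_err() can flush it with a completion event.
 */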
static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
	unsigned long flags;

	spin_lock_irqsave(&hr_cq->lock, flags);
	if (cq->comp_handler) {
		if (!hr_cq->is_armed) {
			hr_cq->is_armed = 1;
			list_add_tail(&hr_cq->node, cq_list);
		}
	}
	spin_unlock_irqrestore(&hr_cq->lock, flags);
}

void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp *hr_qp;
	struct hns_roce_cq *hr_cq;
	struct list_head cq_list;
	unsigned long flags_qp;
	unsigned long flags;

	INIT_LIST_HEAD(&cq_list);

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
		if (hr_qp->sq.tail != hr_qp->sq.head)
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);

		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
		if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
	}

	list_for_each_entry(hr_cq, &cq_list, node)
		hns_roce_cq_completion(hr_dev, hr_cq->cqn);

	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

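/**
 * hns_roce_init - bring up a RoCE device
 * @hr_dev: pointer to hns roce device
 *
 * Reset the engine, probe its profile, set up command queues, EQs, HEM
 * and software tables, then register the ib_device. Each step is torn
 * down in reverse order on failure.
 */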
int hns_roce_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (hr_dev->hw->reset) {
		ret = hr_dev->hw->reset(hr_dev, true);
		if (ret) {
			dev_err(dev, "Reset RoCE engine failed!\n");
			return ret;
		}
	}
	hr_dev->is_reset = false;

	if (hr_dev->hw->cmq_init) {
		ret = hr_dev->hw->cmq_init(hr_dev);
		if (ret) {
			dev_err(dev, "Init RoCE Command Queue failed!\n");
			goto error_failed_cmq_init;
		}
	}

	ret = hr_dev->hw->hw_profile(hr_dev);
	if (ret) {
		dev_err(dev, "Get RoCE engine profile failed!\n");
		goto error_failed_cmd_init;
	}

	ret = hns_roce_cmd_init(hr_dev);
	if (ret) {
		dev_err(dev, "cmd init failed!\n");
		goto error_failed_cmd_init;
	}

	/* EQ depends on poll mode, event mode depends on EQ */
	ret = hr_dev->hw->init_eq(hr_dev);
	if (ret) {
		dev_err(dev, "eq init failed!\n");
		goto error_failed_eq_table;
	}

	if (hr_dev->cmd_mod) {
		ret = hns_roce_cmd_use_events(hr_dev);
		if (ret) {
			dev_warn(dev,
				 "Cmd event mode failed, set back to poll!\n");
			hns_roce_cmd_use_polling(hr_dev);
		}
	}

	ret = hns_roce_init_hem(hr_dev);
	if (ret) {
		dev_err(dev, "init HEM(Hardware Entry Memory) failed!\n");
		goto error_failed_init_hem;
	}

	ret = hns_roce_setup_hca(hr_dev);
	if (ret) {
		dev_err(dev, "setup hca failed!\n");
		goto error_failed_setup_hca;
	}

	if (hr_dev->hw->hw_init) {
		ret = hr_dev->hw->hw_init(hr_dev);
		if (ret) {
			dev_err(dev, "hw_init failed!\n");
			goto error_failed_engine_init;
		}
	}

	INIT_LIST_HEAD(&hr_dev->qp_list);
	spin_lock_init(&hr_dev->qp_list_lock);

	ret = hns_roce_register_device(hr_dev);
	if (ret)
		goto error_failed_register_device;

	return 0;

error_failed_register_device:
	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);

error_failed_engine_init:
	hns_roce_cleanup_bitmap(hr_dev);

error_failed_setup_hca:
	hns_roce_cleanup_hem(hr_dev);

error_failed_init_hem:
	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);
	hr_dev->hw->cleanup_eq(hr_dev);

error_failed_eq_table:
	hns_roce_cmd_cleanup(hr_dev);

error_failed_cmd_init:
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);

error_failed_cmq_init:
	if (hr_dev->hw->reset) {
		if (hr_dev->hw->reset(hr_dev, false))
			dev_err(dev, "Dereset RoCE engine failed!\n");
	}

	return ret;
}

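/**
 * hns_roce_exit - tear down a RoCE device
 * @hr_dev: pointer to hns roce device
 */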
void hns_roce_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_unregister_device(hr_dev);

	if (hr_dev->hw->hw_exit)
		hr_dev->hw->hw_exit(hr_dev);
	hns_roce_cleanup_bitmap(hr_dev);
	hns_roce_cleanup_hem(hr_dev);

	if (hr_dev->cmd_mod)
		hns_roce_cmd_use_polling(hr_dev);

	hr_dev->hw->cleanup_eq(hr_dev);
	hns_roce_cmd_cleanup(hr_dev);
	if (hr_dev->hw->cmq_exit)
		hr_dev->hw->cmq_exit(hr_dev);
	if (hr_dev->hw->reset)
		hr_dev->hw->reset(hr_dev, false);
}

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("HNS RoCE Driver");