/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <net/devlink.h>
#include "mlx5_core.h"
#include "fs_core.h"
#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};
static struct mlx5_profile profile[] = {
	[0] = {
		.mask		= 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		.log_max_qp	= 18,
	},
};
#define FW_INIT_TIMEOUT_MILI	2000
#define FW_INIT_WAIT_MS		2
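
/*
 * Poll the initializing bit in the device's initialization segment until
 * the firmware clears it or the timeout expires, sleeping between reads
 * so the wait does not busy-spin.
 */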
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}
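
/*
 * Report the running driver to firmware as "Linux,<driver>,<version>".
 * Each strncat() recomputes the space left in the fixed-size
 * driver_version field so the string can never overflow it; the command
 * is skipped when the firmware does not advertise the driver_version
 * capability.
 */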
static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
	int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
					      driver_version);
	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
	u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
	int remaining_size = driver_ver_sz;
	char *string;

	if (!MLX5_CAP_GEN(dev, driver_version))
		return;

	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);

	strncpy(string, "Linux", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, DRIVER_NAME, remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, ",", remaining_size);

	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
	strncat(string, DRIVER_VERSION, remaining_size);

	/* Send the command */
	MLX5_SET(set_driver_version_in, in, opcode,
		 MLX5_CMD_OP_SET_DRIVER_VERSION);

	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
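
/*
 * Prefer 64-bit streaming and coherent DMA masks, falling back to
 * 32-bit masks and giving up only if those are rejected as well.
 */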
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return err;
}
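
/*
 * PCI enable/disable state is tracked in dev->pci_status under
 * pci_status_mutex, so the error-recovery paths can call these helpers
 * without double-enabling or double-disabling the device.
 */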
static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}
static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}
static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
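
/*
 * Request one completion vector per online CPU per port on top of the
 * MLX5_EQ_VEC_COMP_BASE control vectors, clamped to the number of EQs
 * the device exposes via log_max_eq.
 */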
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int i;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);

	priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
	if (!priv->msix_arr || !priv->irq_info)
		goto err_free_msix;

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;

err_free_msix:
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
	return -ENOMEM;
}
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
}
struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT,
};
static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
		return 0;
	}
}
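
/*
 * QUERY_HCA_CAP encodes the capability type and the query mode in
 * op_mod: the type in the upper bits and current-vs-max in bit 0.
 */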
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
				   enum mlx5_cap_type cap_type,
				   enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}

query_ex:
	kfree(out);
	return err;
}
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
	int ret;

	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
	if (ret)
		return ret;
	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
}
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_ctx;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianess_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);

	kfree(set_ctx);
	return err;
}
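
/*
 * Adjust general HCA capabilities with a read-modify-write cycle: query
 * the current caps, copy them into a SET_HCA_CAP mailbox, override the
 * fields the driver cares about and write the whole block back.
 */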
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

	/* Check log_max_qp from HCA caps to set in current profile */
	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
			       profile[prof_sel].log_max_qp,
			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
		profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
	}
	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz,
		       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);

query_ex:
	kfree(set_ctx);
	return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in,  sizeof(he_in),
					&he_out, sizeof(he_out),
					MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};

	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	return mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
}

int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)]   = {0};

	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
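
/*
 * The 64-bit internal timer is read as two 32-bit halves; re-reading
 * the high word detects a wrap of the low word between the two reads,
 * in which case the low word is fetched again.
 */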
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(&dev->iseg->internal_timer_h);
	timer_l = ioread32be(&dev->iseg->internal_timer_l);
	timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) /* wrap around */
		timer_l = ioread32be(&dev->iseg->internal_timer_l);

	return (u64)timer_l | (u64)timer_h1 << 32;
}
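
/*
 * Spread completion IRQ affinity hints across the CPUs of the device's
 * NUMA node with cpumask_local_spread(), one CPU per completion vector.
 */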
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv  = &mdev->priv;
	struct msix_entry *msix = priv->msix_arr;
	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
	int err;

	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
			priv->irq_info[i].mask);

	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
	if (err) {
		mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
			       irq);
		goto err_clear_mask;
	}

	return 0;

err_clear_mask:
	free_cpumask_var(priv->irq_info[i].mask);
	return err;
}
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv  = &mdev->priv;
	struct msix_entry *msix = priv->msix_arr;
	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;

	irq_set_affinity_hint(irq, NULL);
	free_cpumask_var(priv->irq_info[i].mask);
}
static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
{
	int err;
	int i;

	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
		err = mlx5_irq_set_affinity_hint(mdev, i);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	for (i--; i >= 0; i--)
		mlx5_irq_clear_affinity_hint(mdev, i);

	return err;
}

static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
{
	int i;

	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
		mlx5_irq_clear_affinity_hint(mdev, i);
}
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq;

	spin_lock(&table->lock);
	list_for_each_entry(eq, &table->comp_eqs_list, list)
		if (eq->eqn == eqn) {
			spin_unlock(&table->lock);
			return eq;
		}

	spin_unlock(&table->lock);

	return ERR_PTR(-ENOENT);
}
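
/*
 * The table lock is dropped around mlx5_destroy_unmap_eq() because
 * destroying an EQ can sleep; each EQ is unlinked from the list before
 * the lock is released so the walk can safely continue afterwards.
 */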
static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

#ifdef CONFIG_RFS_ACCEL
	if (dev->rmap) {
		free_irq_cpu_rmap(dev->rmap);
		dev->rmap = NULL;
	}
#endif
	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
				       eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err = -ENOMEM;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
#ifdef CONFIG_RFS_ACCEL
	dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
	if (!dev->rmap)
		return -ENOMEM;
#endif
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

#ifdef CONFIG_RFS_ACCEL
		irq_cpu_rmap_add(dev->rmap,
				 dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector);
#endif
		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 name, &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}
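
/*
 * Negotiate the ISSI (Interface Step Sequence ID) with firmware: query
 * the supported-ISSI mask, move to ISSI 1 when the device offers it,
 * and fall back to ISSI 0 when the query command is not supported.
 */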
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)]   = {0};
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
	u32 sup_issi;
	int err;

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
			    query_out, sizeof(query_out));
	if (err) {
		u32 syndrome;
		u8 status;

		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
		if (!status || syndrome == MLX5_DRIVER_SYND) {
			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
				      err, status, syndrome);
			return err;
		}

		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
		dev->issi = 0;
		return 0;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]   = {0};
		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);
		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
				    set_out, sizeof(set_out));
		if (err) {
			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
				      err);
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		return 0;
	}

	return -EOPNOTSUPP;
}
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	mutex_init(&priv->alloc_mutex);

	priv->numa_node = dev_to_node(&dev->pdev->dev);

	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
	if (!priv->dbg_root)
		return -ENOMEM;

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		dev_err(&pdev->dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);

err_dbg:
	debugfs_remove(priv->dbg_root);
	return err;
}
static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
	debugfs_remove(priv->dbg_root);
}
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	err = mlx5_query_board_id(dev);
	if (err) {
		dev_err(&pdev->dev, "query board id failed\n");
		goto out;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto out;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	err = mlx5_init_cq_table(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize cq table\n");
		goto err_eq_cleanup;
	}

	mlx5_init_qp_table(dev);

	mlx5_init_srq_table(dev);

	mlx5_init_mkey_table(dev);

	err = mlx5_init_rl_table(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init rate limiting\n");
		goto err_tables_cleanup;
	}

#ifdef CONFIG_MLX5_CORE_EN
	err = mlx5_eswitch_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
		goto err_rl_cleanup;
	}
#endif

	err = mlx5_sriov_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
		goto err_eswitch_cleanup;
	}

	return 0;

err_eswitch_cleanup:
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_cleanup(dev->priv.eswitch);

err_rl_cleanup:
#endif
	mlx5_cleanup_rl_table(dev);

err_tables_cleanup:
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

out:
	return err;
}
static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
	mlx5_sriov_cleanup(dev);
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
	mlx5_cleanup_rl_table(dev);
	mlx5_cleanup_mkey_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_eq_cleanup(dev);
}
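
/*
 * Bring the device up: command interface, firmware-init handshake,
 * pages, capabilities, IRQs, EQs and flow steering, in that order.
 * @boot selects whether the one-time SW objects are created as well,
 * and the goto ladder below unwinds the sequence on failure.
 */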
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			 bool boot)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
			 __func__);
		goto out;
	}

	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		 fw_rev_min(dev), fw_rev_sub(dev));

	/* on load removing any previous indication of internal error, device is
	 * up
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_cmd_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
		goto out_err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
			FW_INIT_TIMEOUT_MILI);
		goto err_cmd_cleanup;
	}

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "enable hca failed\n");
		goto err_cmd_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		dev_err(&pdev->dev, "init hca failed\n");
		goto err_pagealloc_stop;
	}

	mlx5_set_driver_version(dev);

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto err_stop_poll;
	}

	if (boot && mlx5_init_once(dev, priv)) {
		dev_err(&pdev->dev, "sw objs init failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		dev_err(&pdev->dev, "enable msix failed\n");
		goto err_cleanup_once;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
		goto err_disable_msix;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	err = mlx5_irq_set_affinity_hints(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
		goto err_affinity_hints;
	}

	err = mlx5_init_fs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init flow steering\n");
		goto err_fs;
	}

#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_attach(dev->priv.eswitch);
#endif

	err = mlx5_sriov_attach(dev);
	if (err) {
		dev_err(&pdev->dev, "sriov init failed %d\n", err);
		goto err_sriov;
	}

	if (mlx5_device_registered(dev)) {
		mlx5_attach_device(dev);
	} else {
		err = mlx5_register_device(dev);
		if (err) {
			dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
			goto err_reg_dev;
		}
	}

	clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
	mutex_unlock(&dev->intf_state_mutex);

	return 0;

err_reg_dev:
	mlx5_sriov_detach(dev);

err_sriov:
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_detach(dev->priv.eswitch);
#endif
	mlx5_cleanup_fs(dev);

err_fs:
	mlx5_irq_clear_affinity_hints(dev);

err_affinity_hints:
	free_comp_eqs(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_disable_msix:
	mlx5_disable_msix(dev);

err_cleanup_once:
	if (boot)
		mlx5_cleanup_once(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		goto out_err;
	}

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev, 0);

err_cmd_cleanup:
	mlx5_cmd_cleanup(dev);

out_err:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}
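
/*
 * Tear down in the reverse order of mlx5_load_one(). @cleanup also
 * destroys the one-time SW objects and drains the health workqueue
 * first, so no recovery work races with the unload.
 */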
static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
			   bool cleanup)
{
	int err = 0;

	if (cleanup)
		mlx5_drain_health_wq(dev);

	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
			 __func__);
		if (cleanup)
			mlx5_cleanup_once(dev);
		goto out;
	}

	if (mlx5_device_registered(dev))
		mlx5_detach_device(dev);

	mlx5_sriov_detach(dev);
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_detach(dev->priv.eswitch);
#endif
	mlx5_cleanup_fs(dev);
	mlx5_irq_clear_affinity_hints(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_disable_msix(dev);
	if (cleanup)
		mlx5_cleanup_once(dev);
	mlx5_stop_health_poll(dev);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_cmd_cleanup(dev);

out:
	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
	set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}
struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};
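
/*
 * devlink callbacks for switching the e-switch between legacy and
 * switchdev modes and for the inline mode; only wired up when the
 * Ethernet part of the driver is built.
 */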
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_CORE_EN
	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
#endif
};
#define MLX5_IB_MOD "mlx5_ib"
static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct devlink *devlink;
	struct mlx5_priv *priv;
	int err;

	devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
	if (!devlink) {
		dev_err(&pdev->dev, "kzalloc failed\n");
		return -ENOMEM;
	}

	dev = devlink_priv(devlink);
	priv = &dev->priv;
	priv->pci_dev_data = id->driver_data;

	pci_set_drvdata(pdev, dev);

	dev->pdev = pdev;
	dev->event = mlx5_core_event;
	dev->profile = &profile[prof_sel];

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);
	err = mlx5_pci_init(dev, priv);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
		goto clean_dev;
	}

	err = mlx5_health_init(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
		goto close_pci;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_load_one(dev, priv, true);
	if (err) {
		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
		goto clean_health;
	}

	err = request_module_nowait(MLX5_IB_MOD);
	if (err)
		pr_info("failed request module on %s\n", MLX5_IB_MOD);

	err = devlink_register(devlink, &pdev->dev);
	if (err)
		goto clean_load;

	return 0;

clean_load:
	mlx5_unload_one(dev, priv, true);
clean_health:
	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);
clean_dev:
	pci_set_drvdata(pdev, NULL);
	devlink_free(devlink);

	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_priv *priv = &dev->priv;

	devlink_unregister(devlink);
	mlx5_unregister_device(dev);

	if (mlx5_unload_one(dev, priv, true)) {
		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
		mlx5_health_cleanup(dev);
		return;
	}

	mlx5_pagealloc_cleanup(dev);
	mlx5_health_cleanup(dev);
	mlx5_pci_close(dev, priv);
	pci_set_drvdata(pdev, NULL);
	devlink_free(devlink);
}
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	mlx5_enter_error_state(dev);
	mlx5_unload_one(dev, priv, false);
	/* In case of kernel call save the pci state and drain the health wq */
	if (state) {
		pci_save_state(pdev);
		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}

	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
/* wait for the device to show vital signs by waiting
 * for the health counter to start counting.
 */
static int wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 last_count = 0;
	u32 count;
	int i;

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			if (last_count && last_count != count) {
				dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
				return 0;
			}
			last_count = count;
		}
		msleep(50);
	}

	return -ETIMEDOUT;
}
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
			, __func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (wait_vital(pdev)) {
		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_load_one(dev, priv, false);
	if (err)
		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
			, __func__, err);
	else
		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
}
static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};
static void shutdown(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	dev_info(&pdev->dev, "Shutdown was called\n");
	/* Notify mlx5 clients that the kernel is being shut down */
	set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
	mlx5_unload_one(dev, priv, false);
	mlx5_pci_disable_device(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x1011) },			/* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 0x1013) },			/* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1015) },			/* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5, PCIe 4.0 */
	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5, PCIe 4.0 VF */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
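
/*
 * mlx5_disable_device() and mlx5_recover_device() reuse the PCI error
 * handlers to force a detected/reset/resume cycle from software,
 * without an actual PCI channel error event.
 */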
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}

void mlx5_recover_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_disable_device(dev);
	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
		mlx5_pci_resume(dev->pdev);
}
static struct pci_driver mlx5_core_driver = {
	.name           = DRIVER_NAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = init_one,
	.remove         = remove_one,
	.shutdown	= shutdown,
	.err_handler	= &mlx5_err_handler,
	.sriov_configure   = mlx5_core_sriov_configure,
};
static void mlx5_core_verify_params(void)
{
	if (prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
			prof_sel,
			ARRAY_SIZE(profile) - 1,
			MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
}
static int __init init(void)
{
	int err;

	mlx5_core_verify_params();
	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_init();
#endif

	return 0;

err_debug:
	mlx5_unregister_debugfs();
	return err;
}

static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_cleanup();
#endif
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);