/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
53 MODULE_AUTHOR("Roland Dreier");
54 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
55 MODULE_LICENSE("Dual BSD/GPL");
56 MODULE_VERSION(DRV_VERSION
);
58 struct workqueue_struct
*mlx4_wq
;
60 #ifdef CONFIG_MLX4_DEBUG
62 int mlx4_debug_level
= 0;
63 module_param_named(debug_level
, mlx4_debug_level
, int, 0644);
64 MODULE_PARM_DESC(debug_level
, "Enable debug tracing if > 0");
66 #endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */
static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");
static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");
int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248.range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");
static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)
#define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.rdmarc_per_qp	= 1 << 4,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static struct mlx4_profile low_mem_profile = {
	.rdmarc_per_qp	= 1 << 4,
};
static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1
static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
				  "1 for IB, 2 for Ethernet");
struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}
enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};

static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
	return err;
}
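/* CQE/EQE "stride" spreads each completion/event entry over a full cache line
 * on hosts with 128- or 256-byte cache lines, while only the first 32 bytes
 * carry valid data (see the comment inside the helper). The helper below
 * enables it only when the FW reports both stride flags and 64B CQEs/EQEs.
 */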
static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}
static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port]	       = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port]     = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port]  = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port]  = port_cap->max_gids;
	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
	dev->caps.port_width_cap[port] = port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port]    = port_cap->eth_mtu;
	dev->caps.def_mac[port]        = port_cap->def_mac;
	dev->caps.supported_type[port] = port_cap->supported_port_types;
	dev->caps.suggested_type[port] = port_cap->suggested_type;
	dev->caps.default_sense[port]  = port_cap->default_sense;
	dev->caps.trans_type[port]     = port_cap->trans_type;
	dev->caps.vendor_oui[port]     = port_cap->vendor_oui;
	dev->caps.wavelength[port]     = port_cap->wavelength;
	dev->caps.trans_code[port]     = port_cap->trans_code;

	return 0;
}
static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}
static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
		return;

	if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}
}
#define MLX4_A0_STEERING_TABLE_SIZE 256
320 static int mlx4_dev_cap(struct mlx4_dev
*dev
, struct mlx4_dev_cap
*dev_cap
)
325 err
= mlx4_QUERY_DEV_CAP(dev
, dev_cap
);
327 mlx4_err(dev
, "QUERY_DEV_CAP command failed, aborting\n");
330 mlx4_dev_cap_dump(dev
, dev_cap
);
332 if (dev_cap
->min_page_sz
> PAGE_SIZE
) {
333 mlx4_err(dev
, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
334 dev_cap
->min_page_sz
, PAGE_SIZE
);
337 if (dev_cap
->num_ports
> MLX4_MAX_PORTS
) {
338 mlx4_err(dev
, "HCA has %d ports, but we only support %d, aborting\n",
339 dev_cap
->num_ports
, MLX4_MAX_PORTS
);
343 if (dev_cap
->uar_size
> pci_resource_len(dev
->persist
->pdev
, 2)) {
344 mlx4_err(dev
, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
347 pci_resource_len(dev
->persist
->pdev
, 2));
351 dev
->caps
.num_ports
= dev_cap
->num_ports
;
352 dev
->caps
.num_sys_eqs
= dev_cap
->num_sys_eqs
;
353 dev
->phys_caps
.num_phys_eqs
= dev_cap
->flags2
& MLX4_DEV_CAP_FLAG2_SYS_EQS
?
354 dev
->caps
.num_sys_eqs
:
356 for (i
= 1; i
<= dev
->caps
.num_ports
; ++i
) {
357 err
= _mlx4_dev_port(dev
, i
, dev_cap
->port_cap
+ i
);
359 mlx4_err(dev
, "QUERY_PORT command failed, aborting\n");
364 dev
->caps
.uar_page_size
= PAGE_SIZE
;
365 dev
->caps
.num_uars
= dev_cap
->uar_size
/ PAGE_SIZE
;
366 dev
->caps
.local_ca_ack_delay
= dev_cap
->local_ca_ack_delay
;
367 dev
->caps
.bf_reg_size
= dev_cap
->bf_reg_size
;
368 dev
->caps
.bf_regs_per_page
= dev_cap
->bf_regs_per_page
;
369 dev
->caps
.max_sq_sg
= dev_cap
->max_sq_sg
;
370 dev
->caps
.max_rq_sg
= dev_cap
->max_rq_sg
;
371 dev
->caps
.max_wqes
= dev_cap
->max_qp_sz
;
372 dev
->caps
.max_qp_init_rdma
= dev_cap
->max_requester_per_qp
;
373 dev
->caps
.max_srq_wqes
= dev_cap
->max_srq_sz
;
374 dev
->caps
.max_srq_sge
= dev_cap
->max_rq_sg
- 1;
375 dev
->caps
.reserved_srqs
= dev_cap
->reserved_srqs
;
376 dev
->caps
.max_sq_desc_sz
= dev_cap
->max_sq_desc_sz
;
377 dev
->caps
.max_rq_desc_sz
= dev_cap
->max_rq_desc_sz
;
379 * Subtract 1 from the limit because we need to allocate a
380 * spare CQE so the HCA HW can tell the difference between an
381 * empty CQ and a full CQ.
383 dev
->caps
.max_cqes
= dev_cap
->max_cq_sz
- 1;
384 dev
->caps
.reserved_cqs
= dev_cap
->reserved_cqs
;
385 dev
->caps
.reserved_eqs
= dev_cap
->reserved_eqs
;
386 dev
->caps
.reserved_mtts
= dev_cap
->reserved_mtts
;
387 dev
->caps
.reserved_mrws
= dev_cap
->reserved_mrws
;
389 /* The first 128 UARs are used for EQ doorbells */
390 dev
->caps
.reserved_uars
= max_t(int, 128, dev_cap
->reserved_uars
);
391 dev
->caps
.reserved_pds
= dev_cap
->reserved_pds
;
392 dev
->caps
.reserved_xrcds
= (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_XRC
) ?
393 dev_cap
->reserved_xrcds
: 0;
394 dev
->caps
.max_xrcds
= (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_XRC
) ?
395 dev_cap
->max_xrcds
: 0;
396 dev
->caps
.mtt_entry_sz
= dev_cap
->mtt_entry_sz
;
398 dev
->caps
.max_msg_sz
= dev_cap
->max_msg_sz
;
399 dev
->caps
.page_size_cap
= ~(u32
) (dev_cap
->min_page_sz
- 1);
400 dev
->caps
.flags
= dev_cap
->flags
;
401 dev
->caps
.flags2
= dev_cap
->flags2
;
402 dev
->caps
.bmme_flags
= dev_cap
->bmme_flags
;
403 dev
->caps
.reserved_lkey
= dev_cap
->reserved_lkey
;
404 dev
->caps
.stat_rate_support
= dev_cap
->stat_rate_support
;
405 dev
->caps
.max_gso_sz
= dev_cap
->max_gso_sz
;
406 dev
->caps
.max_rss_tbl_sz
= dev_cap
->max_rss_tbl_sz
;
408 if (dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_PHV_EN
) {
409 struct mlx4_init_hca_param hca_param
;
411 memset(&hca_param
, 0, sizeof(hca_param
));
412 err
= mlx4_QUERY_HCA(dev
, &hca_param
);
413 /* Turn off PHV_EN flag in case phv_check_en is set.
414 * phv_check_en is a HW check that parse the packet and verify
415 * phv bit was reported correctly in the wqe. To allow QinQ
416 * PHV_EN flag should be set and phv_check_en must be cleared
417 * otherwise QinQ packets will be drop by the HW.
419 if (err
|| hca_param
.phv_check_en
)
420 dev
->caps
.flags2
&= ~MLX4_DEV_CAP_FLAG2_PHV_EN
;
423 /* Sense port always allowed on supported devices for ConnectX-1 and -2 */
424 if (mlx4_priv(dev
)->pci_dev_data
& MLX4_PCI_DEV_FORCE_SENSE_PORT
)
425 dev
->caps
.flags
|= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT
;
426 /* Don't do sense port on multifunction devices (for now at least) */
427 if (mlx4_is_mfunc(dev
))
428 dev
->caps
.flags
&= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT
;
430 if (mlx4_low_memory_profile()) {
431 dev
->caps
.log_num_macs
= MLX4_MIN_LOG_NUM_MAC
;
432 dev
->caps
.log_num_vlans
= MLX4_MIN_LOG_NUM_VLANS
;
434 dev
->caps
.log_num_macs
= log_num_mac
;
435 dev
->caps
.log_num_vlans
= MLX4_LOG_NUM_VLANS
;
438 for (i
= 1; i
<= dev
->caps
.num_ports
; ++i
) {
439 dev
->caps
.port_type
[i
] = MLX4_PORT_TYPE_NONE
;
440 if (dev
->caps
.supported_type
[i
]) {
441 /* if only ETH is supported - assign ETH */
442 if (dev
->caps
.supported_type
[i
] == MLX4_PORT_TYPE_ETH
)
443 dev
->caps
.port_type
[i
] = MLX4_PORT_TYPE_ETH
;
444 /* if only IB is supported, assign IB */
445 else if (dev
->caps
.supported_type
[i
] ==
447 dev
->caps
.port_type
[i
] = MLX4_PORT_TYPE_IB
;
449 /* if IB and ETH are supported, we set the port
450 * type according to user selection of port type;
451 * if user selected none, take the FW hint */
452 if (port_type_array
[i
- 1] == MLX4_PORT_TYPE_NONE
)
453 dev
->caps
.port_type
[i
] = dev
->caps
.suggested_type
[i
] ?
454 MLX4_PORT_TYPE_ETH
: MLX4_PORT_TYPE_IB
;
456 dev
->caps
.port_type
[i
] = port_type_array
[i
- 1];
460 * Link sensing is allowed on the port if 3 conditions are true:
461 * 1. Both protocols are supported on the port.
462 * 2. Different types are supported on the port
463 * 3. FW declared that it supports link sensing
465 mlx4_priv(dev
)->sense
.sense_allowed
[i
] =
466 ((dev
->caps
.supported_type
[i
] == MLX4_PORT_TYPE_AUTO
) &&
467 (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_DPDP
) &&
468 (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_SENSE_SUPPORT
));
471 * If "default_sense" bit is set, we move the port to "AUTO" mode
472 * and perform sense_port FW command to try and set the correct
473 * port type from beginning
475 if (mlx4_priv(dev
)->sense
.sense_allowed
[i
] && dev
->caps
.default_sense
[i
]) {
476 enum mlx4_port_type sensed_port
= MLX4_PORT_TYPE_NONE
;
477 dev
->caps
.possible_type
[i
] = MLX4_PORT_TYPE_AUTO
;
478 mlx4_SENSE_PORT(dev
, i
, &sensed_port
);
479 if (sensed_port
!= MLX4_PORT_TYPE_NONE
)
480 dev
->caps
.port_type
[i
] = sensed_port
;
482 dev
->caps
.possible_type
[i
] = dev
->caps
.port_type
[i
];
485 if (dev
->caps
.log_num_macs
> dev_cap
->port_cap
[i
].log_max_macs
) {
486 dev
->caps
.log_num_macs
= dev_cap
->port_cap
[i
].log_max_macs
;
487 mlx4_warn(dev
, "Requested number of MACs is too much for port %d, reducing to %d\n",
488 i
, 1 << dev
->caps
.log_num_macs
);
490 if (dev
->caps
.log_num_vlans
> dev_cap
->port_cap
[i
].log_max_vlans
) {
491 dev
->caps
.log_num_vlans
= dev_cap
->port_cap
[i
].log_max_vlans
;
492 mlx4_warn(dev
, "Requested number of VLANs is too much for port %d, reducing to %d\n",
493 i
, 1 << dev
->caps
.log_num_vlans
);
497 if (mlx4_is_master(dev
) && (dev
->caps
.num_ports
== 2) &&
498 (port_type_array
[0] == MLX4_PORT_TYPE_IB
) &&
499 (port_type_array
[1] == MLX4_PORT_TYPE_ETH
)) {
501 "Granular QoS per VF not supported with IB/Eth configuration\n");
502 dev
->caps
.flags2
&= ~MLX4_DEV_CAP_FLAG2_QOS_VPP
;
505 dev
->caps
.max_counters
= dev_cap
->max_counters
;
507 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
] = dev_cap
->reserved_qps
;
508 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_ETH_ADDR
] =
509 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FC_ADDR
] =
510 (1 << dev
->caps
.log_num_macs
) *
511 (1 << dev
->caps
.log_num_vlans
) *
513 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FC_EXCH
] = MLX4_NUM_FEXCH
;
515 if (dev_cap
->dmfs_high_rate_qpn_base
> 0 &&
516 dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_FS_EN
)
517 dev
->caps
.dmfs_high_rate_qpn_base
= dev_cap
->dmfs_high_rate_qpn_base
;
519 dev
->caps
.dmfs_high_rate_qpn_base
=
520 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
];
522 if (dev_cap
->dmfs_high_rate_qpn_range
> 0 &&
523 dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_FS_EN
) {
524 dev
->caps
.dmfs_high_rate_qpn_range
= dev_cap
->dmfs_high_rate_qpn_range
;
525 dev
->caps
.dmfs_high_steer_mode
= MLX4_STEERING_DMFS_A0_DEFAULT
;
526 dev
->caps
.flags2
|= MLX4_DEV_CAP_FLAG2_FS_A0
;
528 dev
->caps
.dmfs_high_steer_mode
= MLX4_STEERING_DMFS_A0_NOT_SUPPORTED
;
529 dev
->caps
.dmfs_high_rate_qpn_base
=
530 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
];
531 dev
->caps
.dmfs_high_rate_qpn_range
= MLX4_A0_STEERING_TABLE_SIZE
;
534 dev
->caps
.rl_caps
= dev_cap
->rl_caps
;
536 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_RSS_RAW_ETH
] =
537 dev
->caps
.dmfs_high_rate_qpn_range
;
539 dev
->caps
.reserved_qps
= dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
] +
540 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_ETH_ADDR
] +
541 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FC_ADDR
] +
542 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FC_EXCH
];
544 dev
->caps
.sqp_demux
= (mlx4_is_master(dev
)) ? MLX4_MAX_NUM_SLAVES
: 0;
546 if (!enable_64b_cqe_eqe
&& !mlx4_is_slave(dev
)) {
548 (MLX4_DEV_CAP_FLAG_64B_CQE
| MLX4_DEV_CAP_FLAG_64B_EQE
)) {
549 mlx4_warn(dev
, "64B EQEs/CQEs supported by the device but not enabled\n");
550 dev
->caps
.flags
&= ~MLX4_DEV_CAP_FLAG_64B_CQE
;
551 dev
->caps
.flags
&= ~MLX4_DEV_CAP_FLAG_64B_EQE
;
554 if (dev_cap
->flags2
&
555 (MLX4_DEV_CAP_FLAG2_CQE_STRIDE
|
556 MLX4_DEV_CAP_FLAG2_EQE_STRIDE
)) {
557 mlx4_warn(dev
, "Disabling EQE/CQE stride per user request\n");
558 dev_cap
->flags2
&= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE
;
559 dev_cap
->flags2
&= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE
;
563 if ((dev
->caps
.flags
&
564 (MLX4_DEV_CAP_FLAG_64B_CQE
| MLX4_DEV_CAP_FLAG_64B_EQE
)) &&
566 dev
->caps
.function_caps
|= MLX4_FUNC_CAP_64B_EQE_CQE
;
568 if (!mlx4_is_slave(dev
)) {
569 mlx4_enable_cqe_eqe_stride(dev
);
570 dev
->caps
.alloc_res_qp_mask
=
571 (dev
->caps
.bf_reg_size
? MLX4_RESERVE_ETH_BF_QP
: 0) |
574 if (!(dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_ETS_CFG
) &&
575 dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_SET_ETH_SCHED
) {
576 mlx4_warn(dev
, "Old device ETS support detected\n");
577 mlx4_warn(dev
, "Consider upgrading device FW.\n");
578 dev
->caps
.flags2
|= MLX4_DEV_CAP_FLAG2_ETS_CFG
;
582 dev
->caps
.alloc_res_qp_mask
= 0;
585 mlx4_enable_ignore_fcs(dev
);
590 static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev
*dev
,
591 enum pci_bus_speed
*speed
,
592 enum pcie_link_width
*width
)
594 u32 lnkcap1
, lnkcap2
;
597 #define PCIE_MLW_CAP_SHIFT 4 /* start of MLW mask in link capabilities */
599 *speed
= PCI_SPEED_UNKNOWN
;
600 *width
= PCIE_LNK_WIDTH_UNKNOWN
;
602 err1
= pcie_capability_read_dword(dev
->persist
->pdev
, PCI_EXP_LNKCAP
,
604 err2
= pcie_capability_read_dword(dev
->persist
->pdev
, PCI_EXP_LNKCAP2
,
606 if (!err2
&& lnkcap2
) { /* PCIe r3.0-compliant */
607 if (lnkcap2
& PCI_EXP_LNKCAP2_SLS_8_0GB
)
608 *speed
= PCIE_SPEED_8_0GT
;
609 else if (lnkcap2
& PCI_EXP_LNKCAP2_SLS_5_0GB
)
610 *speed
= PCIE_SPEED_5_0GT
;
611 else if (lnkcap2
& PCI_EXP_LNKCAP2_SLS_2_5GB
)
612 *speed
= PCIE_SPEED_2_5GT
;
615 *width
= (lnkcap1
& PCI_EXP_LNKCAP_MLW
) >> PCIE_MLW_CAP_SHIFT
;
616 if (!lnkcap2
) { /* pre-r3.0 */
617 if (lnkcap1
& PCI_EXP_LNKCAP_SLS_5_0GB
)
618 *speed
= PCIE_SPEED_5_0GT
;
619 else if (lnkcap1
& PCI_EXP_LNKCAP_SLS_2_5GB
)
620 *speed
= PCIE_SPEED_2_5GT
;
624 if (*speed
== PCI_SPEED_UNKNOWN
|| *width
== PCIE_LNK_WIDTH_UNKNOWN
) {
626 err2
? err2
: -EINVAL
;
631 static void mlx4_check_pcie_caps(struct mlx4_dev
*dev
)
633 enum pcie_link_width width
, width_cap
;
634 enum pci_bus_speed speed
, speed_cap
;
637 #define PCIE_SPEED_STR(speed) \
638 (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
639 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
640 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
643 err
= mlx4_get_pcie_dev_link_caps(dev
, &speed_cap
, &width_cap
);
646 "Unable to determine PCIe device BW capabilities\n");
650 err
= pcie_get_minimum_link(dev
->persist
->pdev
, &speed
, &width
);
651 if (err
|| speed
== PCI_SPEED_UNKNOWN
||
652 width
== PCIE_LNK_WIDTH_UNKNOWN
) {
654 "Unable to determine PCI device chain minimum BW\n");
658 if (width
!= width_cap
|| speed
!= speed_cap
)
660 "PCIe BW is different than device's capability\n");
662 mlx4_info(dev
, "PCIe link speed is %s, device supports %s\n",
663 PCIE_SPEED_STR(speed
), PCIE_SPEED_STR(speed_cap
));
664 mlx4_info(dev
, "PCIe link width is x%d, device supports x%d\n",
/* The function checks if there are live VFs, and returns the number of them */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}
int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
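/* Worked example with hypothetical QPN bases: if base_proxy_sqpn were 0x100
 * and base_tunnel_sqpn were 0x200, a tunnel QPN of 0x203 would map to
 * MLX4_RESERVED_QKEY_BASE + 3 and a proxy QPN of 0x105 would map to
 * MLX4_RESERVED_QKEY_BASE + 5, per the offsets computed above.
 */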
void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);
void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);
__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);
static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else {
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);
	}

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}
770 static int mlx4_slave_cap(struct mlx4_dev
*dev
)
774 struct mlx4_dev_cap dev_cap
;
775 struct mlx4_func_cap func_cap
;
776 struct mlx4_init_hca_param hca_param
;
779 memset(&hca_param
, 0, sizeof(hca_param
));
780 err
= mlx4_QUERY_HCA(dev
, &hca_param
);
782 mlx4_err(dev
, "QUERY_HCA command failed, aborting\n");
786 /* fail if the hca has an unknown global capability
787 * at this time global_caps should be always zeroed
789 if (hca_param
.global_caps
) {
790 mlx4_err(dev
, "Unknown hca global capabilities\n");
794 mlx4_log_num_mgm_entry_size
= hca_param
.log_mc_entry_sz
;
796 dev
->caps
.hca_core_clock
= hca_param
.hca_core_clock
;
798 memset(&dev_cap
, 0, sizeof(dev_cap
));
799 dev
->caps
.max_qp_dest_rdma
= 1 << hca_param
.log_rd_per_qp
;
800 err
= mlx4_dev_cap(dev
, &dev_cap
);
802 mlx4_err(dev
, "QUERY_DEV_CAP command failed, aborting\n");
806 err
= mlx4_QUERY_FW(dev
);
808 mlx4_err(dev
, "QUERY_FW command failed: could not get FW version\n");
810 page_size
= ~dev
->caps
.page_size_cap
+ 1;
811 mlx4_warn(dev
, "HCA minimum page size:%d\n", page_size
);
812 if (page_size
> PAGE_SIZE
) {
813 mlx4_err(dev
, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
814 page_size
, PAGE_SIZE
);
818 /* slave gets uar page size from QUERY_HCA fw command */
819 dev
->caps
.uar_page_size
= 1 << (hca_param
.uar_page_sz
+ 12);
821 /* TODO: relax this assumption */
822 if (dev
->caps
.uar_page_size
!= PAGE_SIZE
) {
823 mlx4_err(dev
, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
824 dev
->caps
.uar_page_size
, PAGE_SIZE
);
828 memset(&func_cap
, 0, sizeof(func_cap
));
829 err
= mlx4_QUERY_FUNC_CAP(dev
, 0, &func_cap
);
831 mlx4_err(dev
, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
836 if ((func_cap
.pf_context_behaviour
| PF_CONTEXT_BEHAVIOUR_MASK
) !=
837 PF_CONTEXT_BEHAVIOUR_MASK
) {
838 mlx4_err(dev
, "Unknown pf context behaviour %x known flags %x\n",
839 func_cap
.pf_context_behaviour
, PF_CONTEXT_BEHAVIOUR_MASK
);
843 dev
->caps
.num_ports
= func_cap
.num_ports
;
844 dev
->quotas
.qp
= func_cap
.qp_quota
;
845 dev
->quotas
.srq
= func_cap
.srq_quota
;
846 dev
->quotas
.cq
= func_cap
.cq_quota
;
847 dev
->quotas
.mpt
= func_cap
.mpt_quota
;
848 dev
->quotas
.mtt
= func_cap
.mtt_quota
;
849 dev
->caps
.num_qps
= 1 << hca_param
.log_num_qps
;
850 dev
->caps
.num_srqs
= 1 << hca_param
.log_num_srqs
;
851 dev
->caps
.num_cqs
= 1 << hca_param
.log_num_cqs
;
852 dev
->caps
.num_mpts
= 1 << hca_param
.log_mpt_sz
;
853 dev
->caps
.num_eqs
= func_cap
.max_eq
;
854 dev
->caps
.reserved_eqs
= func_cap
.reserved_eq
;
855 dev
->caps
.reserved_lkey
= func_cap
.reserved_lkey
;
856 dev
->caps
.num_pds
= MLX4_NUM_PDS
;
857 dev
->caps
.num_mgms
= 0;
858 dev
->caps
.num_amgms
= 0;
860 if (dev
->caps
.num_ports
> MLX4_MAX_PORTS
) {
861 mlx4_err(dev
, "HCA has %d ports, but we only support %d, aborting\n",
862 dev
->caps
.num_ports
, MLX4_MAX_PORTS
);
866 mlx4_replace_zero_macs(dev
);
868 dev
->caps
.qp0_qkey
= kcalloc(dev
->caps
.num_ports
, sizeof(u32
), GFP_KERNEL
);
869 dev
->caps
.qp0_tunnel
= kcalloc(dev
->caps
.num_ports
, sizeof (u32
), GFP_KERNEL
);
870 dev
->caps
.qp0_proxy
= kcalloc(dev
->caps
.num_ports
, sizeof (u32
), GFP_KERNEL
);
871 dev
->caps
.qp1_tunnel
= kcalloc(dev
->caps
.num_ports
, sizeof (u32
), GFP_KERNEL
);
872 dev
->caps
.qp1_proxy
= kcalloc(dev
->caps
.num_ports
, sizeof (u32
), GFP_KERNEL
);
874 if (!dev
->caps
.qp0_tunnel
|| !dev
->caps
.qp0_proxy
||
875 !dev
->caps
.qp1_tunnel
|| !dev
->caps
.qp1_proxy
||
876 !dev
->caps
.qp0_qkey
) {
881 for (i
= 1; i
<= dev
->caps
.num_ports
; ++i
) {
882 err
= mlx4_QUERY_FUNC_CAP(dev
, i
, &func_cap
);
884 mlx4_err(dev
, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
888 dev
->caps
.qp0_qkey
[i
- 1] = func_cap
.qp0_qkey
;
889 dev
->caps
.qp0_tunnel
[i
- 1] = func_cap
.qp0_tunnel_qpn
;
890 dev
->caps
.qp0_proxy
[i
- 1] = func_cap
.qp0_proxy_qpn
;
891 dev
->caps
.qp1_tunnel
[i
- 1] = func_cap
.qp1_tunnel_qpn
;
892 dev
->caps
.qp1_proxy
[i
- 1] = func_cap
.qp1_proxy_qpn
;
893 dev
->caps
.port_mask
[i
] = dev
->caps
.port_type
[i
];
894 dev
->caps
.phys_port_id
[i
] = func_cap
.phys_port_id
;
895 err
= mlx4_get_slave_pkey_gid_tbl_len(dev
, i
,
896 &dev
->caps
.gid_table_len
[i
],
897 &dev
->caps
.pkey_table_len
[i
]);
902 if (dev
->caps
.uar_page_size
* (dev
->caps
.num_uars
-
903 dev
->caps
.reserved_uars
) >
904 pci_resource_len(dev
->persist
->pdev
,
906 mlx4_err(dev
, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
907 dev
->caps
.uar_page_size
* dev
->caps
.num_uars
,
909 pci_resource_len(dev
->persist
->pdev
, 2));
914 if (hca_param
.dev_cap_enabled
& MLX4_DEV_CAP_64B_EQE_ENABLED
) {
915 dev
->caps
.eqe_size
= 64;
916 dev
->caps
.eqe_factor
= 1;
918 dev
->caps
.eqe_size
= 32;
919 dev
->caps
.eqe_factor
= 0;
922 if (hca_param
.dev_cap_enabled
& MLX4_DEV_CAP_64B_CQE_ENABLED
) {
923 dev
->caps
.cqe_size
= 64;
924 dev
->caps
.userspace_caps
|= MLX4_USER_DEV_CAP_LARGE_CQE
;
926 dev
->caps
.cqe_size
= 32;
929 if (hca_param
.dev_cap_enabled
& MLX4_DEV_CAP_EQE_STRIDE_ENABLED
) {
930 dev
->caps
.eqe_size
= hca_param
.eqe_size
;
931 dev
->caps
.eqe_factor
= 0;
934 if (hca_param
.dev_cap_enabled
& MLX4_DEV_CAP_CQE_STRIDE_ENABLED
) {
935 dev
->caps
.cqe_size
= hca_param
.cqe_size
;
936 /* User still need to know when CQE > 32B */
937 dev
->caps
.userspace_caps
|= MLX4_USER_DEV_CAP_LARGE_CQE
;
940 dev
->caps
.flags2
&= ~MLX4_DEV_CAP_FLAG2_TS
;
941 mlx4_warn(dev
, "Timestamping is not supported in slave mode\n");
943 slave_adjust_steering_mode(dev
, &dev_cap
, &hca_param
);
944 mlx4_dbg(dev
, "RSS support for IP fragments is %s\n",
945 hca_param
.rss_ip_frags
? "on" : "off");
947 if (func_cap
.extra_flags
& MLX4_QUERY_FUNC_FLAGS_BF_RES_QP
&&
948 dev
->caps
.bf_reg_size
)
949 dev
->caps
.alloc_res_qp_mask
|= MLX4_RESERVE_ETH_BF_QP
;
951 if (func_cap
.extra_flags
& MLX4_QUERY_FUNC_FLAGS_A0_RES_QP
)
952 dev
->caps
.alloc_res_qp_mask
|= MLX4_RESERVE_A0_QP
;
957 kfree(dev
->caps
.qp0_qkey
);
958 kfree(dev
->caps
.qp0_tunnel
);
959 kfree(dev
->caps
.qp0_proxy
);
960 kfree(dev
->caps
.qp1_tunnel
);
961 kfree(dev
->caps
.qp1_proxy
);
962 dev
->caps
.qp0_qkey
= NULL
;
963 dev
->caps
.qp0_tunnel
= NULL
;
964 dev
->caps
.qp0_proxy
= NULL
;
965 dev
->caps
.qp1_tunnel
= NULL
;
966 dev
->caps
.qp1_proxy
= NULL
;
971 static void mlx4_request_modules(struct mlx4_dev
*dev
)
974 int has_ib_port
= false;
975 int has_eth_port
= false;
976 #define EN_DRV_NAME "mlx4_en"
977 #define IB_DRV_NAME "mlx4_ib"
979 for (port
= 1; port
<= dev
->caps
.num_ports
; port
++) {
980 if (dev
->caps
.port_type
[port
] == MLX4_PORT_TYPE_IB
)
982 else if (dev
->caps
.port_type
[port
] == MLX4_PORT_TYPE_ETH
)
987 request_module_nowait(EN_DRV_NAME
);
988 if (has_ib_port
|| (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_IBOE
))
989 request_module_nowait(IB_DRV_NAME
);
993 * Change the port configuration of the device.
994 * Every user of this function must hold the port mutex.
996 int mlx4_change_port_types(struct mlx4_dev
*dev
,
997 enum mlx4_port_type
*port_types
)
1003 for (port
= 0; port
< dev
->caps
.num_ports
; port
++) {
1004 /* Change the port type only if the new type is different
1005 * from the current, and not set to Auto */
1006 if (port_types
[port
] != dev
->caps
.port_type
[port
+ 1])
1010 mlx4_unregister_device(dev
);
1011 for (port
= 1; port
<= dev
->caps
.num_ports
; port
++) {
1012 mlx4_CLOSE_PORT(dev
, port
);
1013 dev
->caps
.port_type
[port
] = port_types
[port
- 1];
1014 err
= mlx4_SET_PORT(dev
, port
, -1);
1016 mlx4_err(dev
, "Failed to set port %d, aborting\n",
1021 mlx4_set_port_mask(dev
);
1022 err
= mlx4_register_device(dev
);
1024 mlx4_err(dev
, "Failed to register device\n");
1027 mlx4_request_modules(dev
);
1034 static ssize_t
show_port_type(struct device
*dev
,
1035 struct device_attribute
*attr
,
1038 struct mlx4_port_info
*info
= container_of(attr
, struct mlx4_port_info
,
1040 struct mlx4_dev
*mdev
= info
->dev
;
1044 (mdev
->caps
.port_type
[info
->port
] == MLX4_PORT_TYPE_IB
) ?
1046 if (mdev
->caps
.possible_type
[info
->port
] == MLX4_PORT_TYPE_AUTO
)
1047 sprintf(buf
, "auto (%s)\n", type
);
1049 sprintf(buf
, "%s\n", type
);
1054 static ssize_t
set_port_type(struct device
*dev
,
1055 struct device_attribute
*attr
,
1056 const char *buf
, size_t count
)
1058 struct mlx4_port_info
*info
= container_of(attr
, struct mlx4_port_info
,
1060 struct mlx4_dev
*mdev
= info
->dev
;
1061 struct mlx4_priv
*priv
= mlx4_priv(mdev
);
1062 enum mlx4_port_type types
[MLX4_MAX_PORTS
];
1063 enum mlx4_port_type new_types
[MLX4_MAX_PORTS
];
1064 static DEFINE_MUTEX(set_port_type_mutex
);
1068 mutex_lock(&set_port_type_mutex
);
1070 if (!strcmp(buf
, "ib\n"))
1071 info
->tmp_type
= MLX4_PORT_TYPE_IB
;
1072 else if (!strcmp(buf
, "eth\n"))
1073 info
->tmp_type
= MLX4_PORT_TYPE_ETH
;
1074 else if (!strcmp(buf
, "auto\n"))
1075 info
->tmp_type
= MLX4_PORT_TYPE_AUTO
;
1077 mlx4_err(mdev
, "%s is not supported port type\n", buf
);
1082 mlx4_stop_sense(mdev
);
1083 mutex_lock(&priv
->port_mutex
);
1084 /* Possible type is always the one that was delivered */
1085 mdev
->caps
.possible_type
[info
->port
] = info
->tmp_type
;
1087 for (i
= 0; i
< mdev
->caps
.num_ports
; i
++) {
1088 types
[i
] = priv
->port
[i
+1].tmp_type
? priv
->port
[i
+1].tmp_type
:
1089 mdev
->caps
.possible_type
[i
+1];
1090 if (types
[i
] == MLX4_PORT_TYPE_AUTO
)
1091 types
[i
] = mdev
->caps
.port_type
[i
+1];
1094 if (!(mdev
->caps
.flags
& MLX4_DEV_CAP_FLAG_DPDP
) &&
1095 !(mdev
->caps
.flags
& MLX4_DEV_CAP_FLAG_SENSE_SUPPORT
)) {
1096 for (i
= 1; i
<= mdev
->caps
.num_ports
; i
++) {
1097 if (mdev
->caps
.possible_type
[i
] == MLX4_PORT_TYPE_AUTO
) {
1098 mdev
->caps
.possible_type
[i
] = mdev
->caps
.port_type
[i
];
1104 mlx4_err(mdev
, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
1108 mlx4_do_sense_ports(mdev
, new_types
, types
);
1110 err
= mlx4_check_port_params(mdev
, new_types
);
1114 /* We are about to apply the changes after the configuration
1115 * was verified, no need to remember the temporary types
1117 for (i
= 0; i
< mdev
->caps
.num_ports
; i
++)
1118 priv
->port
[i
+ 1].tmp_type
= 0;
1120 err
= mlx4_change_port_types(mdev
, new_types
);
1123 mlx4_start_sense(mdev
);
1124 mutex_unlock(&priv
->port_mutex
);
1126 mutex_unlock(&set_port_type_mutex
);
1128 return err
? err
: count
;
static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return  256;
	case IB_MTU_512:  return  512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}
1163 static ssize_t
show_port_ib_mtu(struct device
*dev
,
1164 struct device_attribute
*attr
,
1167 struct mlx4_port_info
*info
= container_of(attr
, struct mlx4_port_info
,
1169 struct mlx4_dev
*mdev
= info
->dev
;
1171 if (mdev
->caps
.port_type
[info
->port
] == MLX4_PORT_TYPE_ETH
)
1172 mlx4_warn(mdev
, "port level mtu is only used for IB ports\n");
1174 sprintf(buf
, "%d\n",
1175 ibta_mtu_to_int(mdev
->caps
.port_ib_mtu
[info
->port
]));
1179 static ssize_t
set_port_ib_mtu(struct device
*dev
,
1180 struct device_attribute
*attr
,
1181 const char *buf
, size_t count
)
1183 struct mlx4_port_info
*info
= container_of(attr
, struct mlx4_port_info
,
1185 struct mlx4_dev
*mdev
= info
->dev
;
1186 struct mlx4_priv
*priv
= mlx4_priv(mdev
);
1187 int err
, port
, mtu
, ibta_mtu
= -1;
1189 if (mdev
->caps
.port_type
[info
->port
] == MLX4_PORT_TYPE_ETH
) {
1190 mlx4_warn(mdev
, "port level mtu is only used for IB ports\n");
1194 err
= kstrtoint(buf
, 0, &mtu
);
1196 ibta_mtu
= int_to_ibta_mtu(mtu
);
1198 if (err
|| ibta_mtu
< 0) {
1199 mlx4_err(mdev
, "%s is invalid IBTA mtu\n", buf
);
1203 mdev
->caps
.port_ib_mtu
[info
->port
] = ibta_mtu
;
1205 mlx4_stop_sense(mdev
);
1206 mutex_lock(&priv
->port_mutex
);
1207 mlx4_unregister_device(mdev
);
1208 for (port
= 1; port
<= mdev
->caps
.num_ports
; port
++) {
1209 mlx4_CLOSE_PORT(mdev
, port
);
1210 err
= mlx4_SET_PORT(mdev
, port
, -1);
1212 mlx4_err(mdev
, "Failed to set port %d, aborting\n",
1217 err
= mlx4_register_device(mdev
);
1219 mutex_unlock(&priv
->port_mutex
);
1220 mlx4_start_sense(mdev
);
1221 return err
? err
: count
;
1224 int mlx4_bond(struct mlx4_dev
*dev
)
1227 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1229 mutex_lock(&priv
->bond_mutex
);
1231 if (!mlx4_is_bonded(dev
))
1232 ret
= mlx4_do_bond(dev
, true);
1236 mutex_unlock(&priv
->bond_mutex
);
1238 mlx4_err(dev
, "Failed to bond device: %d\n", ret
);
1240 mlx4_dbg(dev
, "Device is bonded\n");
1243 EXPORT_SYMBOL_GPL(mlx4_bond
);
1245 int mlx4_unbond(struct mlx4_dev
*dev
)
1248 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1250 mutex_lock(&priv
->bond_mutex
);
1252 if (mlx4_is_bonded(dev
))
1253 ret
= mlx4_do_bond(dev
, false);
1255 mutex_unlock(&priv
->bond_mutex
);
1257 mlx4_err(dev
, "Failed to unbond device: %d\n", ret
);
1259 mlx4_dbg(dev
, "Device is unbonded\n");
1262 EXPORT_SYMBOL_GPL(mlx4_unbond
);
1265 int mlx4_port_map_set(struct mlx4_dev
*dev
, struct mlx4_port_map
*v2p
)
1267 u8 port1
= v2p
->port1
;
1268 u8 port2
= v2p
->port2
;
1269 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1272 if (!(dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_PORT_REMAP
))
1275 mutex_lock(&priv
->bond_mutex
);
1277 /* zero means keep current mapping for this port */
1279 port1
= priv
->v2p
.port1
;
1281 port2
= priv
->v2p
.port2
;
1283 if ((port1
< 1) || (port1
> MLX4_MAX_PORTS
) ||
1284 (port2
< 1) || (port2
> MLX4_MAX_PORTS
) ||
1285 (port1
== 2 && port2
== 1)) {
1286 /* besides boundary checks cross mapping makes
1287 * no sense and therefore not allowed */
1289 } else if ((port1
== priv
->v2p
.port1
) &&
1290 (port2
== priv
->v2p
.port2
)) {
1293 err
= mlx4_virt2phy_port_map(dev
, port1
, port2
);
1295 mlx4_dbg(dev
, "port map changed: [%d][%d]\n",
1297 priv
->v2p
.port1
= port1
;
1298 priv
->v2p
.port2
= port2
;
		mlx4_err(dev, "Failed to change port map: %d\n", err);
1304 mutex_unlock(&priv
->bond_mutex
);
1307 EXPORT_SYMBOL_GPL(mlx4_port_map_set
);
1309 static int mlx4_load_fw(struct mlx4_dev
*dev
)
1311 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1314 priv
->fw
.fw_icm
= mlx4_alloc_icm(dev
, priv
->fw
.fw_pages
,
1315 GFP_HIGHUSER
| __GFP_NOWARN
, 0);
1316 if (!priv
->fw
.fw_icm
) {
1317 mlx4_err(dev
, "Couldn't allocate FW area, aborting\n");
1321 err
= mlx4_MAP_FA(dev
, priv
->fw
.fw_icm
);
1323 mlx4_err(dev
, "MAP_FA command failed, aborting\n");
1327 err
= mlx4_RUN_FW(dev
);
1329 mlx4_err(dev
, "RUN_FW command failed, aborting\n");
1339 mlx4_free_icm(dev
, priv
->fw
.fw_icm
, 0);
1343 static int mlx4_init_cmpt_table(struct mlx4_dev
*dev
, u64 cmpt_base
,
1346 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1350 err
= mlx4_init_icm_table(dev
, &priv
->qp_table
.cmpt_table
,
1352 ((u64
) (MLX4_CMPT_TYPE_QP
*
1353 cmpt_entry_sz
) << MLX4_CMPT_SHIFT
),
1354 cmpt_entry_sz
, dev
->caps
.num_qps
,
1355 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
],
1360 err
= mlx4_init_icm_table(dev
, &priv
->srq_table
.cmpt_table
,
1362 ((u64
) (MLX4_CMPT_TYPE_SRQ
*
1363 cmpt_entry_sz
) << MLX4_CMPT_SHIFT
),
1364 cmpt_entry_sz
, dev
->caps
.num_srqs
,
1365 dev
->caps
.reserved_srqs
, 0, 0);
1369 err
= mlx4_init_icm_table(dev
, &priv
->cq_table
.cmpt_table
,
1371 ((u64
) (MLX4_CMPT_TYPE_CQ
*
1372 cmpt_entry_sz
) << MLX4_CMPT_SHIFT
),
1373 cmpt_entry_sz
, dev
->caps
.num_cqs
,
1374 dev
->caps
.reserved_cqs
, 0, 0);
1378 num_eqs
= dev
->phys_caps
.num_phys_eqs
;
1379 err
= mlx4_init_icm_table(dev
, &priv
->eq_table
.cmpt_table
,
1381 ((u64
) (MLX4_CMPT_TYPE_EQ
*
1382 cmpt_entry_sz
) << MLX4_CMPT_SHIFT
),
1383 cmpt_entry_sz
, num_eqs
, num_eqs
, 0, 0);
1390 mlx4_cleanup_icm_table(dev
, &priv
->cq_table
.cmpt_table
);
1393 mlx4_cleanup_icm_table(dev
, &priv
->srq_table
.cmpt_table
);
1396 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.cmpt_table
);
1402 static int mlx4_init_icm(struct mlx4_dev
*dev
, struct mlx4_dev_cap
*dev_cap
,
1403 struct mlx4_init_hca_param
*init_hca
, u64 icm_size
)
1405 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1410 err
= mlx4_SET_ICM_SIZE(dev
, icm_size
, &aux_pages
);
1412 mlx4_err(dev
, "SET_ICM_SIZE command failed, aborting\n");
1416 mlx4_dbg(dev
, "%lld KB of HCA context requires %lld KB aux memory\n",
1417 (unsigned long long) icm_size
>> 10,
1418 (unsigned long long) aux_pages
<< 2);
1420 priv
->fw
.aux_icm
= mlx4_alloc_icm(dev
, aux_pages
,
1421 GFP_HIGHUSER
| __GFP_NOWARN
, 0);
1422 if (!priv
->fw
.aux_icm
) {
1423 mlx4_err(dev
, "Couldn't allocate aux memory, aborting\n");
1427 err
= mlx4_MAP_ICM_AUX(dev
, priv
->fw
.aux_icm
);
1429 mlx4_err(dev
, "MAP_ICM_AUX command failed, aborting\n");
1433 err
= mlx4_init_cmpt_table(dev
, init_hca
->cmpt_base
, dev_cap
->cmpt_entry_sz
);
1435 mlx4_err(dev
, "Failed to map cMPT context memory, aborting\n");
1440 num_eqs
= dev
->phys_caps
.num_phys_eqs
;
1441 err
= mlx4_init_icm_table(dev
, &priv
->eq_table
.table
,
1442 init_hca
->eqc_base
, dev_cap
->eqc_entry_sz
,
1443 num_eqs
, num_eqs
, 0, 0);
1445 mlx4_err(dev
, "Failed to map EQ context memory, aborting\n");
1446 goto err_unmap_cmpt
;
1450 * Reserved MTT entries must be aligned up to a cacheline
1451 * boundary, since the FW will write to them, while the driver
1452 * writes to all other MTT entries. (The variable
1453 * dev->caps.mtt_entry_sz below is really the MTT segment
1454 * size, not the raw entry size)
1456 dev
->caps
.reserved_mtts
=
1457 ALIGN(dev
->caps
.reserved_mtts
* dev
->caps
.mtt_entry_sz
,
1458 dma_get_cache_alignment()) / dev
->caps
.mtt_entry_sz
;
1460 err
= mlx4_init_icm_table(dev
, &priv
->mr_table
.mtt_table
,
1462 dev
->caps
.mtt_entry_sz
,
1464 dev
->caps
.reserved_mtts
, 1, 0);
1466 mlx4_err(dev
, "Failed to map MTT context memory, aborting\n");
1470 err
= mlx4_init_icm_table(dev
, &priv
->mr_table
.dmpt_table
,
1471 init_hca
->dmpt_base
,
1472 dev_cap
->dmpt_entry_sz
,
1474 dev
->caps
.reserved_mrws
, 1, 1);
1476 mlx4_err(dev
, "Failed to map dMPT context memory, aborting\n");
1480 err
= mlx4_init_icm_table(dev
, &priv
->qp_table
.qp_table
,
1482 dev_cap
->qpc_entry_sz
,
1484 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
],
1487 mlx4_err(dev
, "Failed to map QP context memory, aborting\n");
1488 goto err_unmap_dmpt
;
1491 err
= mlx4_init_icm_table(dev
, &priv
->qp_table
.auxc_table
,
1492 init_hca
->auxc_base
,
1493 dev_cap
->aux_entry_sz
,
1495 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
],
1498 mlx4_err(dev
, "Failed to map AUXC context memory, aborting\n");
1502 err
= mlx4_init_icm_table(dev
, &priv
->qp_table
.altc_table
,
1503 init_hca
->altc_base
,
1504 dev_cap
->altc_entry_sz
,
1506 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
],
1509 mlx4_err(dev
, "Failed to map ALTC context memory, aborting\n");
1510 goto err_unmap_auxc
;
1513 err
= mlx4_init_icm_table(dev
, &priv
->qp_table
.rdmarc_table
,
1514 init_hca
->rdmarc_base
,
1515 dev_cap
->rdmarc_entry_sz
<< priv
->qp_table
.rdmarc_shift
,
1517 dev
->caps
.reserved_qps_cnt
[MLX4_QP_REGION_FW
],
1520 mlx4_err(dev
, "Failed to map RDMARC context memory, aborting\n");
1521 goto err_unmap_altc
;
1524 err
= mlx4_init_icm_table(dev
, &priv
->cq_table
.table
,
1526 dev_cap
->cqc_entry_sz
,
1528 dev
->caps
.reserved_cqs
, 0, 0);
1530 mlx4_err(dev
, "Failed to map CQ context memory, aborting\n");
1531 goto err_unmap_rdmarc
;
1534 err
= mlx4_init_icm_table(dev
, &priv
->srq_table
.table
,
1535 init_hca
->srqc_base
,
1536 dev_cap
->srq_entry_sz
,
1538 dev
->caps
.reserved_srqs
, 0, 0);
1540 mlx4_err(dev
, "Failed to map SRQ context memory, aborting\n");
1545 * For flow steering device managed mode it is required to use
1546 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1547 * required, but for simplicity just map the whole multicast
1548 * group table now. The table isn't very big and it's a lot
1549 * easier than trying to track ref counts.
1551 err
= mlx4_init_icm_table(dev
, &priv
->mcg_table
.table
,
1553 mlx4_get_mgm_entry_size(dev
),
1554 dev
->caps
.num_mgms
+ dev
->caps
.num_amgms
,
1555 dev
->caps
.num_mgms
+ dev
->caps
.num_amgms
,
1558 mlx4_err(dev
, "Failed to map MCG context memory, aborting\n");
1565 mlx4_cleanup_icm_table(dev
, &priv
->srq_table
.table
);
1568 mlx4_cleanup_icm_table(dev
, &priv
->cq_table
.table
);
1571 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.rdmarc_table
);
1574 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.altc_table
);
1577 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.auxc_table
);
1580 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.qp_table
);
1583 mlx4_cleanup_icm_table(dev
, &priv
->mr_table
.dmpt_table
);
1586 mlx4_cleanup_icm_table(dev
, &priv
->mr_table
.mtt_table
);
1589 mlx4_cleanup_icm_table(dev
, &priv
->eq_table
.table
);
1592 mlx4_cleanup_icm_table(dev
, &priv
->eq_table
.cmpt_table
);
1593 mlx4_cleanup_icm_table(dev
, &priv
->cq_table
.cmpt_table
);
1594 mlx4_cleanup_icm_table(dev
, &priv
->srq_table
.cmpt_table
);
1595 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.cmpt_table
);
1598 mlx4_UNMAP_ICM_AUX(dev
);
1601 mlx4_free_icm(dev
, priv
->fw
.aux_icm
, 0);
1606 static void mlx4_free_icms(struct mlx4_dev
*dev
)
1608 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1610 mlx4_cleanup_icm_table(dev
, &priv
->mcg_table
.table
);
1611 mlx4_cleanup_icm_table(dev
, &priv
->srq_table
.table
);
1612 mlx4_cleanup_icm_table(dev
, &priv
->cq_table
.table
);
1613 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.rdmarc_table
);
1614 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.altc_table
);
1615 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.auxc_table
);
1616 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.qp_table
);
1617 mlx4_cleanup_icm_table(dev
, &priv
->mr_table
.dmpt_table
);
1618 mlx4_cleanup_icm_table(dev
, &priv
->mr_table
.mtt_table
);
1619 mlx4_cleanup_icm_table(dev
, &priv
->eq_table
.table
);
1620 mlx4_cleanup_icm_table(dev
, &priv
->eq_table
.cmpt_table
);
1621 mlx4_cleanup_icm_table(dev
, &priv
->cq_table
.cmpt_table
);
1622 mlx4_cleanup_icm_table(dev
, &priv
->srq_table
.cmpt_table
);
1623 mlx4_cleanup_icm_table(dev
, &priv
->qp_table
.cmpt_table
);
1625 mlx4_UNMAP_ICM_AUX(dev
);
1626 mlx4_free_icm(dev
, priv
->fw
.aux_icm
, 0);
1629 static void mlx4_slave_exit(struct mlx4_dev
*dev
)
1631 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1633 mutex_lock(&priv
->cmd
.slave_cmd_mutex
);
1634 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_RESET
, 0, MLX4_COMM_CMD_NA_OP
,
1636 mlx4_warn(dev
, "Failed to close slave function\n");
1637 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
1640 static int map_bf_area(struct mlx4_dev
*dev
)
1642 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1643 resource_size_t bf_start
;
1644 resource_size_t bf_len
;
1647 if (!dev
->caps
.bf_reg_size
)
1650 bf_start
= pci_resource_start(dev
->persist
->pdev
, 2) +
1651 (dev
->caps
.num_uars
<< PAGE_SHIFT
);
1652 bf_len
= pci_resource_len(dev
->persist
->pdev
, 2) -
1653 (dev
->caps
.num_uars
<< PAGE_SHIFT
);
1654 priv
->bf_mapping
= io_mapping_create_wc(bf_start
, bf_len
);
1655 if (!priv
->bf_mapping
)
static void unmap_bf_area(struct mlx4_dev *dev)
{
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
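/* Reading the 64-bit free-running HCA clock below is done as high word, low
 * word, then high word again, retrying (up to 10 times) until the two high
 * samples match; this guards against a low-word wrap-around between the two
 * reads producing a torn 64-bit value.
 */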
cycle_t mlx4_read_clock(struct mlx4_dev *dev)
{
	u32 clockhi, clocklo, clockhi1;
	cycle_t cycles;
	int i;
	struct mlx4_priv *priv = mlx4_priv(dev);

	for (i = 0; i < 10; i++) {
		clockhi = swab32(readl(priv->clock_mapping));
		clocklo = swab32(readl(priv->clock_mapping + 4));
		clockhi1 = swab32(readl(priv->clock_mapping));
		if (clockhi == clockhi1)
			break;
	}

	cycles = (u64) clockhi << 32 | (u64) clocklo;

	return cycles;
}
EXPORT_SYMBOL_GPL(mlx4_read_clock);
1689 static int map_internal_clock(struct mlx4_dev
*dev
)
1691 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1693 priv
->clock_mapping
=
1694 ioremap(pci_resource_start(dev
->persist
->pdev
,
1695 priv
->fw
.clock_bar
) +
1696 priv
->fw
.clock_offset
, MLX4_CLOCK_SIZE
);
1698 if (!priv
->clock_mapping
)
1704 int mlx4_get_internal_clock_params(struct mlx4_dev
*dev
,
1705 struct mlx4_clock_params
*params
)
1707 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1709 if (mlx4_is_slave(dev
))
1715 params
->bar
= priv
->fw
.clock_bar
;
1716 params
->offset
= priv
->fw
.clock_offset
;
1717 params
->size
= MLX4_CLOCK_SIZE
;
1721 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params
);
1723 static void unmap_internal_clock(struct mlx4_dev
*dev
)
1725 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1727 if (priv
->clock_mapping
)
1728 iounmap(priv
->clock_mapping
);
1731 static void mlx4_close_hca(struct mlx4_dev
*dev
)
1733 unmap_internal_clock(dev
);
1735 if (mlx4_is_slave(dev
))
1736 mlx4_slave_exit(dev
);
1738 mlx4_CLOSE_HCA(dev
, 0);
1739 mlx4_free_icms(dev
);
1743 static void mlx4_close_fw(struct mlx4_dev
*dev
)
1745 if (!mlx4_is_slave(dev
)) {
1747 mlx4_free_icm(dev
, mlx4_priv(dev
)->fw
.fw_icm
, 0);
1751 static int mlx4_comm_check_offline(struct mlx4_dev
*dev
)
1753 #define COMM_CHAN_OFFLINE_OFFSET 0x09
1758 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1760 end
= msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT
) + jiffies
;
1761 while (time_before(jiffies
, end
)) {
1762 comm_flags
= swab32(readl((__iomem
char *)priv
->mfunc
.comm
+
1763 MLX4_COMM_CHAN_FLAGS
));
1764 offline_bit
= (comm_flags
&
1765 (u32
)(1 << COMM_CHAN_OFFLINE_OFFSET
));
1768 /* There are cases as part of AER/Reset flow that PF needs
1769 * around 100 msec to load. We therefore sleep for 100 msec
1770 * to allow other tasks to make use of that CPU during this
1775 mlx4_err(dev
, "Communication channel is offline.\n");
1779 static void mlx4_reset_vf_support(struct mlx4_dev
*dev
)
1781 #define COMM_CHAN_RST_OFFSET 0x1e
1783 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1787 comm_caps
= swab32(readl((__iomem
char *)priv
->mfunc
.comm
+
1788 MLX4_COMM_CHAN_CAPS
));
1789 comm_rst
= (comm_caps
& (u32
)(1 << COMM_CHAN_RST_OFFSET
));
1792 dev
->caps
.vf_caps
|= MLX4_VF_CAP_FLAG_RESET
;
1795 static int mlx4_init_slave(struct mlx4_dev
*dev
)
1797 struct mlx4_priv
*priv
= mlx4_priv(dev
);
1798 u64 dma
= (u64
) priv
->mfunc
.vhcr_dma
;
1799 int ret_from_reset
= 0;
1801 u32 cmd_channel_ver
;
1803 if (atomic_read(&pf_loading
)) {
1804 mlx4_warn(dev
, "PF is not ready - Deferring probe\n");
1805 return -EPROBE_DEFER
;
1808 mutex_lock(&priv
->cmd
.slave_cmd_mutex
);
1809 priv
->cmd
.max_cmds
= 1;
1810 if (mlx4_comm_check_offline(dev
)) {
1811 mlx4_err(dev
, "PF is not responsive, skipping initialization\n");
1815 mlx4_reset_vf_support(dev
);
1816 mlx4_warn(dev
, "Sending reset\n");
1817 ret_from_reset
= mlx4_comm_cmd(dev
, MLX4_COMM_CMD_RESET
, 0,
1818 MLX4_COMM_CMD_NA_OP
, MLX4_COMM_TIME
);
1819 /* if we are in the middle of flr the slave will try
1820 * NUM_OF_RESET_RETRIES times before leaving.*/
1821 if (ret_from_reset
) {
1822 if (MLX4_DELAY_RESET_SLAVE
== ret_from_reset
) {
1823 mlx4_warn(dev
, "slave is currently in the middle of FLR - Deferring probe\n");
1824 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
1825 return -EPROBE_DEFER
;
1830 /* check the driver version - the slave I/F revision
1831 * must match the master's */
1832 slave_read
= swab32(readl(&priv
->mfunc
.comm
->slave_read
));
1833 cmd_channel_ver
= mlx4_comm_get_version();
1835 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver
) !=
1836 MLX4_COMM_GET_IF_REV(slave_read
)) {
1837 mlx4_err(dev
, "slave driver version is not supported by the master\n");
1841 mlx4_warn(dev
, "Sending vhcr0\n");
1842 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR0
, dma
>> 48,
1843 MLX4_COMM_CMD_NA_OP
, MLX4_COMM_TIME
))
1845 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR1
, dma
>> 32,
1846 MLX4_COMM_CMD_NA_OP
, MLX4_COMM_TIME
))
1848 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR2
, dma
>> 16,
1849 MLX4_COMM_CMD_NA_OP
, MLX4_COMM_TIME
))
1851 if (mlx4_comm_cmd(dev
, MLX4_COMM_CMD_VHCR_EN
, dma
,
1852 MLX4_COMM_CMD_NA_OP
, MLX4_COMM_TIME
))
1855 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
1859 mlx4_comm_cmd(dev
, MLX4_COMM_CMD_RESET
, 0, MLX4_COMM_CMD_NA_OP
, 0);
1861 mutex_unlock(&priv
->cmd
.slave_cmd_mutex
);
1865 static void mlx4_parav_master_pf_caps(struct mlx4_dev
*dev
)
1869 for (i
= 1; i
<= dev
->caps
.num_ports
; i
++) {
1870 if (dev
->caps
.port_type
[i
] == MLX4_PORT_TYPE_ETH
)
1871 dev
->caps
.gid_table_len
[i
] =
1872 mlx4_get_slave_num_gids(dev
, 0, i
);
1874 dev
->caps
.gid_table_len
[i
] = 1;
1875 dev
->caps
.pkey_table_len
[i
] =
1876 dev
->phys_caps
.pkey_phys_table_len
[i
] - 1;
static int choose_log_fs_mgm_entry_size(int qp_per_entry)
{
	int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE;

	for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
	      i++) {
		if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
			break;
	}

	return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
}
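/* Example: for qp_per_entry = 60 the loop above stops at i = 9, since a
 * 512-byte entry supports 4 * (512 / 16 - 2) = 120 QPs while a 256-byte
 * entry (i = 8) only supports 4 * (256 / 16 - 2) = 56.
 */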
static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
{
	switch (dmfs_high_steer_mode) {
	case MLX4_STEERING_DMFS_A0_DEFAULT:
		return "default performance";

	case MLX4_STEERING_DMFS_A0_DYNAMIC:
		return "dynamic hybrid mode";

	case MLX4_STEERING_DMFS_A0_STATIC:
		return "performance optimized for limited rule configuration (static)";

	case MLX4_STEERING_DMFS_A0_DISABLE:
		return "disabled performance optimized steering";

	case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
		return "performance optimized steering not supported";

	default:
		return "Unrecognized mode";
	}
}

#define MLX4_DMFS_A0_STEERING (1UL << 2)
1918 static void choose_steering_mode(struct mlx4_dev
*dev
,
1919 struct mlx4_dev_cap
*dev_cap
)
1921 if (mlx4_log_num_mgm_entry_size
<= 0) {
1922 if ((-mlx4_log_num_mgm_entry_size
) & MLX4_DMFS_A0_STEERING
) {
1923 if (dev
->caps
.dmfs_high_steer_mode
==
1924 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED
)
1925 mlx4_err(dev
, "DMFS high rate mode not supported\n");
1927 dev
->caps
.dmfs_high_steer_mode
=
1928 MLX4_STEERING_DMFS_A0_STATIC
;
1932 if (mlx4_log_num_mgm_entry_size
<= 0 &&
1933 dev_cap
->flags2
& MLX4_DEV_CAP_FLAG2_FS_EN
&&
1934 (!mlx4_is_mfunc(dev
) ||
1935 (dev_cap
->fs_max_num_qp_per_entry
>=
1936 (dev
->persist
->num_vfs
+ 1))) &&
1937 choose_log_fs_mgm_entry_size(dev_cap
->fs_max_num_qp_per_entry
) >=
1938 MLX4_MIN_MGM_LOG_ENTRY_SIZE
) {
1939 dev
->oper_log_mgm_entry_size
=
1940 choose_log_fs_mgm_entry_size(dev_cap
->fs_max_num_qp_per_entry
);
1941 dev
->caps
.steering_mode
= MLX4_STEERING_MODE_DEVICE_MANAGED
;
1942 dev
->caps
.num_qp_per_mgm
= dev_cap
->fs_max_num_qp_per_entry
;
1943 dev
->caps
.fs_log_max_ucast_qp_range_size
=
1944 dev_cap
->fs_log_max_ucast_qp_range_size
;
1946 if (dev
->caps
.dmfs_high_steer_mode
!=
1947 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED
)
1948 dev
->caps
.dmfs_high_steer_mode
= MLX4_STEERING_DMFS_A0_DISABLE
;
1949 if (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_UC_STEER
&&
1950 dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_MC_STEER
)
1951 dev
->caps
.steering_mode
= MLX4_STEERING_MODE_B0
;
1953 dev
->caps
.steering_mode
= MLX4_STEERING_MODE_A0
;
1955 if (dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_UC_STEER
||
1956 dev
->caps
.flags
& MLX4_DEV_CAP_FLAG_VEP_MC_STEER
)
1957 mlx4_warn(dev
, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
1959 dev
->oper_log_mgm_entry_size
=
1960 mlx4_log_num_mgm_entry_size
> 0 ?
1961 mlx4_log_num_mgm_entry_size
:
1962 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE
;
1963 dev
->caps
.num_qp_per_mgm
= mlx4_get_qp_per_mgm(dev
);
1965 mlx4_dbg(dev
, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
1966 mlx4_steering_mode_str(dev
->caps
.steering_mode
),
1967 dev
->oper_log_mgm_entry_size
,
1968 mlx4_log_num_mgm_entry_size
);
static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap)
{
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
	else
		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;

	mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
		 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
}
static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
{
	int i;
	struct mlx4_port_cap port_cap;

	if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
		return -EINVAL;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_dev_port(dev, i, &port_cap)) {
			mlx4_err(dev,
				 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
		} else if ((dev->caps.dmfs_high_steer_mode !=
			   MLX4_STEERING_DMFS_A0_DEFAULT) &&
			   (port_cap.dmfs_optimized_state ==
			    !!(dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_DISABLE))) {
			mlx4_err(dev,
				 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
				 dmfs_high_rate_steering_mode_str(
					dev->caps.dmfs_high_steer_mode),
				 (port_cap.dmfs_optimized_state ?
					"enabled" : "disabled"));
		}
	}

	return 0;
}
static int mlx4_init_fw(struct mlx4_dev *dev)
{
	struct mlx4_mod_stat_cfg   mlx4_cfg;
	int err = 0;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_QUERY_FW(dev);
		if (err) {
			if (err == -EACCES)
				mlx4_info(dev, "non-primary physical function, skipping\n");
			else
				mlx4_err(dev, "QUERY_FW command failed, aborting\n");
			return err;
		}

		err = mlx4_load_fw(dev);
		if (err) {
			mlx4_err(dev, "Failed to start FW, aborting\n");
			return err;
		}

		mlx4_cfg.log_pg_sz_m = 1;
		mlx4_cfg.log_pg_sz = 0;
		err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		if (err)
			mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
	}

	return err;
}
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv	  *priv = mlx4_priv(dev);
	struct mlx4_adapter	   adapter;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_profile	   profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	struct mlx4_config_dev_params params;
	int err;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_dev_cap(dev, &dev_cap);
		if (err) {
			mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
			return err;
		}

		choose_steering_mode(dev, &dev_cap);
		choose_tunnel_offload_mode(dev, &dev_cap);

		if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
		    mlx4_is_master(dev))
			dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;

		err = mlx4_get_phys_port_id(dev);
		if (err)
			mlx4_err(dev, "Fail to get physical port id\n");

		if (mlx4_is_master(dev))
			mlx4_parav_master_pf_caps(dev);

		if (mlx4_low_memory_profile()) {
			mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
			profile = low_mem_profile;
		} else {
			profile = default_profile;
		}
		if (dev->caps.steering_mode ==
		    MLX4_STEERING_MODE_DEVICE_MANAGED)
			profile.num_mcg = MLX4_FS_NUM_MCG;

		icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
					     &init_hca);
		if ((long long) icm_size < 0) {
			err = icm_size;
			return err;
		}

		dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

		init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
		init_hca.uar_page_sz = PAGE_SHIFT - 12;
		init_hca.mw_enabled = 0;
		if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
		    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
			init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE;

		err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
		if (err)
			return err;

		err = mlx4_INIT_HCA(dev, &init_hca);
		if (err) {
			mlx4_err(dev, "INIT_HCA command failed, aborting\n");
			goto err_free_icm;
		}

		if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
			err = mlx4_query_func(dev, &dev_cap);
			if (err < 0) {
				mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
				goto err_close;
			} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
				dev->caps.num_eqs = dev_cap.max_eqs;
				dev->caps.reserved_eqs = dev_cap.reserved_eqs;
				dev->caps.reserved_uars = dev_cap.reserved_uars;
			}
		}

		/*
		 * If TS is supported by FW
		 * read HCA frequency by QUERY_HCA command
		 */
		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
			memset(&init_hca, 0, sizeof(init_hca));
			err = mlx4_QUERY_HCA(dev, &init_hca);
			if (err) {
				mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
			} else {
				dev->caps.hca_core_clock =
					init_hca.hca_core_clock;
			}

			/* In case we got HCA frequency 0 - disable timestamping
			 * to avoid dividing by zero
			 */
			if (!dev->caps.hca_core_clock) {
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev,
					 "HCA frequency is 0 - timestamping is not supported\n");
			} else if (map_internal_clock(dev)) {
				/*
				 * Map internal clock,
				 * in case of failure disable timestamping
				 */
				dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
				mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
			}
		}

		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
			if (mlx4_validate_optimized_steering(dev))
				mlx4_warn(dev, "Optimized steering validation failed\n");

			if (dev->caps.dmfs_high_steer_mode ==
			    MLX4_STEERING_DMFS_A0_DISABLE) {
				dev->caps.dmfs_high_rate_qpn_base =
					dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
				dev->caps.dmfs_high_rate_qpn_range =
					MLX4_A0_STEERING_TABLE_SIZE;
			}

			mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n",
				 dmfs_high_rate_steering_mode_str(
					dev->caps.dmfs_high_steer_mode));
		}
	} else {
		err = mlx4_init_slave(dev);
		if (err) {
			if (err != -EPROBE_DEFER)
				mlx4_err(dev, "Failed to initialize slave\n");
			return err;
		}

		err = mlx4_slave_cap(dev);
		if (err) {
			mlx4_err(dev, "Failed to obtain slave caps\n");
			goto err_close;
		}
	}

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	/*Only the master set the ports, all the rest got it from it.*/
	if (!mlx4_is_slave(dev))
		mlx4_set_port_mask(dev);

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
		goto unmap_bf;
	}

	/* Query CONFIG_DEV parameters */
	err = mlx4_config_dev_retrieval(dev, &params);
	if (err && err != -ENOTSUPP) {
		mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
	} else if (!err) {
		dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
		dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
	}
	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

unmap_bf:
	unmap_internal_clock(dev);
	unmap_bf_area(dev);

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	if (mlx4_is_slave(dev))
		mlx4_slave_exit(dev);
	else
		mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	if (!mlx4_is_slave(dev))
		mlx4_free_icms(dev);

	return err;
}
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent_pow2;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	if (!dev->caps.max_counters)
		return -ENOSPC;

	nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
	/* reserve last counter index for sink counter */
	return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
				nent_pow2 - 1, 0,
				nent_pow2 - dev->caps.max_counters + 1);
}
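/*
 * Example of the sizing above: with max_counters = 127 the bitmap is
 * rounded up to nent_pow2 = 128 entries and the top 128 - 127 + 1 = 2
 * indexes are held out of normal allocation, which keeps the last real
 * counter index free to serve as the shared sink counter mentioned in the
 * comment above.
 */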
static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return;

	if (!dev->caps.max_counters)
		return;

	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}
static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	for (port = 0; port < dev->caps.num_ports; port++)
		if (priv->def_counter[port] != -1)
			mlx4_counter_free(dev, priv->def_counter[port]);
}
static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port, err = 0;
	u32 idx;

	for (port = 0; port < dev->caps.num_ports; port++)
		priv->def_counter[port] = -1;

	for (port = 0; port < dev->caps.num_ports; port++) {
		err = mlx4_counter_alloc(dev, &idx);

		if (!err || err == -ENOSPC) {
			priv->def_counter[port] = idx;
		} else if (err == -ENOENT) {
			err = 0;
			continue;
		} else if (mlx4_is_slave(dev) && err == -EINVAL) {
			priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
			mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
				  MLX4_SINK_COUNTER_INDEX(dev));
			err = 0;
		} else {
			mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
				 __func__, port + 1, err);
			mlx4_cleanup_default_counters(dev);
			return err;
		}

		mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
			 __func__, priv->def_counter[port], port + 1);
	}

	return err;
}
int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	if (*idx == -1) {
		*idx = MLX4_SINK_COUNTER_INDEX(dev);
		return -ENOSPC;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(__mlx4_counter_alloc);
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER,
				   RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*idx = get_param_l(&out_param);

		return err;
	}
	return __mlx4_counter_alloc(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
				u8 counter_index)
{
	struct mlx4_cmd_mailbox *if_stat_mailbox;
	int err;
	u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;

	if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(if_stat_mailbox))
		return PTR_ERR(if_stat_mailbox);

	err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
	return err;
}
void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return;

	if (idx == MLX4_SINK_COUNTER_INDEX(dev))
		return;

	__mlx4_clear_if_stat(dev, idx);

	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
}
EXPORT_SYMBOL_GPL(__mlx4_counter_free);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, idx);
		mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
			 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			 MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_counter_free(dev, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->def_counter[port - 1];
}
EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
}
EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
__be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return priv->mfunc.master.vf_admin[entry].vport[port].guid;
}
EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be64 guid;

	/* hw GUID */
	if (entry == 0)
		return;

	get_random_bytes((char *)&guid, sizeof(guid));
	guid &= ~(cpu_to_be64(1ULL << 56));
	guid |= cpu_to_be64(1ULL << 57);
	priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
}
EXPORT_SYMBOL_GPL(mlx4_set_random_admin_guid);
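/*
 * The bit fiddling above treats the random value as an IEEE EUI-64 stored
 * big-endian: bit 56 of the host-order value is the low-order bit of the
 * first byte (the individual/group bit) and bit 57 is the next bit
 * (universal/local).  Clearing bit 56 keeps the GUID unicast and setting
 * bit 57 marks it as locally administered, so a randomly generated GUID
 * cannot collide with a vendor-assigned one.
 */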
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
		goto err_uar_table_free;
	}

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_xrcd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
		goto err_xrcd_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_mcg_table(dev);
		if (err) {
			mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
			goto err_mr_table_free;
		}
		err = mlx4_config_mad_demux(dev);
		if (err) {
			mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
			goto err_mcg_table_free;
		}
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
		goto err_mcg_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
		goto err_eq_table_free;
	}

	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt (IRQ %d)\n",
				  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_warn(dev, "Trying again without MSI-X\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
				 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
		goto err_srq_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_counters_table(dev);
		if (err && err != -ENOENT) {
			mlx4_err(dev, "Failed to initialize counters table, aborting\n");
			goto err_qp_table_free;
		}
	}

	err = mlx4_allocate_default_counters(dev);
	if (err) {
		mlx4_err(dev, "Failed to allocate default counters, aborting\n");
		goto err_counters_table_free;
	}

	if (!mlx4_is_slave(dev)) {
		for (port = 1; port <= dev->caps.num_ports; port++) {
			ib_port_default_caps = 0;
			err = mlx4_get_port_ib_caps(dev, port,
						    &ib_port_default_caps);
			if (err)
				mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
					  port, err);
			dev->caps.ib_port_def_cap[port] = ib_port_default_caps;

			/* initialize per-slave default ib port capabilities */
			if (mlx4_is_master(dev)) {
				int i;

				for (i = 0; i < dev->num_slaves; i++) {
					if (i == mlx4_master_func_num(dev))
						continue;
					priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
						ib_port_default_caps;
				}
			}

			if (mlx4_is_mfunc(dev))
				dev->caps.port_ib_mtu[port] = IB_MTU_2048;
			else
				dev->caps.port_ib_mtu[port] = IB_MTU_4096;

			err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
					    dev->caps.pkey_table_len[port] : -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto err_default_countes_free;
			}
		}
	}

	return 0;

err_default_countes_free:
	mlx4_cleanup_default_counters(dev);

err_counters_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mcg_table_free:
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_mcg_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_xrcd_table_free:
	mlx4_cleanup_xrcd_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
{
	int requested_cpu = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eq *eq;
	int off = 0;
	int i;

	if (eqn > dev->caps.num_comp_vectors)
		return -EINVAL;

	for (i = 1; i < port; i++)
		off += mlx4_get_eqs_per_port(dev, i);

	requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);

	/* Meaning EQs are shared, and this call comes from the second port */
	if (requested_cpu < 0)
		return 0;

	eq = &priv->eq_table.eq[eqn];

	if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(requested_cpu, eq->affinity_mask);

	return 0;
}
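/*
 * Worked example of the requested_cpu computation, assuming
 * mlx4_get_eqs_per_port() reports 4 EQs for port 1 and MLX4_EQ_ASYNC == 0:
 * for port 2 and eqn == 6, off = 4, and the !! term subtracts one slot for
 * the asynchronous EQ, so requested_cpu = 6 - 4 - 1 = 1 and the affinity
 * hint for that EQ is pinned to CPU 1.
 */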
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int i;
	int port = 0;

	if (msi_x) {
		int nreq = dev->caps.num_ports * num_online_cpus() + 1;

		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     nreq);
		if (nreq > MAX_MSIX)
			nreq = MAX_MSIX;

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

		nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
					     nreq);

		if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
			kfree(entries);
			goto no_msi;
		}
		/* 1 is reserved for events (asynchronous EQ) */
		dev->caps.num_comp_vectors = nreq - 1;

		priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
		bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
			    dev->caps.num_ports);

		for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
			if (i == MLX4_EQ_ASYNC)
				continue;

			priv->eq_table.eq[i].irq =
				entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;

			if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
				bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
					    dev->caps.num_ports);
				/* We don't set affinity hint when there
				 * are not enough vectors.
				 */
			} else {
				set_bit(port,
					priv->eq_table.eq[i].actv_ports.ports);
				if (mlx4_init_affinity_hint(dev, port + 1, i))
					mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
						  i);

				/* We divide the Eqs evenly between the two ports.
				 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
				 * refers to the number of Eqs per port
				 * (i.e eqs_per_port). Theoretically, we would like to
				 * write something like (i + 1) % eqs_per_port == 0.
				 * However, since there's an asynchronous Eq, we have
				 * to skip over it by comparing this condition to
				 * !!((i + 1) > MLX4_EQ_ASYNC).
				 */
				if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
				    ((i + 1) %
				     (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
				    !!((i + 1) > MLX4_EQ_ASYNC))
					/* If dev->caps.num_comp_vectors < dev->caps.num_ports,
					 * everything is shared anyway.
					 */
					port++;
			}
		}

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;

	BUG_ON(MLX4_EQ_ASYNC >= 2);
	for (i = 0; i < 2; ++i) {
		priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
		if (i != MLX4_EQ_ASYNC) {
			bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
				    dev->caps.num_ports);
		}
	}
}
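/*
 * Worked example of the even split described above, assuming
 * num_comp_vectors = 8, num_ports = 2 and MLX4_EQ_ASYNC == 0:
 * eqs_per_port = 8 / 2 = 4 and the completion vectors following the async
 * EQ are indexes 1..8, so the (i + 1) test advances "port" after indexes
 * 1-4 have been bound to port 1, leaving indexes 5-8 for port 2.
 */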
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	if (!mlx4_is_slave(dev)) {
		mlx4_init_mac_table(dev, &info->mac_table);
		mlx4_init_vlan_table(dev, &info->vlan_table);
		mlx4_init_roce_gid_table(dev, &info->gid_table);
		info->base_qpn = mlx4_get_base_qpn(dev, port);
	}

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	if (mlx4_is_mfunc(dev))
		info->port_attr.attr.mode = S_IRUGO;
	else {
		info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_attr.store     = set_port_type;
	}
	info->port_attr.show      = show_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
	info->port_mtu_attr.attr.name = info->dev_mtu_name;
	if (mlx4_is_mfunc(dev))
		info->port_mtu_attr.attr.mode = S_IRUGO;
	else {
		info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR;
		info->port_mtu_attr.store     = set_port_ib_mtu;
	}
	info->port_mtu_attr.show      = show_port_ib_mtu;
	sysfs_attr_init(&info->port_mtu_attr.attr);

	err = device_create_file(&dev->persist->pdev->dev,
				 &info->port_mtu_attr);
	if (err) {
		mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
		device_remove_file(&info->dev->persist->pdev->dev,
				   &info->port_attr);
		info->port = -1;
	}

	return err;
}
static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port < 0)
		return;

	device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
	device_remove_file(&info->dev->persist->pdev->dev,
			   &info->port_mtu_attr);
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(info->rmap);
	info->rmap = NULL;
#endif
}
static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
	return 0;
}
static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}
static int extended_func_num(struct pci_dev *pdev)
{
	return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
}
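/*
 * Example: a function at devfn 00.3 maps to slot 0, function 3, so
 * extended_func_num() returns 0 * 8 + 3 = 3, while a VF that spilled over
 * into slot 1, function 2 returns 1 * 8 + 2 = 10.  This flattens the
 * (slot, function) pair into a single index that __mlx4_init_one() can
 * compare against the per-port VF counts when deciding whether to probe a
 * virtual function.
 */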
#define MLX4_OWNER_BASE	0x8069c
#define MLX4_OWNER_SIZE	4

static int mlx4_get_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;
	u32 ret;

	if (pci_channel_offline(dev->persist->pdev))
		return -EIO;

	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
			MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return -ENOMEM;
	}

	ret = readl(owner);
	iounmap(owner);
	return (int) !!ret;
}

static void mlx4_free_ownership(struct mlx4_dev *dev)
{
	void __iomem *owner;

	if (pci_channel_offline(dev->persist->pdev))
		return;

	owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
			MLX4_OWNER_BASE,
			MLX4_OWNER_SIZE);
	if (!owner) {
		mlx4_err(dev, "Failed to obtain ownership bit\n");
		return;
	}

	writel(0, owner);
	msleep(1000);
	iounmap(owner);
}
#define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
				  !!((flags) & MLX4_FLAG_MASTER))
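/*
 * In other words: SR-IOV may be enabled if and only if the function is
 * also acting as the master PF.  Both flags set (a PF running SR-IOV as
 * master) and both flags clear (a VF, or a PF without SR-IOV) are
 * consistent; any mixed combination indicates an inconsistent state, and
 * mlx4_load_one() aborts when this check fails.
 */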
static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
			     u8 total_vfs, int existing_vfs, int reset_flow)
{
	u64 dev_flags = dev->flags;
	int err = 0;
	int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
					MLX4_MAX_NUM_VF);

	if (reset_flow) {
		dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs),
				       GFP_KERNEL);
		if (!dev->dev_vfs)
			goto free_mem;
		return dev_flags;
	}

	atomic_inc(&pf_loading);
	if (dev->flags & MLX4_FLAG_SRIOV) {
		if (existing_vfs != total_vfs) {
			mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
				 existing_vfs, total_vfs);
			total_vfs = existing_vfs;
		}
	}

	dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL);
	if (NULL == dev->dev_vfs) {
		mlx4_err(dev, "Failed to allocate memory for VFs\n");
		goto disable_sriov;
	}

	if (!(dev->flags & MLX4_FLAG_SRIOV)) {
		if (total_vfs > fw_enabled_sriov_vfs) {
			mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
				 total_vfs, fw_enabled_sriov_vfs);
			err = -ENOMEM;
			goto disable_sriov;
		}
		mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
		err = pci_enable_sriov(pdev, total_vfs);
	}
	if (err) {
		mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
			 err);
		goto disable_sriov;
	} else {
		mlx4_warn(dev, "Running in master mode\n");
		dev_flags |= MLX4_FLAG_SRIOV |
			MLX4_FLAG_MASTER;
		dev_flags &= ~MLX4_FLAG_SLAVE;
		dev->persist->num_vfs = total_vfs;
	}
	return dev_flags;

disable_sriov:
	atomic_dec(&pf_loading);
free_mem:
	dev->persist->num_vfs = 0;
	kfree(dev->dev_vfs);
	dev->dev_vfs = NULL;
	return dev_flags & ~MLX4_FLAG_MASTER;
}
enum {
	MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
};
static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			      int *nvfs)
{
	int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];

	/* Checking for 64 VFs as a limitation of CX2 */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
	    requested_vfs >= 64) {
		mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
			 requested_vfs);
		return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
	}

	return 0;
}
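/*
 * Example: asking for 32 VFs on each port (requested_vfs = 32 + 32 + 0 =
 * 64) on firmware that does not advertise MLX4_DEV_CAP_FLAG2_80_VFS trips
 * the check above, since 64 is not below the ConnectX-2 limit; the same
 * request is allowed through when the 80-VF capability flag is set.
 */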
static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
			 int total_vfs, int *nvfs, struct mlx4_priv *priv,
			 int reset_flow)
{
	struct mlx4_dev *dev;
	unsigned sum = 0;
	int err;
	int port;
	int i;
	struct mlx4_dev_cap *dev_cap = NULL;
	int existing_vfs = 0;

	dev = &priv->dev;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);
	mutex_init(&priv->bond_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;
	dev->numa_node = dev_to_node(&pdev->dev);

	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
		dev->flags |= MLX4_FLAG_SLAVE;
	} else {
		/* We reset the device and enable SRIOV only for physical
		 * devices. Try to claim ownership on the device;
		 * if already taken, skip -- do not allow multiple PFs */
		err = mlx4_get_ownership(dev);
		if (err) {
			if (err < 0)
				return err;
			else {
				mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
				return -EINVAL;
			}
		}

		atomic_set(&priv->opreq_count, 0);
		INIT_WORK(&priv->opreq_task, mlx4_opreq_action);

		/*
		 * Now reset the HCA before we touch the PCI capabilities or
		 * attempt a firmware command, since a boot ROM may have left
		 * the HCA in an undefined state.
		 */
		err = mlx4_reset(dev);
		if (err) {
			mlx4_err(dev, "Failed to reset HCA, aborting\n");
			goto err_sriov;
		}

		if (total_vfs) {
			dev->flags = MLX4_FLAG_MASTER;
			existing_vfs = pci_num_vf(pdev);
			if (existing_vfs)
				dev->flags |= MLX4_FLAG_SRIOV;
			dev->persist->num_vfs = total_vfs;
		}
	}

	/* on load remove any previous indication of internal error,
	 * device is up.
	 */
	dev->persist->state = MLX4_DEVICE_STATE_UP;

slave_start:
	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting\n");
		goto err_sriov;
	}

	/* In slave functions, the communication channel must be initialized
	 * before posting commands. Also, init num_slaves before calling
	 * mlx4_init_hca */
	if (mlx4_is_mfunc(dev)) {
		if (mlx4_is_master(dev)) {
			dev->num_slaves = MLX4_MAX_NUM_SLAVES;
		} else {
			dev->num_slaves = 0;
			err = mlx4_multi_func_init(dev);
			if (err) {
				mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
				goto err_cmd;
			}
		}
	}

	err = mlx4_init_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to init fw, aborting.\n");
		goto err_mfunc;
	}

	if (mlx4_is_master(dev)) {
		/* when we hit the goto slave_start below, dev_cap already initialized */
		if (!dev_cap) {
			dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
			if (!dev_cap) {
				err = -ENOMEM;
				goto err_fw;
			}

			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;

			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
				u64 dev_flags = mlx4_enable_sriov(dev, pdev,
								  total_vfs,
								  existing_vfs,
								  reset_flow);

				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
				dev->flags = dev_flags;
				if (!SRIOV_VALID_STATE(dev->flags)) {
					mlx4_err(dev, "Invalid SRIOV state\n");
					goto err_sriov;
				}
				err = mlx4_reset(dev);
				if (err) {
					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
					goto err_sriov;
				}
				goto slave_start;
			}
		} else {
			/* Legacy mode FW requires SRIOV to be enabled before
			 * doing QUERY_DEV_CAP, since max_eq's value is different if
			 * SRIOV is enabled.
			 */
			memset(dev_cap, 0, sizeof(*dev_cap));
			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
				goto err_fw;
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not primary Physical function
			 * Running in slave mode */
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
			/* We're not a PF */
			if (dev->flags & MLX4_FLAG_SRIOV) {
				if (!existing_vfs)
					pci_disable_sriov(pdev);
				if (mlx4_is_master(dev) && !reset_flow)
					atomic_dec(&pf_loading);
				dev->flags &= ~MLX4_FLAG_SRIOV;
			}
			if (!mlx4_is_slave(dev))
				mlx4_free_ownership(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_fw;
	}

	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
						  existing_vfs, reset_flow);

		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
			dev->flags = dev_flags;
			err = mlx4_cmd_init(dev);
			if (err) {
				/* Only VHCR is cleaned up, so could still
				 * send FW commands
				 */
				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
				goto err_close;
			}
		} else {
			dev->flags = dev_flags;
		}

		if (!SRIOV_VALID_STATE(dev->flags)) {
			mlx4_err(dev, "Invalid SRIOV state\n");
			goto err_close;
		}
	}

	/* check if the device is functioning at its maximum possible speed.
	 * No return code for this call, just warn the user in case of PCI
	 * express device capabilities are under-satisfied by the bus.
	 */
	if (!mlx4_is_slave(dev))
		mlx4_check_pcie_caps(dev);

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		if (dev->caps.num_ports < 2 &&
		    num_vfs_argc > 1) {
			err = -EINVAL;
			mlx4_err(dev,
				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
				 dev->caps.num_ports);
			goto err_close;
		}
		memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));

		for (i = 0;
		     i < sizeof(dev->persist->nvfs)/
		     sizeof(dev->persist->nvfs[0]); i++) {
			unsigned j;

			for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
					dev->caps.num_ports;
			}
		}

		/* In master functions, the communication channel
		 * must be initialized after obtaining its address from fw
		 */
		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if ((mlx4_is_mfunc(dev)) &&
	    !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -ENOSYS;
		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_disable_msix;
	}

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	mlx4_init_quotas(dev);
	/* When PF resources are ready arm its comm channel to enable
	 * getting commands
	 */
	if (mlx4_is_master(dev)) {
		err = mlx4_ARM_COMM_CHANNEL(dev);
		if (err) {
			mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
				 err);
			goto err_steer;
		}
	}

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	priv->v2p.port1 = 1;
	priv->v2p.port2 = 2;

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_request_modules(dev);

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
		atomic_dec(&pf_loading);

	kfree(dev_cap);
	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_default_counters(dev);
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_disable_msix:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev)) {
		mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
		mlx4_multi_func_cleanup(dev);
	}

	if (mlx4_is_slave(dev)) {
		kfree(dev->caps.qp0_qkey);
		kfree(dev->caps.qp0_tunnel);
		kfree(dev->caps.qp0_proxy);
		kfree(dev->caps.qp1_tunnel);
		kfree(dev->caps.qp1_proxy);
	}

err_close:
	mlx4_close_hca(dev);

err_fw:
	mlx4_close_fw(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
		pci_disable_sriov(pdev);
		dev->flags &= ~MLX4_FLAG_SRIOV;
	}

	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev_cap);
	return err;
}
static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
			   struct mlx4_priv *priv)
{
	int err;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	unsigned total_vfs = 0;
	unsigned int i;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS
	 * per port, we must limit the number of VFs to 63 (since there are
	 * 128 MACs)
	 */
	for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		if (nvfs[param_map[num_vfs_argc - 1][i]] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc;
	     i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	if (total_vfs > MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VF's (%d) than allowed by hw (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF);
		err = -EINVAL;
		goto err_disable_pdev;
	}

	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VF's (%d) for port (%d) than allowed by driver (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT);
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

	/* Check for BARs. */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as pf, we normally skip vfs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned vfs_offset = 0;

			for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
					 extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
	}

	err = mlx4_catas_init(&priv->dev);
	if (err)
		goto err_release_regions;

	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
	if (err)
		goto err_catas;

	return 0;

err_catas:
	mlx4_catas_end(&priv->dev);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
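/*
 * Example of the param_map translation in __mlx4_init_one(): with a single
 * num_vfs value the count lands in nvfs[2] (VFs that span both ports),
 * with two values they land in nvfs[0] and nvfs[1] (per-port VFs), and
 * with three values the third again fills the dual-port slot nvfs[2].
 */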
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int ret;

	printk_once(KERN_INFO "%s", mlx4_version);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev = &priv->dev;
	dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
	if (!dev->persist) {
		kfree(priv);
		return -ENOMEM;
	}
	dev->persist->pdev = pdev;
	dev->persist->dev = dev;
	pci_set_drvdata(pdev, dev->persist);
	priv->pci_dev_data = id->driver_data;
	mutex_init(&dev->persist->device_state_mutex);
	mutex_init(&dev->persist->interface_state_mutex);

	ret = __mlx4_init_one(pdev, id->driver_data, priv);
	if (ret) {
		kfree(dev->persist);
		kfree(priv);
	} else {
		pci_save_state(pdev);
	}

	return ret;
}
static void mlx4_clean_dev(struct mlx4_dev *dev)
{
	struct mlx4_dev_persistent *persist = dev->persist;
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);

	memset(priv, 0, sizeof(*priv));
	priv->dev.persist = persist;
	priv->dev.flags = flags;
}
static void mlx4_unload_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev  *dev  = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pci_dev_data;
	int p, i;

	/* saving current ports type for further use */
	for (i = 0; i < dev->caps.num_ports; i++) {
		dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
		dev->persist->curr_port_poss_type[i] = dev->caps.
						       possible_type[i + 1];
	}

	pci_dev_data = priv->pci_dev_data;

	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_default_counters(dev);
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	mlx4_close_fw(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	kfree(dev->dev_vfs);

	mlx4_clean_dev(dev);
	priv->pci_dev_data = pci_dev_data;
}
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev  *dev  = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int active_vfs = 0;

	mutex_lock(&persist->interface_state_mutex);
	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
	mutex_unlock(&persist->interface_state_mutex);

	/* Disabling SR-IOV is not allowed while there are active vf's */
	if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF when there are active VF's !!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}

	/* device marked to be under deletion running now without the lock,
	 * letting other tasks terminate
	 */
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	else
		mlx4_info(dev, "%s: interface is down\n", __func__);
	mlx4_catas_end(dev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(dev->persist);
	kfree(priv);
	pci_set_drvdata(pdev, NULL);
}
static int restore_current_port_types(struct mlx4_dev *dev,
				      enum mlx4_port_type *types,
				      enum mlx4_port_type *poss_types)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err, i;

	mlx4_stop_sense(dev);

	mutex_lock(&priv->port_mutex);
	for (i = 0; i < dev->caps.num_ports; i++)
		dev->caps.possible_type[i + 1] = poss_types[i];
	err = mlx4_change_port_types(dev, types);
	mlx4_start_sense(dev);
	mutex_unlock(&priv->port_mutex);

	return err;
}
int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev	 *dev  = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	mlx4_unload_one(pdev);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
	if (err) {
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);
		return err;
	}

	err = restore_current_port_types(dev, dev->persist->curr_port_type,
					 dev->persist->curr_port_poss_type);
	if (err)
		mlx4_err(dev, "could not restore original port types (%d)\n",
			 err);

	return err;
}
static const struct pci_device_id mlx4_pci_table[] = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2*/
	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

	mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
	mlx4_enter_error_state(persist);

	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);

	mutex_unlock(&persist->interface_state_mutex);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev	 *dev  = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int total_vfs;
	int ret;

	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
	ret = pci_enable_device(pdev);
	if (ret) {
		mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	mutex_lock(&persist->interface_state_mutex);
	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
		ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
				    priv, 1);
		if (ret) {
			mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
				 __func__, ret);
			goto end;
		}

		ret = restore_current_port_types(dev, dev->persist->
						 curr_port_type, dev->persist->
						 curr_port_poss_type);
		if (ret)
			mlx4_err(dev, "could not restore original port types (%d)\n", ret);
	}
end:
	mutex_unlock(&persist->interface_state_mutex);

	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static void mlx4_shutdown(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	mutex_unlock(&persist->interface_state_mutex);
}
static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset     = mlx4_pci_slot_reset,
};
static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= mlx4_shutdown,
	.remove		= mlx4_remove_one,
	.err_handler    = &mlx4_err_handler,
};
static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);

	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size < -7 ||
	    (mlx4_log_num_mgm_entry_size > 0 &&
	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}
static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}
module_init(mlx4_init);
module_exit(mlx4_cleanup);