/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <uapi/rdma/mlx4-abi.h>

#include "fw.h"
#include "icm.h"
enum {
	MLX4_COMMAND_INTERFACE_MIN_REV		= 2,
	MLX4_COMMAND_INTERFACE_MAX_REV		= 3,
	MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS	= 3,
};
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);

static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
#define MLX4_GET(dest, source, offset)				      \
	do {							      \
		void *__p = (char *) (source) + (offset);	      \
		__be64 val;					      \
		switch (sizeof(dest)) {				      \
		case 1: (dest) = *(u8 *) __p;	    break;	      \
		case 2: (dest) = be16_to_cpup(__p); break;	      \
		case 4: (dest) = be32_to_cpup(__p); break;	      \
		case 8: val = get_unaligned((__be64 *)__p);	      \
			(dest) = be64_to_cpu(val);  break;	      \
		default: __buggy_use_of_MLX4_GET();		      \
		}						      \
	} while (0)
#define MLX4_PUT(dest, source, offset)				      \
	do {							      \
		void *__d = ((char *) (dest) + (offset));	      \
		switch (sizeof(source)) {			      \
		case 1: *(u8 *) __d = (source);		       break; \
		case 2: *(__be16 *) __d = cpu_to_be16(source); break; \
		case 4: *(__be32 *) __d = cpu_to_be32(source); break; \
		case 8: *(__be64 *) __d = cpu_to_be64(source); break; \
		default: __buggy_use_of_MLX4_PUT();		      \
		}						      \
	} while (0)
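/*
 * MLX4_GET() and MLX4_PUT() move single fields between CPU variables and a
 * firmware command mailbox, which is laid out big-endian. The switch on
 * sizeof() picks the matching byte-swap helper at compile time, and an
 * unsupported field size becomes a link error through the intentionally
 * undefined __buggy_use_of_MLX4_GET()/__buggy_use_of_MLX4_PUT() externs.
 * For example, MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET) reads
 * one byte when 'field' is a u8 and byte-swaps wider fields to CPU order.
 */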
static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
	static const char *fname[] = {
		[ 0] = "RC transport",
		[ 1] = "UC transport",
		[ 2] = "UD transport",
		[ 3] = "XRC transport",
		[ 6] = "SRQ support",
		[ 7] = "IPoIB checksum offload",
		[ 8] = "P_Key violation counter",
		[ 9] = "Q_Key violation counter",
		[12] = "Dual Port Different Protocol (DPDP) support",
		[15] = "Big LSO headers",
		[16] = "MW support",
		[17] = "APM support",
		[18] = "Atomic ops support",
		[19] = "Raw multicast support",
		[20] = "Address vector port checking support",
		[21] = "UD multicast support",
		[30] = "IBoE support",
		[32] = "Unicast loopback support",
		[34] = "FCS header control",
		[37] = "Wake On LAN (port1) support",
		[38] = "Wake On LAN (port2) support",
		[40] = "UDP RSS support",
		[41] = "Unicast VEP steering support",
		[42] = "Multicast VEP steering support",
		[48] = "Counters support",
		[52] = "RSS IP fragments support",
		[53] = "Port ETS Scheduler support",
		[55] = "Port link type sensing support",
		[59] = "Port management change event support",
		[61] = "64 byte EQE support",
		[62] = "64 byte CQE support",
	};
	int i;

	mlx4_dbg(dev, "DEV_CAP flags:\n");
	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
{
	static const char * const fname[] = {
		[0] = "RSS support",
		[1] = "RSS Toeplitz Hash Function support",
		[2] = "RSS XOR Hash Function support",
		[3] = "Device managed flow steering support",
		[4] = "Automatic MAC reassignment support",
		[5] = "Time stamping support",
		[6] = "VST (control vlan insertion/stripping) support",
		[7] = "FSM (MAC anti-spoofing) support",
		[8] = "Dynamic QP updates support",
		[9] = "Device managed flow steering IPoIB support",
		[10] = "TCP/IP offloads/flow-steering for VXLAN support",
		[11] = "MAD DEMUX (Secure-Host) support",
		[12] = "Large cache line (>64B) CQE stride support",
		[13] = "Large cache line (>64B) EQE stride support",
		[14] = "Ethernet protocol control support",
		[15] = "Ethernet Backplane autoneg support",
		[16] = "CONFIG DEV support",
		[17] = "Asymmetric EQs support",
		[18] = "More than 80 VFs support",
		[19] = "Performance optimized for limited rule configuration flow steering support",
		[20] = "Recoverable error events support",
		[21] = "Port Remap support",
		[22] = "QCN support",
		[23] = "QP rate limiting support",
		[24] = "Ethernet Flow control statistics support",
		[25] = "Granular QoS per VF support",
		[26] = "Port ETS Scheduler support",
		[27] = "Port beacon support",
		[28] = "RX-ALL support",
		[29] = "802.1ad offload support",
		[31] = "Modifying loopback source checks using UPDATE_QP support",
		[32] = "Loopback source checks support",
		[33] = "RoCEv2 support",
		[34] = "DMFS Sniffer support (UC & MC)",
		[35] = "Diag counters per port",
		[36] = "QinQ VST mode support",
		[37] = "sl to vl mapping table change event support",
		[38] = "user MAC support",
		[39] = "Report driver version to FW support",
		[40] = "SW CQ initialization support",
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(fname); ++i)
		if (fname[i] && (flags & (1LL << i)))
			mlx4_dbg(dev, "    %s\n", fname[i]);
}
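/*
 * The command helpers below share one pattern: allocate a DMA-able command
 * mailbox, pack (or unpack) fields at fixed byte offsets with MLX4_PUT()/
 * MLX4_GET(), post the command through mlx4_cmd()/mlx4_cmd_box(), and free
 * the mailbox again.
 */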
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err = 0;

#define MOD_STAT_CFG_IN_SIZE		0x100

#define MOD_STAT_CFG_PG_SZ_M_OFFSET	0x002
#define MOD_STAT_CFG_PG_SZ_OFFSET	0x003

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	MLX4_PUT(inbox, cfg->log_pg_sz, MOD_STAT_CFG_PG_SZ_OFFSET);
	MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave)
{
	struct mlx4_cmd_mailbox *mailbox;
	u8 *outbox;
	u8 in_modifier;
	u8 field;
	u16 field16;
	int err;

#define QUERY_FUNC_BUS_OFFSET			0x00
#define QUERY_FUNC_DEVICE_OFFSET		0x01
#define QUERY_FUNC_FUNCTION_OFFSET		0x01
#define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET	0x03
#define QUERY_FUNC_RSVD_EQS_OFFSET		0x04
#define QUERY_FUNC_MAX_EQ_OFFSET		0x06
#define QUERY_FUNC_RSVD_UARS_OFFSET		0x0b

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	in_modifier = slave;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0,
			   MLX4_CMD_QUERY_FUNC,
			   MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET);
	func->bus = field & 0xf;
	MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET);
	func->device = field & 0xf1;
	MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET);
	func->function = field & 0x7;
	MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET);
	func->physical_function = field & 0xf;
	MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET);
	func->rsvd_eqs = field16 & 0xffff;
	MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET);
	func->max_eq = field16 & 0xffff;
	MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET);
	func->rsvd_uars = field & 0x0f;

	mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n",
		 func->bus, func->device, func->function, func->physical_function,
		 func->max_eq, func->rsvd_eqs, func->rsvd_uars);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
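/*
 * VST QinQ handling: when the host administrator requests an 802.1ad
 * (QinQ) service VLAN for a VF, the two helpers below copy the requested
 * (admin) vport state into the operational state, registering the new
 * default VLAN with the device when it changed.
 */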
static int mlx4_activate_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_vport_state *vp_admin;
	int err;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (vp_admin->default_vlan != vp_oper->state.default_vlan) {
		err = __mlx4_register_vlan(&priv->dev, port,
					   vp_admin->default_vlan,
					   &vp_oper->vlan_idx);
		if (err) {
			vp_oper->vlan_idx = NO_INDX;
			mlx4_warn(&priv->dev,
				  "No vlan resources slave %d, port %d\n",
				  slave, port);
			return err;
		}
		mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
			 (int)(vp_oper->state.default_vlan),
			 vp_oper->vlan_idx, slave, port);
	}
	vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
	vp_oper->state.default_vlan = vp_admin->default_vlan;
	vp_oper->state.default_qos  = vp_admin->default_qos;

	return 0;
}
static int mlx4_handle_vst_qinq(struct mlx4_priv *priv, int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_slave_state *slave_state;
	struct mlx4_vport_state *vp_admin;
	int err;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
	slave_state = &priv->mfunc.master.slave_state[slave];

	if ((vp_admin->vlan_proto != htons(ETH_P_8021AD)) ||
	    (!slave_state->active))
		return 0;

	if (vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
	    vp_oper->state.default_vlan == vp_admin->default_vlan &&
	    vp_oper->state.default_qos == vp_admin->default_qos)
		return 0;

	if (!slave_state->vst_qinq_supported) {
		/* Warn and revert the request to set vst QinQ mode */
		vp_admin->vlan_proto   = vp_oper->state.vlan_proto;
		vp_admin->default_vlan = vp_oper->state.default_vlan;
		vp_admin->default_qos  = vp_oper->state.default_qos;

		mlx4_warn(&priv->dev,
			  "Slave %d does not support VST QinQ mode\n", slave);
		return 0;
	}

	err = mlx4_activate_vst_qinq(priv, slave, port);
	return err;
}
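/*
 * QUERY_FUNC_CAP is paravirtualized: the PF answers a VF's query here
 * rather than forwarding it to firmware. op_modifier == 0 returns the
 * general per-function capabilities and resource quotas; op_modifier == 1
 * returns the per-port special QP ranges and flags for the port selected
 * by the in_modifier.
 */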
int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u8	field, port;
	u32	size, proxy_qp, qkey;
	int	err = 0;
	struct mlx4_func func;

#define QUERY_FUNC_CAP_FLAGS_OFFSET		0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET		0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET		0x4
#define QUERY_FUNC_CAP_FMR_OFFSET		0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP	0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP	0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP	0x18
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP	0x20
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP	0x24
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP	0x28
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET		0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET	0x30
#define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET	0x48

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET		0x50
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET		0x54
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET		0x58
#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET		0x60
#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET		0x64
#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET		0x68

#define QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET	0x6c

#define QUERY_FUNC_CAP_FMR_FLAG			0x80
#define QUERY_FUNC_CAP_FLAG_RDMA		0x40
#define QUERY_FUNC_CAP_FLAG_ETH			0x80
#define QUERY_FUNC_CAP_FLAG_QUOTAS		0x10
#define QUERY_FUNC_CAP_FLAG_RESD_LKEY		0x08
#define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX	0x04

#define QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG	(1UL << 31)
#define QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG	(1UL << 30)

/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET		0x3
#define QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET	0x4
#define QUERY_FUNC_CAP_FLAGS0_OFFSET		0x8
#define QUERY_FUNC_CAP_FLAGS1_OFFSET		0xc

#define QUERY_FUNC_CAP_QP0_TUNNEL		0x10
#define QUERY_FUNC_CAP_QP0_PROXY		0x14
#define QUERY_FUNC_CAP_QP1_TUNNEL		0x18
#define QUERY_FUNC_CAP_QP1_PROXY		0x1c
#define QUERY_FUNC_CAP_PHYS_PORT_ID		0x28

#define QUERY_FUNC_CAP_FLAGS1_FORCE_MAC		0x40
#define QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN	0x80
#define QUERY_FUNC_CAP_FLAGS1_NIC_INFO		0x10
#define QUERY_FUNC_CAP_VF_ENABLE_QP0		0x08

#define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID	0x80
#define QUERY_FUNC_CAP_PHV_BIT			0x40
#define QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE	0x20

#define QUERY_FUNC_CAP_SUPPORTS_VST_QINQ	BIT(30)
#define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS	BIT(31)

	if (vhcr->op_modifier == 1) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		int converted_port = mlx4_slave_convert_port(
				dev, slave, vhcr->in_modifier);
		struct mlx4_vport_oper_state *vp_oper;

		if (converted_port < 0)
			return -EINVAL;

		vhcr->in_modifier = converted_port;
		/* phys-port = logical-port */
		field = vhcr->in_modifier -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);

		port = vhcr->in_modifier;
		proxy_qp = dev->phys_caps.base_proxy_sqpn + 8 * slave + port - 1;

		/* Set nic_info bit to mark new fields support */
		field  = QUERY_FUNC_CAP_FLAGS1_NIC_INFO;

		if (mlx4_vf_smi_enabled(dev, slave, port) &&
		    !mlx4_get_parav_qkey(dev, proxy_qp, &qkey)) {
			field |= QUERY_FUNC_CAP_VF_ENABLE_QP0;
			MLX4_PUT(outbox->buf, qkey,
				 QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		}
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS1_OFFSET);

		/* size is now the QP number */
		size = dev->phys_caps.base_tunnel_sqpn + 8 * slave + port - 1;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP0_TUNNEL);

		size += 2;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_TUNNEL);

		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP0_PROXY);
		proxy_qp += 2;
		MLX4_PUT(outbox->buf, proxy_qp, QUERY_FUNC_CAP_QP1_PROXY);

		MLX4_PUT(outbox->buf, dev->caps.phys_port_id[vhcr->in_modifier],
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		err = mlx4_handle_vst_qinq(priv, slave, port);
		if (err)
			return err;

		field = 0;
		if (dev->caps.phv_bit[port])
			field |= QUERY_FUNC_CAP_PHV_BIT;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			field |= QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE;
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS0_OFFSET);

	} else if (vhcr->op_modifier == 0) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, slave);
		struct mlx4_slave_state *slave_state =
			&priv->mfunc.master.slave_state[slave];

		/* enable rdma and ethernet interfaces, new quota locations,
		 * and reserved lkey
		 */
		field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA |
			 QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX |
			 QUERY_FUNC_CAP_FLAG_RESD_LKEY);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);

		field = min(
			bitmap_weight(actv_ports.ports, dev->caps.num_ports),
			dev->caps.num_ports);
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);

		size = dev->caps.function_caps; /* set PF behaviours */
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);

		field = 0; /* protected FMR support not available as yet */
		MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
		size = dev->caps.num_qps;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
		size = dev->caps.num_srqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
		size = dev->caps.num_cqs;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) ||
		    mlx4_QUERY_FUNC(dev, &func, slave)) {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				dev->caps.num_eqs :
				rounddown_pow_of_two(dev->caps.num_eqs);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = dev->caps.reserved_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		} else {
			size = vhcr->in_modifier &
				QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ?
				func.max_eq :
				rounddown_pow_of_two(func.max_eq);
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
			size = func.rsvd_eqs;
			MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		}

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
		size = dev->caps.num_mpts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);

		size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave];
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
		size = dev->caps.num_mtts;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);

		size = dev->caps.num_mgms + dev->caps.num_amgms;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);

		size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG |
			QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG;
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);

		size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00);
		MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);

		if (vhcr->in_modifier & QUERY_FUNC_CAP_SUPPORTS_VST_QINQ)
			slave_state->vst_qinq_supported = true;

	} else
		err = -EINVAL;

	return err;
}
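/*
 * VF-side counterpart of the wrapper above: issue QUERY_FUNC_CAP as a
 * wrapped command (so the PF services it) and unpack the mailbox into
 * struct mlx4_func_cap.
 */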
int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
			struct mlx4_func_cap *func_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field, op_modifier;
	u32 size, qkey;
	int err = 0, quotas = 0;
	u32 in_modifier;
	u32 slave_caps;

	op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */
	slave_caps = QUERY_FUNC_CAP_SUPPORTS_VST_QINQ |
		QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS;
	in_modifier = op_modifier ? gen_or_port : slave_caps;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier,
			   MLX4_CMD_QUERY_FUNC_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	if (!op_modifier) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
		if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
			mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
		func_cap->flags = field;
		quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS);

		MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
		func_cap->num_ports = field;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
		func_cap->pf_context_behaviour = size;

		if (quotas) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
			func_cap->mcg_quota = size & 0xFFFFFF;
		} else {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP);
			func_cap->qp_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP);
			func_cap->srq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP);
			func_cap->cq_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP);
			func_cap->mpt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP);
			func_cap->mtt_quota = size & 0xFFFFFF;

			MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP);
			func_cap->mcg_quota = size & 0xFFFFFF;
		}
		MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
		func_cap->max_eq = size & 0xFFFFFF;

		MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
		func_cap->reserved_eq = size & 0xFFFFFF;

		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET);
			func_cap->reserved_lkey = size;
		} else {
			func_cap->reserved_lkey = 0;
		}

		func_cap->extra_flags = 0;

		/* Mailbox data from 0x6c and onward should only be treated if
		 * QUERY_FUNC_CAP_FLAG_VALID_MAILBOX is set in func_cap->flags
		 */
		if (func_cap->flags & QUERY_FUNC_CAP_FLAG_VALID_MAILBOX) {
			MLX4_GET(size, outbox, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET);
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_BF_RES_QP;
			if (size & QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG)
				func_cap->extra_flags |= MLX4_QUERY_FUNC_FLAGS_A0_RES_QP;
		}

		goto out;
	}

	/* logical port query */
	if (gen_or_port > dev->caps.num_ports) {
		err = -EINVAL;
		goto out;
	}

	MLX4_GET(func_cap->flags1, outbox, QUERY_FUNC_CAP_FLAGS1_OFFSET);
	if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_ETH) {
		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_VLAN) {
			mlx4_err(dev, "VLAN is enforced on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}

		if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_FORCE_MAC) {
			mlx4_err(dev, "Force mac is enabled on this port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
		MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
		if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
			mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
			err = -EPROTONOSUPPORT;
			goto out;
		}
	}

	MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
	func_cap->physical_port = field;
	if (func_cap->physical_port != gen_or_port) {
		err = -EINVAL;
		goto out;
	}

	if (func_cap->flags1 & QUERY_FUNC_CAP_VF_ENABLE_QP0) {
		MLX4_GET(qkey, outbox, QUERY_FUNC_CAP_PRIV_VF_QKEY_OFFSET);
		func_cap->spec_qps.qp0_qkey = qkey;
	} else {
		func_cap->spec_qps.qp0_qkey = 0;
	}

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_TUNNEL);
	func_cap->spec_qps.qp0_tunnel = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP0_PROXY);
	func_cap->spec_qps.qp0_proxy = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_TUNNEL);
	func_cap->spec_qps.qp1_tunnel = size & 0xFFFFFF;

	MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP1_PROXY);
	func_cap->spec_qps.qp1_proxy = size & 0xFFFFFF;

	if (func_cap->flags1 & QUERY_FUNC_CAP_FLAGS1_NIC_INFO)
		MLX4_GET(func_cap->phys_port_id, outbox,
			 QUERY_FUNC_CAP_PHYS_PORT_ID);

	MLX4_GET(func_cap->flags0, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);

	/* All other resources are allocated by the master, but we still report
	 * 'num' and 'reserved' capabilities as follows:
	 * - num remains the maximum resource index
	 * - 'num - reserved' is the total available objects of a resource, but
	 *   resource indices may be less than 'reserved'
	 * TODO: set per-resource quotas */

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
static void disable_unsupported_roce_caps(void *buf);
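/*
 * QUERY_DEV_CAP returns one 0x100-byte mailbox of packed device limits;
 * each QUERY_DEV_CAP_*_OFFSET below names a field in that layout. Many
 * limits are stored as log2 values, hence the "1 << (field & mask)"
 * unpacking throughout mlx4_QUERY_DEV_CAP().
 */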
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32, flags, ext_flags;
	u16 size;
	u16 stat_rate;
	int err;
	int i;
#define QUERY_DEV_CAP_OUT_SIZE		       0x100
#define QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET		0x10
#define QUERY_DEV_CAP_MAX_QP_SZ_OFFSET		0x11
#define QUERY_DEV_CAP_RSVD_QP_OFFSET		0x12
#define QUERY_DEV_CAP_MAX_QP_OFFSET		0x13
#define QUERY_DEV_CAP_RSVD_SRQ_OFFSET		0x14
#define QUERY_DEV_CAP_MAX_SRQ_OFFSET		0x15
#define QUERY_DEV_CAP_RSVD_EEC_OFFSET		0x16
#define QUERY_DEV_CAP_MAX_EEC_OFFSET		0x17
#define QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET		0x19
#define QUERY_DEV_CAP_RSVD_CQ_OFFSET		0x1a
#define QUERY_DEV_CAP_MAX_CQ_OFFSET		0x1b
#define QUERY_DEV_CAP_MAX_MPT_OFFSET		0x1d
#define QUERY_DEV_CAP_RSVD_EQ_OFFSET		0x1e
#define QUERY_DEV_CAP_MAX_EQ_OFFSET		0x1f
#define QUERY_DEV_CAP_RSVD_MTT_OFFSET		0x20
#define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET		0x21
#define QUERY_DEV_CAP_RSVD_MRW_OFFSET		0x22
#define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET	0x23
#define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET		0x26
#define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
#define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
#define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
#define QUERY_DEV_CAP_RSS_OFFSET		0x2e
#define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
#define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
#define QUERY_DEV_CAP_PORT_BEACON_OFFSET	0x34
#define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
#define QUERY_DEV_CAP_MTU_WIDTH_OFFSET		0x36
#define QUERY_DEV_CAP_VL_PORT_OFFSET		0x37
#define QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET		0x38
#define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
#define QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET	0x3e
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
#define QUERY_DEV_CAP_WOL_OFFSET		0x43
#define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
#define QUERY_DEV_CAP_PAGE_SZ_OFFSET		0x4b
#define QUERY_DEV_CAP_BF_OFFSET			0x4c
#define QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET	0x4d
#define QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET	0x4e
#define QUERY_DEV_CAP_LOG_MAX_BF_PAGES_OFFSET	0x4f
#define QUERY_DEV_CAP_MAX_SG_SQ_OFFSET		0x51
#define QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET	0x52
#define QUERY_DEV_CAP_MAX_SG_RQ_OFFSET		0x55
#define QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET	0x56
#define QUERY_DEV_CAP_USER_MAC_EN_OFFSET	0x5C
#define QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET	0x5D
#define QUERY_DEV_CAP_MAX_QP_MCG_OFFSET		0x61
#define QUERY_DEV_CAP_RSVD_MCG_OFFSET		0x62
#define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET		0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
#define QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET	0x70
#define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
#define QUERY_DEV_CAP_SL2VL_EVENT_OFFSET	0x78
#define QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE	0x7a
#define QUERY_DEV_CAP_ECN_QCN_VER_OFFSET	0x7b
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
#define QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET	0x86
#define QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET	0x88
#define QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET	0x8a
#define QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET	0x8c
#define QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET	0x8e
#define QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET	0x90
#define QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET	0x92
#define QUERY_DEV_CAP_BMME_FLAGS_OFFSET		0x94
#define QUERY_DEV_CAP_CONFIG_DEV_OFFSET		0x94
#define QUERY_DEV_CAP_PHV_EN_OFFSET		0x96
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET		0x9c
#define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT	0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
#define QUERY_DEV_CAP_VXLAN			0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET		0xb0
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET	0xa8
#define QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET	0xac
#define QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET	0xcc
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET	0xd0
#define QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET	0xd2
#define QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET	0xe4
	dev_cap->flags2 = 0;
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	if (mlx4_is_mfunc(dev))
		disable_unsupported_roce_caps(outbox);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_QP_OFFSET);
	dev_cap->reserved_qps = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_OFFSET);
	dev_cap->max_qps = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_SRQ_OFFSET);
	dev_cap->reserved_srqs = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_OFFSET);
	dev_cap->max_srqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_SZ_OFFSET);
	dev_cap->max_cq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_CQ_OFFSET);
	dev_cap->reserved_cqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_CQ_OFFSET);
	dev_cap->max_cqs = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET);
	dev_cap->max_mpts = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET);
	dev_cap->reserved_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET);
	dev_cap->max_eqs = 1 << (field & 0xf);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET);
	dev_cap->reserved_mtts = 1 << (field >> 4);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MRW_OFFSET);
	dev_cap->reserved_mrws = 1 << (field & 0xf);
	MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET);
	dev_cap->num_sys_eqs = size & 0xfff;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET);
	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
	field &= 0x1f;
	if (!field)
		dev_cap->max_gso_sz = 0;
	else
		dev_cap->max_gso_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSS_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_XOR;
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS_TOP;
	field &= 0xf;
	if (field) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RSS;
		dev_cap->max_rss_tbl_sz = 1 << field;
	} else
		dev_cap->max_rss_tbl_sz = 0;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
	dev_cap->max_rdma_global = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
	dev_cap->local_ca_ack_delay = field & 0x1f;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
	dev_cap->num_ports = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
	dev_cap->max_msg_sz = 1 << (field & 0x1f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_FLOWSTATS_COUNTERS_OFFSET);
	if (field & 0x10)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
	dev_cap->fs_max_num_qp_per_entry = field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_SL2VL_EVENT_OFFSET);
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QCN;
	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
	dev_cap->stat_rate_support = stat_rate;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_TS;
	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
	dev_cap->flags = flags | (u64)ext_flags << 32;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_WOL_OFFSET);
	dev_cap->wol_port[1] = !!(field & 0x20);
	dev_cap->wol_port[2] = !!(field & 0x40);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
	dev_cap->reserved_uars = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
	dev_cap->uar_size = 1 << ((field & 0x3f) + 20);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PAGE_SZ_OFFSET);
	dev_cap->min_page_sz = 1 << field;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_BF_OFFSET);
	if (field & 0x80) {
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
		dev_cap->bf_reg_size = 1 << (field & 0x1f);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
		if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
			field = 3;
		dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
	} else {
		dev_cap->bf_reg_size = 0;
	}

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_SQ_OFFSET);
	dev_cap->max_sq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_SQ_OFFSET);
	dev_cap->max_sq_desc_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_USER_MAC_EN_OFFSET);
	if (field & (1 << 2))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_USER_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_SVLAN_BY_QP_OFFSET);
	if (field & 0x1)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_MCG_OFFSET);
	dev_cap->max_qp_per_mcg = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MCG_OFFSET);
	dev_cap->reserved_mgms = field & 0xf;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MCG_OFFSET);
	dev_cap->max_mcgs = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_PD_OFFSET);
	dev_cap->reserved_pds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PD_OFFSET);
	dev_cap->max_pds = 1 << (field & 0x3f);
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_XRC_OFFSET);
	dev_cap->reserved_xrcds = field >> 4;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_XRC_OFFSET);
	dev_cap->max_xrcds = 1 << (field & 0x1f);

	MLX4_GET(size, outbox, QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET);
	dev_cap->rdmarc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET);
	dev_cap->qpc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET);
	dev_cap->aux_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_ALTC_ENTRY_SZ_OFFSET);
	dev_cap->altc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_EQC_ENTRY_SZ_OFFSET);
	dev_cap->eqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_CQC_ENTRY_SZ_OFFSET);
	dev_cap->cqc_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_SRQ_ENTRY_SZ_OFFSET);
	dev_cap->srq_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_C_MPT_ENTRY_SZ_OFFSET);
	dev_cap->cmpt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MTT_ENTRY_SZ_OFFSET);
	dev_cap->mtt_entry_sz = size;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_D_MPT_ENTRY_SZ_OFFSET);
	dev_cap->dmpt_entry_sz = size;

	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SRQ_SZ_OFFSET);
	dev_cap->max_srq_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_QP_SZ_OFFSET);
	dev_cap->max_qp_sz = 1 << field;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSZ_SRQ_OFFSET);
	dev_cap->resize_srq = field & 1;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_SG_RQ_OFFSET);
	dev_cap->max_rq_sg = field;
	MLX4_GET(size, outbox, QUERY_DEV_CAP_MAX_DESC_SZ_RQ_OFFSET);
	dev_cap->max_rq_desc_sz = size;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	if (field & (1 << 4))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QOS_VPP;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL;
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
	if (field & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	MLX4_GET(dev_cap->bmme_flags, outbox,
		 QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	if (dev_cap->bmme_flags & MLX4_FLAG_ROCE_V1_V2)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ROCE_V1_V2;
	if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	if (field & 0x20)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV;
	if (field & (1 << 2))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_PHV_EN_OFFSET);
	if (field & 0x80)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PHV_EN;
	if (field & 0x40)
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN;

	MLX4_GET(dev_cap->reserved_lkey, outbox,
		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
	if (field32 & (1 << 7))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
	if (field32 & (1 << 8))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW;
	MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
	if (field32 & (1 << 17))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
	if (field & (1 << 6))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
	if (field & (1 << 3))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
	if (field & (1 << 5))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
	MLX4_GET(dev_cap->max_icm_sz, outbox,
		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		MLX4_GET(dev_cap->max_counters, outbox,
			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);

	MLX4_GET(field32, outbox,
		 QUERY_DEV_CAP_MAD_DEMUX_OFFSET);
	if (field32 & (1 << 0))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_MAD_DEMUX;

	MLX4_GET(dev_cap->dmfs_high_rate_qpn_base, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_BASE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_base &= MGM_QPN_MASK;
	MLX4_GET(dev_cap->dmfs_high_rate_qpn_range, outbox,
		 QUERY_DEV_CAP_DMFS_HIGH_RATE_QPN_RANGE_OFFSET);
	dev_cap->dmfs_high_rate_qpn_range &= MGM_QPN_MASK;

	MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);
	dev_cap->rl_caps.num_rates = size;
	if (dev_cap->rl_caps.num_rates) {
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MAX_OFFSET);
		dev_cap->rl_caps.max_val  = size & 0xfff;
		dev_cap->rl_caps.max_unit = size >> 14;
		MLX4_GET(size, outbox, QUERY_DEV_CAP_QP_RATE_LIMIT_MIN_OFFSET);
		dev_cap->rl_caps.min_val  = size & 0xfff;
		dev_cap->rl_caps.min_unit = size >> 14;
	}

	MLX4_GET(dev_cap->health_buffer_addrs, outbox,
		 QUERY_DEV_CAP_HEALTH_BUFFER_ADDRESS_OFFSET);

	MLX4_GET(field32, outbox, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	if (field32 & (1 << 16))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP;
	if (field32 & (1 << 18))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB;
	if (field32 & (1 << 19))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_LB_SRC_CHK;
	if (field32 & (1 << 26))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL;
	if (field32 & (1 << 20))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM;
	if (field32 & (1 << 21))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS;
	if (field32 & (1 << 23))
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SW_CQ_INIT;

	for (i = 1; i <= dev_cap->num_ports; i++) {
		err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i);
		if (err)
			goto out;
	}

	/*
	 * Each UAR has 4 EQ doorbells; so if a UAR is reserved, then
	 * we can't use any EQs whose doorbell falls on that page,
	 * even if the EQ itself isn't reserved.
	 */
	if (dev_cap->num_sys_eqs == 0)
		dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4,
					    dev_cap->reserved_eqs);
	else
		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
void mlx4_dev_cap_dump(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	if (dev_cap->bf_reg_size > 0)
		mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
			 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
	else
		mlx4_dbg(dev, "BlueFlame not available\n");

	mlx4_dbg(dev, "Base MM extensions: flags %08x, rsvd L_Key %08x\n",
		 dev_cap->bmme_flags, dev_cap->reserved_lkey);
	mlx4_dbg(dev, "Max ICM size %lld MB\n",
		 (unsigned long long) dev_cap->max_icm_sz >> 20);
	mlx4_dbg(dev, "Max QPs: %d, reserved QPs: %d, entry size: %d\n",
		 dev_cap->max_qps, dev_cap->reserved_qps, dev_cap->qpc_entry_sz);
	mlx4_dbg(dev, "Max SRQs: %d, reserved SRQs: %d, entry size: %d\n",
		 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz);
	mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n",
		 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz);
	mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n",
		 dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs,
		 dev_cap->eqc_entry_sz);
	mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n",
		 dev_cap->reserved_mrws, dev_cap->reserved_mtts);
	mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_pds, dev_cap->reserved_uars);
	mlx4_dbg(dev, "Max QP/MCG: %d, reserved MGMs: %d\n",
		 dev_cap->max_pds, dev_cap->reserved_mgms);
	mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
		 dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
	mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
		 dev_cap->local_ca_ack_delay, 128 << dev_cap->port_cap[1].ib_mtu,
		 dev_cap->port_cap[1].max_port_width);
	mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
	mlx4_dbg(dev, "Max RSS Table size: %d\n", dev_cap->max_rss_tbl_sz);
	mlx4_dbg(dev, "DMFS high rate steer QPn base: %d\n",
		 dev_cap->dmfs_high_rate_qpn_base);
	mlx4_dbg(dev, "DMFS high rate steer QPn range: %d\n",
		 dev_cap->dmfs_high_rate_qpn_range);

	if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_QP_RATE_LIMIT) {
		struct mlx4_rate_limit_caps *rl_caps = &dev_cap->rl_caps;

		mlx4_dbg(dev, "QP Rate-Limit: #rates %d, unit/val max %d/%d, min %d/%d\n",
			 rl_caps->num_rates, rl_caps->max_unit, rl_caps->max_val,
			 rl_caps->min_unit, rl_caps->min_val);
	}

	dump_dev_cap_flags(dev, dev_cap->flags);
	dump_dev_cap_flags2(dev, dev_cap->flags2);
}
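/*
 * mlx4_QUERY_PORT() copes with both command interface generations: with
 * pre-rev-3 firmware (MLX4_FLAG_OLD_PORT_CMDS) the per-port limits are
 * only available through QUERY_DEV_CAP, while newer firmware provides a
 * dedicated QUERY_PORT command.
 */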
int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_cap)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u8 field;
	u32 field32;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
				   MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);

		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
		port_cap->max_vl	   = field >> 4;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
		port_cap->ib_mtu	   = field >> 4;
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
		port_cap->max_gids	   = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_PKEY_OFFSET);
		port_cap->max_pkeys	   = 1 << (field & 0xf);
	} else {
#define QUERY_PORT_SUPPORTED_TYPE_OFFSET	0x00
#define QUERY_PORT_MTU_OFFSET			0x01
#define QUERY_PORT_ETH_MTU_OFFSET		0x02
#define QUERY_PORT_WIDTH_OFFSET		0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET		0x07
#define QUERY_PORT_MAX_MACVLAN_OFFSET		0x0a
#define QUERY_PORT_MAX_VL_OFFSET		0x0b
#define QUERY_PORT_MAC_OFFSET			0x10
#define QUERY_PORT_TRANS_VENDOR_OFFSET		0x18
#define QUERY_PORT_WAVELENGTH_OFFSET		0x1c
#define QUERY_PORT_TRANS_CODE_OFFSET		0x20

		err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0, MLX4_CMD_QUERY_PORT,
				   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

		if (err)
			goto out;

		MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
		port_cap->link_state = (field & 0x80) >> 7;
		port_cap->supported_port_types = field & 3;
		port_cap->suggested_type = (field >> 3) & 1;
		port_cap->default_sense = (field >> 4) & 1;
		port_cap->dmfs_optimized_state = (field >> 5) & 1;
		MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
		port_cap->ib_mtu	   = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
		port_cap->max_port_width = field & 0xf;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
		port_cap->max_gids	   = 1 << (field >> 4);
		port_cap->max_pkeys	   = 1 << (field & 0xf);
		MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
		port_cap->max_vl	   = field & 0xf;
		port_cap->max_tc_eth	   = field >> 4;
		MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
		port_cap->log_max_macs  = field & 0xf;
		port_cap->log_max_vlans = field >> 4;
		MLX4_GET(port_cap->eth_mtu, outbox, QUERY_PORT_ETH_MTU_OFFSET);
		MLX4_GET(port_cap->def_mac, outbox, QUERY_PORT_MAC_OFFSET);
		MLX4_GET(field32, outbox, QUERY_PORT_TRANS_VENDOR_OFFSET);
		port_cap->trans_type = field32 >> 24;
		port_cap->vendor_oui = field32 & 0xffffff;
		MLX4_GET(port_cap->wavelength, outbox, QUERY_PORT_WAVELENGTH_OFFSET);
		MLX4_GET(port_cap->trans_code, outbox, QUERY_PORT_TRANS_CODE_OFFSET);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
#define DEV_CAP_EXT_2_FLAG_PFC_COUNTERS	(1 << 28)
#define DEV_CAP_EXT_2_FLAG_VLAN_CONTROL	(1 << 26)
#define DEV_CAP_EXT_2_FLAG_80_VFS	(1 << 21)
#define DEV_CAP_EXT_2_FLAG_FSM		(1 << 20)
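/*
 * When a VF issues QUERY_DEV_CAP, the PF re-runs the query and then scrubs
 * the mailbox before returning it: host-side virtualization features
 * (VST/FSM, per-VF QoS, QP rate limiting, BlueFlame, timestamps, port
 * beacon, etc.) are masked out so a guest does not rely on capabilities
 * the PF keeps for itself.
 */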
int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	u64	flags;
	int	err = 0;
	u8	field;
	u16	field16;
	u32	bmme_flags, field32;
	int	real_port;
	int	slave_port;
	int	first_port;
	struct mlx4_active_ports actv_ports;

	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	disable_unsupported_roce_caps(outbox->buf);
	/* add port mng change event capability and disable mw type 1
	 * unconditionally to slaves
	 */
	MLX4_GET(flags, outbox->buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags |= MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV;
	flags &= ~MLX4_DEV_CAP_FLAG_MEM_WINDOW;
	actv_ports = mlx4_get_active_ports(dev, slave);
	first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	for (slave_port = 0, real_port = first_port;
	     real_port < first_port +
	     bitmap_weight(actv_ports.ports, dev->caps.num_ports);
	     ++real_port, ++slave_port) {
		if (flags & (MLX4_DEV_CAP_FLAG_WOL_PORT1 << real_port))
			flags |= MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port;
		else
			flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);
	}
	for (; slave_port < dev->caps.num_ports; ++slave_port)
		flags &= ~(MLX4_DEV_CAP_FLAG_WOL_PORT1 << slave_port);

	/* Not exposing RSS IP fragments to guests */
	flags &= ~MLX4_DEV_CAP_FLAG_RSS_IP_FRAG;
	MLX4_PUT(outbox->buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);

	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VL_PORT_OFFSET);
	field &= ~0x0F;
	field |= bitmap_weight(actv_ports.ports, dev->caps.num_ports) & 0x0F;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VL_PORT_OFFSET);

	/* For guests, disable timestamp */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);

	/* For guests, disable vxlan tunneling and QoS support */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
	field &= 0xd7;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);

	/* For guests, disable port BEACON */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_PORT_BEACON_OFFSET);

	/* For guests, report Blueflame disabled */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
	field &= 0x7f;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);

	/* For guests, disable mw type 2 and port remap */
	MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
	bmme_flags &= ~MLX4_FLAG_PORT_REMAP;
	MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);

	/* turn off device-managed steering capability if not enabled */
	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(field, outbox->buf,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
		field &= 0x7f;
		MLX4_PUT(outbox->buf, field,
			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
	}

	/* turn off ipoib managed steering for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
	field &= ~0x80;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);

	/* turn off host side virt features (VST, FSM, etc) for guests */
	MLX4_GET(field32, outbox->buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	field32 &= ~(DEV_CAP_EXT_2_FLAG_VLAN_CONTROL | DEV_CAP_EXT_2_FLAG_80_VFS |
		     DEV_CAP_EXT_2_FLAG_FSM | DEV_CAP_EXT_2_FLAG_PFC_COUNTERS);
	MLX4_PUT(outbox->buf, field32, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);

	/* turn off QCN for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);
	field &= 0xfe;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_ECN_QCN_VER_OFFSET);

	/* turn off QP max-rate limiting for guests */
	field16 = 0;
	MLX4_PUT(outbox->buf, field16, QUERY_DEV_CAP_QP_RATE_LIMIT_NUM_OFFSET);

	/* turn off QoS per VF support for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);
	field &= 0xef;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_EQ_CACHE_LINE_STRIDE);

	/* turn off ignore FCS feature for guests */
	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);
	field &= 0xfb;
	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CONFIG_DEV_OFFSET);

	return 0;
}
static void disable_unsupported_roce_caps(void *buf)
{
	u32 flags;

	MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	flags &= ~(1UL << 31);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
	MLX4_GET(flags, buf, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	flags &= ~(1UL << 24);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET);
	MLX4_GET(flags, buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
	flags &= ~(MLX4_FLAG_ROCE_V1_V2);
	MLX4_PUT(buf, flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
}
int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 def_mac;
	u8 port_type;
	u16 short_field;
	int err;
	int admin_link_state;
	int port = mlx4_slave_convert_port(dev, slave,
					   vhcr->in_modifier & 0xFF);

#define MLX4_VF_PORT_NO_LINK_SENSE_MASK	0xE0
#define MLX4_PORT_LINK_UP_MASK		0x80
#define QUERY_PORT_CUR_MAX_PKEY_OFFSET	0x0c
#define QUERY_PORT_CUR_MAX_GID_OFFSET	0x0e

	if (port < 0)
		return -EINVAL;

	/* Protect against untrusted guests: enforce that this is the
	 * QUERY_PORT general query.
	 */
	if (vhcr->op_modifier || vhcr->in_modifier & ~0xFF)
		return -EINVAL;

	vhcr->in_modifier = port;

	err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_NATIVE);

	if (!err && dev->caps.function != slave) {
		def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
		MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);

		/* get port type - currently only eth is enabled */
		MLX4_GET(port_type, outbox->buf,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		/* No link sensing allowed */
		port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
		/* set port type to currently operating port type */
		port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);

		admin_link_state = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.link_state;
		if (IFLA_VF_LINK_STATE_ENABLE == admin_link_state)
			port_type |= MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_DISABLE == admin_link_state)
			port_type &= ~MLX4_PORT_LINK_UP_MASK;
		else if (IFLA_VF_LINK_STATE_AUTO == admin_link_state && mlx4_is_bonded(dev)) {
			int other_port = (port == 1) ? 2 : 1;
			struct mlx4_port_cap port_cap;

			err = mlx4_QUERY_PORT(dev, other_port, &port_cap);
			if (err)
				goto out;
			port_type |= (port_cap.link_state << 7);
		}

		MLX4_PUT(outbox->buf, port_type,
			 QUERY_PORT_SUPPORTED_TYPE_OFFSET);

		if (dev->caps.port_type[vhcr->in_modifier] == MLX4_PORT_TYPE_ETH)
			short_field = mlx4_get_slave_num_gids(dev, slave, port);
		else
			short_field = 1; /* slave max gids */
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_GID_OFFSET);

		short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
		MLX4_PUT(outbox->buf, short_field,
			 QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	}
out:
	return err;
}
int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
				    int *gid_tbl_len, int *pkey_tbl_len)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	u16 field;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	outbox = mailbox->buf;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
	*gid_tbl_len = field;

	MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
	*pkey_tbl_len = field;

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
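/*
 * MAP_FA/MAP_ICM/MAP_ICM_AUX take a list of 16-byte entries: a 64-bit
 * virtual address (when virt != -1) followed by the physical address with
 * the log2 page size, relative to MLX4_ICM_PAGE_SHIFT, encoded in its low
 * bits. mlx4_map_cmd() below batches up to MLX4_MAILBOX_SIZE / 16 entries
 * per command invocation.
 */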

int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_icm_iter iter;
	__be64 *pages;
	int lg;
	int nent = 0;
	int i;
	int err = 0;
	int ts = 0, tc = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	pages = mailbox->buf;

	for (mlx4_icm_first(icm, &iter);
	     !mlx4_icm_last(&iter);
	     mlx4_icm_next(&iter)) {
		/*
		 * We have to pass pages that are aligned to their
		 * size, so find the least significant 1 in the
		 * address or size and use that as our log2 size.
		 */
		lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
		if (lg < MLX4_ICM_PAGE_SHIFT) {
			mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
				  MLX4_ICM_PAGE_SIZE,
				  (unsigned long long) mlx4_icm_addr(&iter),
				  mlx4_icm_size(&iter));
			err = -EINVAL;
			goto out;
		}

		for (i = 0; i < mlx4_icm_size(&iter) >> lg; ++i) {
			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
				virt += 1 << lg;
			}

			pages[nent * 2 + 1] =
				cpu_to_be64((mlx4_icm_addr(&iter) + (i << lg)) |
					    (lg - MLX4_ICM_PAGE_SHIFT));
			ts += 1 << (lg - 10);
			++tc;

			if (++nent == MLX4_MAILBOX_SIZE / 16) {
				err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
					       MLX4_CMD_TIME_CLASS_B,
					       MLX4_CMD_NATIVE);
				if (err)
					goto out;
				nent = 0;
			}
		}
	}

	if (nent)
		err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	switch (op) {
	case MLX4_CMD_MAP_FA:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM_AUX:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
		break;
	case MLX4_CMD_MAP_ICM:
		mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
			 tc, ts, (unsigned long long) virt - (ts << 10));
		break;
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
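
/*
 * Worked example of the log2-size computation above (illustrative
 * values): for a chunk at bus address 0x6000 with size 0x2000,
 * ffs(0x6000 | 0x2000) - 1 = 13, so the chunk is handed to firmware
 * as 8 KB pages, the largest power-of-two size that both the address
 * and the length are aligned to. Since 13 >= MLX4_ICM_PAGE_SHIFT (12),
 * the alignment check passes.
 */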

int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_FA, icm, -1);
}

int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_RUN_FW(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

int mlx4_QUERY_FW(struct mlx4_dev *dev)
{
	struct mlx4_fw  *fw  = &mlx4_priv(dev)->fw;
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err = 0;
	u64 fw_ver;
	u16 cmd_if_rev;
	u8 lg;

#define QUERY_FW_OUT_SIZE             0x100
#define QUERY_FW_VER_OFFSET            0x00
#define QUERY_FW_PPF_ID		       0x09
#define QUERY_FW_CMD_IF_REV_OFFSET     0x0a
#define QUERY_FW_MAX_CMD_OFFSET        0x0f
#define QUERY_FW_ERR_START_OFFSET      0x30
#define QUERY_FW_ERR_SIZE_OFFSET       0x38
#define QUERY_FW_ERR_BAR_OFFSET        0x3c

#define QUERY_FW_SIZE_OFFSET           0x00
#define QUERY_FW_CLR_INT_BASE_OFFSET   0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET    0x28

#define QUERY_FW_COMM_BASE_OFFSET      0x40
#define QUERY_FW_COMM_BAR_OFFSET       0x48

#define QUERY_FW_CLOCK_OFFSET	       0x50
#define QUERY_FW_CLOCK_BAR	       0x58

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(fw_ver, outbox, QUERY_FW_VER_OFFSET);
	/*
	 * FW subminor version is at more significant bits than minor
	 * version, so swap here.
	 */
	dev->caps.fw_ver = (fw_ver & 0xffff00000000ull) |
		((fw_ver & 0xffff0000ull) >> 16) |
		((fw_ver & 0x0000ffffull) << 16);

	MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
	dev->caps.function = lg;

	if (mlx4_is_slave(dev))
		goto out;

	MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
	    cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
		mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
			 cmd_if_rev);
		mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
			 (int) (dev->caps.fw_ver >> 32),
			 (int) (dev->caps.fw_ver >> 16) & 0xffff,
			 (int) dev->caps.fw_ver & 0xffff);
		mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
			 MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
		err = -ENODEV;
		goto out;
	}

	if (cmd_if_rev < MLX4_COMMAND_INTERFACE_NEW_PORT_CMDS)
		dev->flags |= MLX4_FLAG_OLD_PORT_CMDS;

	MLX4_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
	cmd->max_cmds = 1 << lg;

	mlx4_dbg(dev, "FW version %d.%d.%03d (cmd intf rev %d), max commands %d\n",
		 (int) (dev->caps.fw_ver >> 32),
		 (int) (dev->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->caps.fw_ver & 0xffff,
		 cmd_if_rev, cmd->max_cmds);

	MLX4_GET(fw->catas_offset, outbox, QUERY_FW_ERR_START_OFFSET);
	MLX4_GET(fw->catas_size,   outbox, QUERY_FW_ERR_SIZE_OFFSET);
	MLX4_GET(fw->catas_bar,    outbox, QUERY_FW_ERR_BAR_OFFSET);
	fw->catas_bar = (fw->catas_bar >> 6) * 2;

	mlx4_dbg(dev, "Catastrophic error buffer at 0x%llx, size 0x%x, BAR %d\n",
		 (unsigned long long) fw->catas_offset, fw->catas_size, fw->catas_bar);

	MLX4_GET(fw->fw_pages,     outbox, QUERY_FW_SIZE_OFFSET);
	MLX4_GET(fw->clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
	MLX4_GET(fw->clr_int_bar,  outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
	fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;

	MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
	MLX4_GET(fw->comm_bar,  outbox, QUERY_FW_COMM_BAR_OFFSET);
	fw->comm_bar = (fw->comm_bar >> 6) * 2;
	mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
		 fw->comm_bar, fw->comm_base);
	mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);

	MLX4_GET(fw->clock_offset, outbox, QUERY_FW_CLOCK_OFFSET);
	MLX4_GET(fw->clock_bar,    outbox, QUERY_FW_CLOCK_BAR);
	fw->clock_bar = (fw->clock_bar >> 6) * 2;
	mlx4_dbg(dev, "Internal clock bar:%d offset:0x%llx\n",
		 fw->clock_bar, fw->clock_offset);

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	fw->fw_pages =
		ALIGN(fw->fw_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	mlx4_dbg(dev, "Clear int @ %llx, BAR %d\n",
		 (unsigned long long) fw->clr_int_base, fw->clr_int_bar);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
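
/*
 * Worked example of the sub-minor/minor swap above (illustrative
 * values): a raw fw_ver of 0x0000000200030001 (major 2, subminor 3,
 * minor 1) becomes 0x0000000200010003 after the swap, so the
 * "%d.%d.%03d" prints above show "2.1.003", i.e.
 * major.minor.subminor in the conventional order.
 */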

int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	u8 *outbuf;
	int err;

	outbuf = outbox->buf;
	err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* for slaves, set pci PPF ID to invalid and zero out everything
	 * else except FW version */
	outbuf[0] = outbuf[1] = 0;
	memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
	outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;

	return 0;
}

static void get_board_id(void *vsd, char *board_id)
{
	int i;

#define VSD_OFFSET_SIG1		0x00
#define VSD_OFFSET_SIG2		0xde
#define VSD_OFFSET_MLX_BOARD_ID	0xd0
#define VSD_OFFSET_TS_BOARD_ID	0x20

#define VSD_SIGNATURE_TOPSPIN	0x5ad

	memset(board_id, 0, MLX4_BOARD_ID_LEN);

	if (be16_to_cpup(vsd + VSD_OFFSET_SIG1) == VSD_SIGNATURE_TOPSPIN &&
	    be16_to_cpup(vsd + VSD_OFFSET_SIG2) == VSD_SIGNATURE_TOPSPIN) {
		strlcpy(board_id, vsd + VSD_OFFSET_TS_BOARD_ID, MLX4_BOARD_ID_LEN);
	} else {
		/*
		 * The board ID is a string but the firmware byte
		 * swaps each 4-byte word before passing it back to
		 * us.  Therefore we need to swab it before printing.
		 */
		u32 *bid_u32 = (u32 *)board_id;

		for (i = 0; i < 4; ++i) {
			u32 *addr;
			u32 val;

			addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4);
			val = get_unaligned(addr);
			val = swab32(val);
			put_unaligned(val, &bid_u32[i]);
		}
	}
}
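
/*
 * Illustrative example of the swab above: if one 4-byte word of the
 * board ID is read back as the value 0x64636261, swab32() turns it
 * into 0x61626364, reversing the byte order that firmware applied
 * before the string is stored into board_id.
 */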

int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	int err;

#define QUERY_ADAPTER_OUT_SIZE             0x100
#define QUERY_ADAPTER_INTA_PIN_OFFSET      0x10
#define QUERY_ADAPTER_VSD_OFFSET           0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	MLX4_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET);

	get_board_id(outbox + QUERY_ADAPTER_VSD_OFFSET / 4,
		     adapter->board_id);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *inbox;
	int err;
	static const u8 a0_dmfs_hw_steering[] =  {
		[MLX4_STEERING_DMFS_A0_DEFAULT]		= 0,
		[MLX4_STEERING_DMFS_A0_DYNAMIC]		= 1,
		[MLX4_STEERING_DMFS_A0_STATIC]		= 2,
		[MLX4_STEERING_DMFS_A0_DISABLE]		= 3
	};

#define INIT_HCA_IN_SIZE		 0x200
#define INIT_HCA_VERSION_OFFSET		 0x000
#define INIT_HCA_VERSION		 2
#define INIT_HCA_VXLAN_OFFSET		 0x0c
#define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
#define INIT_HCA_FLAGS_OFFSET		 0x014
#define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018
#define INIT_HCA_QPC_OFFSET		 0x020
#define INIT_HCA_QPC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x10)
#define INIT_HCA_LOG_QP_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x17)
#define INIT_HCA_SRQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x28)
#define INIT_HCA_LOG_SRQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x37)
#define INIT_HCA_EQE_CQE_OFFSETS	 (INIT_HCA_QPC_OFFSET + 0x38)
#define INIT_HCA_EQE_CQE_STRIDE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x3b)
#define INIT_HCA_ALTC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x60)
#define INIT_HCA_LOG_EQ_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x67)
#define INIT_HCA_NUM_SYS_EQS_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x6a)
#define INIT_HCA_RDMARC_BASE_OFFSET	 (INIT_HCA_QPC_OFFSET + 0x70)
#define INIT_HCA_LOG_RD_OFFSET		 (INIT_HCA_QPC_OFFSET + 0x77)
#define INIT_HCA_MCAST_OFFSET		 0x0c0
#define INIT_HCA_MC_BASE_OFFSET		 (INIT_HCA_MCAST_OFFSET + 0x00)
#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x13)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x17)
#define INIT_HCA_UC_STEERING_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET	 (INIT_HCA_MCAST_OFFSET + 0x1b)
#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN	0x6
#define INIT_HCA_DRIVER_VERSION_OFFSET	 0x140
#define INIT_HCA_DRIVER_VERSION_SZ	 0x40
#define INIT_HCA_FS_PARAM_OFFSET	 0x1d0
#define INIT_HCA_FS_BASE_OFFSET		 (INIT_HCA_FS_PARAM_OFFSET + 0x00)
#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x13)
#define INIT_HCA_FS_A0_OFFSET		 (INIT_HCA_FS_PARAM_OFFSET + 0x18)
#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
#define INIT_HCA_FS_ETH_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x21)
#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
#define INIT_HCA_FS_IB_BITS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x25)
#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET	 (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET		 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_TPT_MW_OFFSET		 (INIT_HCA_TPT_OFFSET + 0x08)
#define INIT_HCA_LOG_MPT_SZ_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x0b)
#define INIT_HCA_MTT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x10)
#define INIT_HCA_CMPT_BASE_OFFSET	 (INIT_HCA_TPT_OFFSET + 0x18)
#define INIT_HCA_UAR_OFFSET		 0x120
#define INIT_HCA_LOG_UAR_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0a)
#define INIT_HCA_UAR_PAGE_SZ_OFFSET	 (INIT_HCA_UAR_OFFSET + 0x0b)

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;

	*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
		((ilog2(cache_line_size()) - 4) << 5) | (1 << 4);

#if defined(__LITTLE_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
#elif defined(__BIG_ENDIAN)
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 1);
#else
#error Host endianness not defined
#endif
	/* Check port for UD address vector: */
	*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1);

	/* Enable IPoIB checksumming if we can: */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 3);

	/* Enable QoS support if module parameter set */
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG && enable_qos)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);

	/* enable counters */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);

	/* Enable RSS spread to fragmented IP packets when supported */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_RSS_IP_FRAG)
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 13);

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_EQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 29);
		dev->caps.eqe_size   = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size   = 32;
		dev->caps.eqe_factor = 0;
	}

	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_64B_CQE) {
		*(inbox + INIT_HCA_EQE_CQE_OFFSETS / 4) |= cpu_to_be32(1 << 30);
		dev->caps.cqe_size   = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size   = 32;
	}

	/* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) &&
	    (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE)) {
		dev->caps.eqe_size = cache_line_size();
		dev->caps.cqe_size = cache_line_size();
		dev->caps.eqe_factor = 0;
		MLX4_PUT(inbox, (u8)((ilog2(dev->caps.eqe_size) - 5) << 4 |
				     (ilog2(dev->caps.eqe_size) - 5)),
			 INIT_HCA_EQE_CQE_STRIDE_OFFSET);

		/* User still needs to know to support CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT)
		*(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31);

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) {
		u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4);

		strncpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ - 1);
		mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst);
	}

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_PUT(inbox, param->qpc_base,      INIT_HCA_QPC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_qps,   INIT_HCA_LOG_QP_OFFSET);
	MLX4_PUT(inbox, param->srqc_base,     INIT_HCA_SRQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_srqs,  INIT_HCA_LOG_SRQ_OFFSET);
	MLX4_PUT(inbox, param->cqc_base,      INIT_HCA_CQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_cqs,   INIT_HCA_LOG_CQ_OFFSET);
	MLX4_PUT(inbox, param->altc_base,     INIT_HCA_ALTC_BASE_OFFSET);
	MLX4_PUT(inbox, param->auxc_base,     INIT_HCA_AUXC_BASE_OFFSET);
	MLX4_PUT(inbox, param->eqc_base,      INIT_HCA_EQC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_num_eqs,   INIT_HCA_LOG_EQ_OFFSET);
	MLX4_PUT(inbox, param->num_sys_eqs,   INIT_HCA_NUM_SYS_EQS_OFFSET);
	MLX4_PUT(inbox, param->rdmarc_base,   INIT_HCA_RDMARC_BASE_OFFSET);
	MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);

	/* steering attributes */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
			cpu_to_be32(1 <<
				    INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);

		MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		/* Enable Ethernet flow steering
		 * with udp unicast and tcp unicast
		 */
		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_STATIC)
			MLX4_PUT(inbox,
				 (u8)(MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
				 INIT_HCA_FS_ETH_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
		/* Enable IPoIB flow steering
		 * with udp unicast and tcp unicast
		 */
		MLX4_PUT(inbox, (u8) (MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN),
			 INIT_HCA_FS_IB_BITS_OFFSET);
		MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
			 INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);

		if (dev->caps.dmfs_high_steer_mode !=
		    MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
			MLX4_PUT(inbox,
				 ((u8)(a0_dmfs_hw_steering[dev->caps.dmfs_high_steer_mode]
				       << 6)),
				 INIT_HCA_FS_A0_OFFSET);
	} else {
		MLX4_PUT(inbox, param->mc_base,	INIT_HCA_MC_BASE_OFFSET);
		MLX4_PUT(inbox, param->log_mc_entry_sz,
			 INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_hash_sz,
			 INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		MLX4_PUT(inbox, param->log_mc_table_sz,
			 INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
			MLX4_PUT(inbox, (u8) (1 << 3),
				 INIT_HCA_UC_STEERING_OFFSET);
	}

	/* TPT attributes */

	MLX4_PUT(inbox, param->dmpt_base,  INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_PUT(inbox, param->mw_enabled, INIT_HCA_TPT_MW_OFFSET);
	MLX4_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
	MLX4_PUT(inbox, param->mtt_base,   INIT_HCA_MTT_BASE_OFFSET);
	MLX4_PUT(inbox, param->cmpt_base,  INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_PUT(inbox, param->log_uar_sz,  INIT_HCA_LOG_UAR_SZ_OFFSET);

	/* set parser VXLAN attributes */
	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
		u8 parser_params = 0;

		MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET);
	}

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA,
		       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);

	if (err)
		mlx4_err(dev, "INIT_HCA returns %d\n", err);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
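
/*
 * Worked example for the EQE/CQE stride encoding above (illustrative):
 * with a 128-byte cache line, ilog2(128) - 5 = 2, so the byte written
 * to INIT_HCA_EQE_CQE_STRIDE_OFFSET is (2 << 4) | 2 = 0x22, i.e. both
 * strides are 2^(2 + 5) = 128 bytes.
 */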

int mlx4_QUERY_HCA(struct mlx4_dev *dev,
		   struct mlx4_init_hca_param *param)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	u64 qword_field;
	u32 dword_field;
	u16 word_field;
	u8 byte_field;
	int err;
	static const u8 a0_dmfs_query_hw_steering[] =  {
		[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
		[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
		[2] = MLX4_STEERING_DMFS_A0_STATIC,
		[3] = MLX4_STEERING_DMFS_A0_DISABLE
	};

#define QUERY_HCA_GLOBAL_CAPS_OFFSET	0x04
#define QUERY_HCA_CORE_CLOCK_OFFSET	0x0c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err)
		goto out;

	MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
	MLX4_GET(param->hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

	/* QPC/EEC/CQC/EQC/RDMARC attributes */

	MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
	param->qpc_base = qword_field & ~((u64)0x1f);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
	param->log_num_qps = byte_field & 0x1f;
	MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
	param->srqc_base = qword_field & ~((u64)0x1f);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
	param->log_num_srqs = byte_field & 0x1f;
	MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
	param->cqc_base = qword_field & ~((u64)0x1f);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
	param->log_num_cqs = byte_field & 0x1f;
	MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
	param->altc_base = qword_field;
	MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
	param->auxc_base = qword_field;
	MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
	param->eqc_base = qword_field & ~((u64)0x1f);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
	param->log_num_eqs = byte_field & 0x1f;
	MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
	param->num_sys_eqs = word_field & 0xfff;
	MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
	param->rdmarc_base = qword_field & ~((u64)0x1f);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
	param->log_rd_per_qp = byte_field & 0x7;

	MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
	if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
		param->steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
	} else {
		MLX4_GET(byte_field, outbox, INIT_HCA_UC_STEERING_OFFSET);
		if (byte_field & 0x8)
			param->steering_mode = MLX4_STEERING_MODE_B0;
		else
			param->steering_mode = MLX4_STEERING_MODE_A0;
	}

	if (dword_field & (1 << 13))
		param->rss_ip_frags = 1;

	/* steering attributes */
	if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
		MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
		param->log_mc_entry_sz = byte_field & 0x1f;
		MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
		param->log_mc_table_sz = byte_field & 0x1f;
		MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
		param->dmfs_high_steer_mode =
			a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
	} else {
		MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
		MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
		param->log_mc_entry_sz = byte_field & 0x1f;
		MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
		param->log_mc_hash_sz = byte_field & 0x1f;
		MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
		param->log_mc_table_sz = byte_field & 0x1f;
	}

	/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_OFFSETS);
	if (byte_field & 0x20) /* 64-bytes eqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_EQE_ENABLED;
	if (byte_field & 0x40) /* 64-bytes cqe enabled */
		param->dev_cap_enabled |= MLX4_DEV_CAP_64B_CQE_ENABLED;

	/* CX3 is capable of extending CQEs\EQEs to strides larger than 64B */
	MLX4_GET(byte_field, outbox, INIT_HCA_EQE_CQE_STRIDE_OFFSET);
	if (byte_field) {
		param->dev_cap_enabled |= MLX4_DEV_CAP_EQE_STRIDE_ENABLED;
		param->dev_cap_enabled |= MLX4_DEV_CAP_CQE_STRIDE_ENABLED;
		param->cqe_size = 1 << ((byte_field &
					 MLX4_CQE_SIZE_MASK_STRIDE) + 5);
		param->eqe_size = 1 << (((byte_field &
					  MLX4_EQE_SIZE_MASK_STRIDE) >> 4) + 5);
	}

	/* TPT attributes */

	MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
	MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
	param->mw_enabled = byte_field >> 7;
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
	param->log_mpt_sz = byte_field & 0x3f;
	MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
	MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);

	/* UAR attributes */

	MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
	MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
	param->log_uar_sz = byte_field & 0xf;

	/* phv_check enable */
	MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
	if (byte_field & 0x2)
		param->phv_check_en = 1;
out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

static int mlx4_hca_core_clock_update(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be32 *outbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n");
		return PTR_ERR(mailbox);
	}
	outbox = mailbox->buf;

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_HCA,
			   MLX4_CMD_TIME_CLASS_B,
			   !mlx4_is_slave(dev));
	if (err) {
		mlx4_warn(dev, "hca_core_clock update failed\n");
		goto out;
	}

	MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET);

out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

/* for IB-type ports only in SRIOV mode. Checks that both proxy QP0
 * and real QP0 are active, so that the paravirtualized QP0 is ready
 * to operate */
static int check_qp0_state(struct mlx4_dev *dev, int function, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	/* irrelevant if not infiniband */
	if (priv->mfunc.master.qp0_state[port].proxy_qp0_active &&
	    priv->mfunc.master.qp0_state[port].qp0_active)
		return 1;
	return 0;
}

int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		/* Enable port only if it was previously disabled */
		if (!priv->mfunc.master.init_port_ref[port]) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	} else {
		if (slave == mlx4_master_func_num(dev)) {
			if (check_qp0_state(dev, slave, port) &&
			    !priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.qp0_state[port].port_active = 1;
				priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask |= (1 << port);
	}
	++priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *inbox;
	int err;
	u32 flags;
	u16 field;

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
#define INIT_PORT_IN_SIZE          256
#define INIT_PORT_FLAGS_OFFSET     0x00
#define INIT_PORT_FLAG_SIG         (1 << 18)
#define INIT_PORT_FLAG_NG          (1 << 17)
#define INIT_PORT_FLAG_G0          (1 << 16)
#define INIT_PORT_VL_SHIFT         4
#define INIT_PORT_PORT_WIDTH_SHIFT 8
#define INIT_PORT_MTU_OFFSET       0x04
#define INIT_PORT_MAX_GID_OFFSET   0x06
#define INIT_PORT_MAX_PKEY_OFFSET  0x0a
#define INIT_PORT_GUID0_OFFSET     0x10
#define INIT_PORT_NODE_GUID_OFFSET 0x18
#define INIT_PORT_SI_GUID_OFFSET   0x20

		mailbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);
		inbox = mailbox->buf;

		flags = 0;
		flags |= (dev->caps.vl_cap[port] & 0xf) << INIT_PORT_VL_SHIFT;
		flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
		MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);

		field = 128 << dev->caps.ib_mtu_cap[port];
		MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
		field = dev->caps.gid_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
		field = dev->caps.pkey_table_len[port];
		MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);

		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

		mlx4_free_cmd_mailbox(dev, mailbox);
	} else
		err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);

	if (!err)
		mlx4_hca_core_clock_update(dev);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);

int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_modifier);
	int err;

	if (port < 0)
		return -EINVAL;

	if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
	    (1 << port)))
		return 0;

	if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) {
		if (priv->mfunc.master.init_port_ref[port] == 1) {
			err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
			if (err)
				return err;
		}
		priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	} else {
		/* infiniband port */
		if (slave == mlx4_master_func_num(dev)) {
			if (!priv->mfunc.master.qp0_state[port].qp0_active &&
			    priv->mfunc.master.qp0_state[port].port_active) {
				err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
					       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
				if (err)
					return err;
				priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
				priv->mfunc.master.qp0_state[port].port_active = 0;
			}
		} else
			priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
	}
	--priv->mfunc.master.init_port_ref[port];
	return 0;
}

int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
	return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);

int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
	return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA,
			MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}

struct mlx4_config_dev {
	__be32	update_flags;
	__be32	rsvd1[3];
	__be16	vxlan_udp_dport;
	__be16	rsvd2;
	__be16	roce_v2_entropy;
	__be16	roce_v2_udp_dport;
	__be32	roce_flags;
	__be32	rsvd4[25];
	__be16	rsvd5;
	u8	rsvd6;
	u8	rx_checksum_val;
};

#define MLX4_VXLAN_UDP_DPORT (1 << 0)
#define MLX4_ROCE_V2_UDP_DPORT BIT(3)
#define MLX4_DISABLE_RX_PORT BIT(18)

static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
	int err;
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, config_dev, sizeof(*config_dev));

	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_CONFIG_DEV,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_CONFIG_DEV_get(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev)
{
	int err;
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 1, MLX4_CMD_CONFIG_DEV,
			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		goto out;

	memcpy(config_dev, mailbox->buf, sizeof(*config_dev));

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/* Conversion between the HW values and the actual functionality.
 * The value represented by the array index,
 * and the functionality determined by the flags.
 */
static const u8 config_dev_csum_flags[] = {
	[0] =	0,
	[1] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP,
	[2] =	MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP	|
		MLX4_RX_CSUM_MODE_L4,
	[3] =	MLX4_RX_CSUM_MODE_L4			|
		MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP	|
		MLX4_RX_CSUM_MODE_MULTI_VLAN
};

int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
			      struct mlx4_config_dev_params *params)
{
	struct mlx4_config_dev config_dev = {0};
	int err;
	u8 csum_mask;

#define CONFIG_DEV_RX_CSUM_MODE_MASK			0x7
#define CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET	0
#define CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET	4

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_CONFIG_DEV))
		return -EOPNOTSUPP;

	err = mlx4_CONFIG_DEV_get(dev, &config_dev);
	if (err)
		return err;

	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT1_BIT_OFFSET) &
			CONFIG_DEV_RX_CSUM_MODE_MASK;

	if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
		return -EINVAL;
	params->rx_csum_flags_port_1 = config_dev_csum_flags[csum_mask];

	csum_mask = (config_dev.rx_checksum_val >> CONFIG_DEV_RX_CSUM_MODE_PORT2_BIT_OFFSET) &
			CONFIG_DEV_RX_CSUM_MODE_MASK;

	if (csum_mask >= ARRAY_SIZE(config_dev_csum_flags))
		return -EINVAL;
	params->rx_csum_flags_port_2 = config_dev_csum_flags[csum_mask];

	params->vxlan_udp_dport = be16_to_cpu(config_dev.vxlan_udp_dport);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_config_dev_retrieval);

int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port)
{
	struct mlx4_config_dev config_dev;

	memset(&config_dev, 0, sizeof(config_dev));
	config_dev.update_flags    = cpu_to_be32(MLX4_VXLAN_UDP_DPORT);
	config_dev.vxlan_udp_dport = udp_port;

	return mlx4_CONFIG_DEV_set(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port);
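
/*
 * Illustrative usage sketch (compiled out, not part of the driver):
 * steering the device to the IANA-assigned VXLAN UDP port. The caller
 * is hypothetical; note the port is passed already in big-endian form.
 */
#if 0
static int example_set_default_vxlan_port(struct mlx4_dev *dev)
{
	/* 4789 is the IANA-assigned VXLAN UDP destination port */
	return mlx4_config_vxlan_port(dev, cpu_to_be16(4789));
}
#endif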

#define CONFIG_DISABLE_RX_PORT BIT(15)
int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis)
{
	struct mlx4_config_dev config_dev;

	memset(&config_dev, 0, sizeof(config_dev));
	config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT);
	if (dis)
		config_dev.roce_flags =
			cpu_to_be32(CONFIG_DISABLE_RX_PORT);

	return mlx4_CONFIG_DEV_set(dev, &config_dev);
}

int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port)
{
	struct mlx4_config_dev config_dev;

	memset(&config_dev, 0, sizeof(config_dev));
	config_dev.update_flags     = cpu_to_be32(MLX4_ROCE_V2_UDP_DPORT);
	config_dev.roce_v2_udp_dport = cpu_to_be16(udp_port);

	return mlx4_CONFIG_DEV_set(dev, &config_dev);
}
EXPORT_SYMBOL_GPL(mlx4_config_roce_v2_port);

int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct {
		__be32 v_port1;
		__be32 v_port2;
	} *v2p;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	v2p = mailbox->buf;
	v2p->v_port1 = cpu_to_be32(port1);
	v2p->v_port2 = cpu_to_be32(port2);

	err = mlx4_cmd(dev, mailbox->dma, 0,
		       MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
	int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
			       MLX4_CMD_SET_ICM_SIZE,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (ret)
		return ret;

	/*
	 * Round up number of system pages needed in case
	 * MLX4_ICM_PAGE_SIZE < PAGE_SIZE.
	 */
	*aux_pages = ALIGN(*aux_pages, PAGE_SIZE / MLX4_ICM_PAGE_SIZE) >>
		(PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT);

	return 0;
}
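
/*
 * Worked example of the rounding above (illustrative): with 64 KB
 * system pages (PAGE_SHIFT = 16) and 4 KB ICM pages, a firmware
 * answer of 17 ICM pages is aligned up to 32 (a multiple of
 * PAGE_SIZE / MLX4_ICM_PAGE_SIZE = 16) and then shifted right by
 * PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT = 4, giving 2 system pages.
 */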

int mlx4_NOP(struct mlx4_dev *dev)
{
	/* Input modifier of 0x1f means "finish as soon as possible." */
	return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}

int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
			     const u32 offset[],
			     u32 value[], size_t array_len, u8 port)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 *outbox;
	size_t i;
	int ret;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	outbox = mailbox->buf;

	ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier,
			   MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (ret)
		goto out;

	for (i = 0; i < array_len; i++) {
		if (offset[i] > MLX4_MAILBOX_SIZE) {
			ret = -EINVAL;
			goto out;
		}

		MLX4_GET(value[i], outbox, offset[i]);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_query_diag_counters);
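
/*
 * Illustrative usage sketch (compiled out, not part of the driver).
 * The offsets below are hypothetical placeholders; real callers such
 * as mlx4_ib pass the DIAG_RPRT offsets they actually need.
 */
#if 0
static int example_read_two_counters(struct mlx4_dev *dev, u8 port)
{
	static const u32 offset[] = { 0x10, 0x18 };	/* hypothetical */
	u32 value[ARRAY_SIZE(offset)];

	return mlx4_query_diag_counters(dev, 0, offset, value,
					ARRAY_SIZE(offset), port);
}
#endif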

int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
	u8 port;
	u32 *outbox;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	u32 guid_hi, guid_lo;
	int err, ret = 0;
#define MOD_STAT_CFG_PORT_OFFSET 8
#define MOD_STAT_CFG_GUID_H	 0X14
#define MOD_STAT_CFG_GUID_L	 0X1c

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	outbox = mailbox->buf;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		in_mod = port << MOD_STAT_CFG_PORT_OFFSET;
		err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, 0x2,
				   MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to get port %d uplink GUID\n",
				 port);
			ret = err;
		} else {
			MLX4_GET(guid_hi, outbox, MOD_STAT_CFG_GUID_H);
			MLX4_GET(guid_lo, outbox, MOD_STAT_CFG_GUID_L);
			dev->caps.phys_port_id[port] = (u64)guid_lo |
						       (u64)guid_hi << 32;
		}
	}
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

#define MLX4_WOL_SETUP_MODE (5 << 28)
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
			    MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);

int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
{
	u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;

	return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
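
/*
 * Illustrative read-modify-write sketch (compiled out, not part of the
 * driver): fetch the WoL config word, set an enable bit, write it
 * back. The bit position below is a hypothetical placeholder; the real
 * bit layout lives with the mlx4_en ethtool code.
 */
#if 0
static int example_toggle_wol(struct mlx4_dev *dev, int port)
{
	u64 config;
	int err;

	err = mlx4_wol_read(dev, &config, port);
	if (err)
		return err;

	config |= 1ULL << 62;	/* hypothetical enable bit */
	return mlx4_wol_write(dev, config, port);
}
#endif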

enum {
	ADD_TO_MCG = 0x26,
};

void mlx4_opreq_action(struct work_struct *work)
{
	struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
					      opreq_task);
	struct mlx4_dev *dev = &priv->dev;
	int num_tasks = atomic_read(&priv->opreq_count);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 *outbox;
	u32 modifier;
	u16 token;
	u16 type;
	int err;
	u32 num_qps;
	struct mlx4_qp qp;
	int i;
	u8 rem_mcg;
	u8 prot;

#define GET_OP_REQ_MODIFIER_OFFSET	0x08
#define GET_OP_REQ_TOKEN_OFFSET		0x14
#define GET_OP_REQ_TYPE_OFFSET		0x1a
#define GET_OP_REQ_DATA_OFFSET		0x20

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
		return;
	}
	outbox = mailbox->buf;

	while (num_tasks) {
		err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
				   MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
				   MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to retrieve required operation: %d\n",
				 err);
			goto out;
		}
		MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
		MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
		MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
		type &= 0xfff;

		switch (type) {
		case ADD_TO_MCG:
			if (dev->caps.steering_mode ==
			    MLX4_STEERING_MODE_DEVICE_MANAGED) {
				mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
				err = EPERM;
				break;
			}
			mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
						  GET_OP_REQ_DATA_OFFSET);
			num_qps = be32_to_cpu(mgm->members_count) &
				  MGM_QPN_MASK;
			rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
			prot = ((u8 *)(&mgm->members_count))[0] >> 6;

			for (i = 0; i < num_qps; i++) {
				qp.qpn = be32_to_cpu(mgm->qp[i]);
				if (rem_mcg)
					err = mlx4_multicast_detach(dev, &qp,
								    mgm->gid,
								    prot, 0);
				else
					err = mlx4_multicast_attach(dev, &qp,
								    mgm->gid,
								    mgm->gid[5],
								    0, prot,
								    NULL);
				if (err)
					break;
			}
			break;
		default:
			mlx4_warn(dev, "Bad type for required operation\n");
			err = EINVAL;
			break;
		}
		err = mlx4_cmd(dev, 0, ((u32) err |
					(__force u32)cpu_to_be32(token) << 16),
			       1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
			       MLX4_CMD_NATIVE);
		if (err) {
			mlx4_err(dev, "Failed to acknowledge required request: %d\n",
				 err);
			goto out;
		}
		memset(outbox, 0, 0xffc);
		num_tasks = atomic_dec_return(&priv->opreq_count);
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static int mlx4_check_smp_firewall_active(struct mlx4_dev *dev,
					  struct mlx4_cmd_mailbox *mailbox)
{
#define MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET		0x10
#define MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET		0x20
#define MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET		0x40
#define MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET	0x70

	u32 set_attr_mask, getresp_attr_mask;
	u32 trap_attr_mask, traprepress_attr_mask;

	MLX4_GET(set_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_SET_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall set_attribute_mask = 0x%x\n",
		 set_attr_mask);

	MLX4_GET(getresp_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_GETRESP_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall getresp_attribute_mask = 0x%x\n",
		 getresp_attr_mask);

	MLX4_GET(trap_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_TRAP_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall trap_attribute_mask = 0x%x\n",
		 trap_attr_mask);

	MLX4_GET(traprepress_attr_mask, mailbox->buf,
		 MLX4_CMD_MAD_DEMUX_TRAP_REPRESS_ATTR_OFFSET);
	mlx4_dbg(dev, "SMP firewall traprepress_attribute_mask = 0x%x\n",
		 traprepress_attr_mask);

	if (set_attr_mask && getresp_attr_mask && trap_attr_mask &&
	    traprepress_attr_mask)
		return 1;

	return 0;
}

int mlx4_config_mad_demux(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	/* Check if mad_demux is supported */
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_MAD_DEMUX))
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		mlx4_warn(dev, "Failed to allocate mailbox for cmd MAD_DEMUX");
		return -ENOMEM;
	}

	/* Query mad_demux to find out which MADs are handled by internal sma */
	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0x01 /* subn mgmt class */,
			   MLX4_CMD_MAD_DEMUX_QUERY_RESTR, MLX4_CMD_MAD_DEMUX,
			   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err) {
		mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: query restrictions failed (%d)\n",
			  err);
		goto out;
	}

	if (mlx4_check_smp_firewall_active(dev, mailbox))
		dev->flags |= MLX4_FLAG_SECURE_HOST;

	/* Config mad_demux to handle all MADs returned by the query above */
	err = mlx4_cmd(dev, mailbox->dma, 0x01 /* subn mgmt class */,
		       MLX4_CMD_MAD_DEMUX_CONFIG, MLX4_CMD_MAD_DEMUX,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err) {
		mlx4_warn(dev, "MLX4_CMD_MAD_DEMUX: configure failed (%d)\n", err);
		goto out;
	}

	if (dev->flags & MLX4_FLAG_SECURE_HOST)
		mlx4_warn(dev, "HCA operating in secure-host mode. SMP firewall activated.\n");
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

/* Access Reg commands */
enum mlx4_access_reg_masks {
	MLX4_ACCESS_REG_STATUS_MASK = 0x7f,
	MLX4_ACCESS_REG_METHOD_MASK = 0x7f,
	MLX4_ACCESS_REG_LEN_MASK = 0x7ff
};

struct mlx4_access_reg {
	__be16 constant1;
	u8 status;
	u8 resrvd1;
	__be16 reg_id;
	u8 method;
	u8 constant2;
	__be32 resrvd2[2];
	__be16 len_const;
	__be16 resrvd3;
#define MLX4_ACCESS_REG_HEADER_SIZE (20)
	u8 reg_data[MLX4_MAILBOX_SIZE-MLX4_ACCESS_REG_HEADER_SIZE];
} __attribute__((__packed__));

/**
 * mlx4_ACCESS_REG - Generic access reg command.
 * @dev: mlx4_dev.
 * @reg_id: register ID to access.
 * @method: Access method Read/Write.
 * @reg_len: register length to Read/Write in bytes.
 * @reg_data: reg_data pointer to Read/Write From/To.
 *
 * Access ConnectX registers FW command.
 * Returns 0 on success and copies outbox mlx4_access_reg data
 * field into reg_data or a negative error code.
 */
static int mlx4_ACCESS_REG(struct mlx4_dev *dev, u16 reg_id,
			   enum mlx4_access_reg_method method,
			   u16 reg_len, void *reg_data)
{
	struct mlx4_cmd_mailbox *inbox, *outbox;
	struct mlx4_access_reg *inbuf, *outbuf;
	int err;

	inbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inbox))
		return PTR_ERR(inbox);

	outbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outbox)) {
		mlx4_free_cmd_mailbox(dev, inbox);
		return PTR_ERR(outbox);
	}

	inbuf = inbox->buf;
	outbuf = outbox->buf;

	inbuf->constant1 = cpu_to_be16(0x1<<11 | 0x4);
	inbuf->constant2 = 0x1;
	inbuf->reg_id = cpu_to_be16(reg_id);
	inbuf->method = method & MLX4_ACCESS_REG_METHOD_MASK;

	reg_len = min(reg_len, (u16)(sizeof(inbuf->reg_data)));
	inbuf->len_const =
		cpu_to_be16(((reg_len/4 + 1) & MLX4_ACCESS_REG_LEN_MASK) |
			    ((0x3) << 12));

	memcpy(inbuf->reg_data, reg_data, reg_len);
	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, 0, 0,
			   MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	if (outbuf->status & MLX4_ACCESS_REG_STATUS_MASK) {
		err = outbuf->status & MLX4_ACCESS_REG_STATUS_MASK;
		mlx4_err(dev,
			 "MLX4_CMD_ACCESS_REG(%x) returned REG status (%x)\n",
			 reg_id, err);
		goto out;
	}

	memcpy(reg_data, outbuf->reg_data, reg_len);
out:
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return err;
}

/* ConnectX registers IDs */
enum mlx4_reg_id {
	MLX4_REG_ID_PTYS = 0x5004,
};

/**
 * mlx4_ACCESS_PTYS_REG - Access PTYs (Port Type and Speed)
 * register
 * @dev: mlx4_dev.
 * @method: Access method Read/Write.
 * @ptys_reg: PTYS register data pointer.
 *
 * Access ConnectX PTYS register, to Read/Write Port Type/Speed
 * configuration
 * Returns 0 on success or a negative error code.
 */
int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
			 enum mlx4_access_reg_method method,
			 struct mlx4_ptys_reg *ptys_reg)
{
	return mlx4_ACCESS_REG(dev, MLX4_REG_ID_PTYS,
			       method, sizeof(*ptys_reg), ptys_reg);
}
EXPORT_SYMBOL_GPL(mlx4_ACCESS_PTYS_REG);
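
/*
 * Illustrative read-modify-write sketch (compiled out, not part of the
 * driver), following the pattern the ethtool path uses for link
 * settings: query PTYS, adjust the admin fields, write it back. The
 * field and enum names below are assumed from struct mlx4_ptys_reg and
 * enum mlx4_access_reg_method.
 */
#if 0
static int example_ptys_rmw(struct mlx4_dev *dev, u8 port)
{
	struct mlx4_ptys_reg ptys_reg;
	int err;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;	/* Ethernet protocols */

	err = mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (err)
		return err;

	/* advertise everything the port supports */
	ptys_reg.eth_proto_admin = ptys_reg.eth_proto_cap;

	return mlx4_ACCESS_PTYS_REG(dev, MLX4_ACCESS_REG_WRITE, &ptys_reg);
}
#endif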

int mlx4_ACCESS_REG_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	struct mlx4_access_reg *inbuf = inbox->buf;
	u8 method = inbuf->method & MLX4_ACCESS_REG_METHOD_MASK;
	u16 reg_id = be16_to_cpu(inbuf->reg_id);

	if (slave != mlx4_master_func_num(dev) &&
	    method == MLX4_ACCESS_REG_WRITE)
		return -EPERM;

	if (reg_id == MLX4_REG_ID_PTYS) {
		struct mlx4_ptys_reg *ptys_reg =
			(struct mlx4_ptys_reg *)inbuf->reg_data;

		ptys_reg->local_port =
			mlx4_slave_convert_port(dev, slave,
						ptys_reg->local_port);
	}

	return mlx4_cmd_box(dev, inbox->dma, outbox->dma, vhcr->in_modifier,
			    0, MLX4_CMD_ACCESS_REG, MLX4_CMD_TIME_CLASS_C,
			    MLX4_CMD_NATIVE);
}

static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
{
#define SET_PORT_GEN_PHV_VALID	0x10
#define SET_PORT_GEN_PHV_EN	0x80

	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;

	context->flags2 |= SET_PORT_GEN_PHV_VALID;
	if (phv_bit)
		context->phv_en |= SET_PORT_GEN_PHV_EN;

	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv)
{
	int err;
	struct mlx4_func_cap func_cap;

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
	if (!err)
		*phv = func_cap.flags0 & QUERY_FUNC_CAP_PHV_BIT;
	return err;
}
EXPORT_SYMBOL(get_phv_bit);

int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val)
{
	int ret;

	if (mlx4_is_slave(dev))
		return -EPERM;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN &&
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN)) {
		ret = mlx4_SET_PORT_phv_bit(dev, port, new_val);
		if (!ret)
			dev->caps.phv_bit[port] = new_val;
		return ret;
	}

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(set_phv_bit);
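
/*
 * Illustrative usage sketch (compiled out, not part of the driver):
 * enabling the per-port PHV bit and reading it back through
 * QUERY_FUNC_CAP. The caller below is hypothetical.
 */
#if 0
static int example_enable_phv(struct mlx4_dev *dev, u8 port)
{
	int phv, err;

	err = set_phv_bit(dev, port, 1);
	if (err)
		return err;

	err = get_phv_bit(dev, port, &phv);
	if (!err)
		mlx4_dbg(dev, "port %d phv=%d\n", port, phv);
	return err;
}
#endif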

int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
				      bool *vlan_offload_disabled)
{
	struct mlx4_func_cap func_cap;
	int err;

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, port, &func_cap);
	if (!err)
		*vlan_offload_disabled =
			!!(func_cap.flags0 &
			   QUERY_FUNC_CAP_VLAN_OFFLOAD_DISABLE);
	return err;
}
EXPORT_SYMBOL(mlx4_get_is_vlan_offload_disabled);

void mlx4_replace_zero_macs(struct mlx4_dev *dev)
{
	int i;
	u8 mac_addr[ETH_ALEN];

	dev->port_random_macs = 0;
	for (i = 1; i <= dev->caps.num_ports; ++i)
		if (!dev->caps.def_mac[i] &&
		    dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
			eth_random_addr(mac_addr);
			dev->port_random_macs |= 1 << i;
			dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
		}
}
EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);