/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>
#include <linux/delay.h>

#include <asm/io.h>

#include "mlx4.h"
#include "fw.h"
#include "fw_qos.h"
#include "mlx4_stats.h"
#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK	0xffffffffffffff00ULL

#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1
enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE	= 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};
enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,
	HCR_T_BIT		= 21,
	HCR_E_BIT		= 22,
	HCR_GO_BIT		= 23
};

enum {
	GO_BIT_TIMEOUT_MSECS	= 10000
};
enum mlx4_vlan_transition {
	MLX4_VLAN_TRANSITION_VST_VST = 0,
	MLX4_VLAN_TRANSITION_VST_VGT = 1,
	MLX4_VLAN_TRANSITION_VGT_VST = 2,
	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};
struct mlx4_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u8			fw_status;
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr);
static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
		[CMD_STAT_BAD_OP]	  = -EPERM,
		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
		[CMD_STAT_BAD_INDEX]	  = -EBADF,
		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
		[CMD_STAT_BAD_QP_STATE]	  = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
		[CMD_STAT_REG_BOUND]	  = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
		[CMD_STAT_BAD_PKT]	  = -EINVAL,
		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}
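/*
 * Note (added for clarity): trans_table is a sparse array keyed by the FW
 * status byte, and any status that is out of range or has no entry
 * collapses to -EIO.  For example, CMD_STAT_BAD_OP (0x02) maps to -EPERM,
 * while an undefined status such as 0x07 (there is no enum value between
 * 0x06 and 0x08) falls through to -EIO.
 */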
static u8 mlx4_errno_to_status(int errno)
{
	switch (errno) {
	case -EPERM:
		return CMD_STAT_BAD_OP;
	case -EINVAL:
		return CMD_STAT_BAD_PARAM;
	case -ENXIO:
		return CMD_STAT_BAD_SYS_STATE;
	case -EBUSY:
		return CMD_STAT_RESOURCE_BUSY;
	case -ENOMEM:
		return CMD_STAT_EXCEED_LIM;
	case -ENFILE:
		return CMD_STAT_ICM_ERROR;
	default:
		return CMD_STAT_INTERNAL_ERR;
	}
}
static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
				       u8 op_modifier)
{
	switch (op) {
	case MLX4_CMD_UNMAP_ICM:
	case MLX4_CMD_UNMAP_ICM_AUX:
	case MLX4_CMD_UNMAP_FA:
	case MLX4_CMD_2RST_QP:
	case MLX4_CMD_HW2SW_EQ:
	case MLX4_CMD_HW2SW_CQ:
	case MLX4_CMD_HW2SW_SRQ:
	case MLX4_CMD_HW2SW_MPT:
	case MLX4_CMD_CLOSE_HCA:
	case MLX4_QP_FLOW_STEERING_DETACH:
	case MLX4_CMD_FREE_RES:
	case MLX4_CMD_CLOSE_PORT:
		return CMD_STAT_OK;

	case MLX4_CMD_QP_ATTACH:
		/* On Detach case return success */
		if (op_modifier == 0)
			return CMD_STAT_OK;
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	default:
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	}
}
static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
{
	/* Any error during the closing commands below is considered fatal */
	if (op == MLX4_CMD_CLOSE_HCA ||
	    op == MLX4_CMD_HW2SW_EQ ||
	    op == MLX4_CMD_HW2SW_CQ ||
	    op == MLX4_CMD_2RST_QP ||
	    op == MLX4_CMD_HW2SW_SRQ ||
	    op == MLX4_CMD_SYNC_TPT ||
	    op == MLX4_CMD_UNMAP_ICM ||
	    op == MLX4_CMD_UNMAP_ICM_AUX ||
	    op == MLX4_CMD_UNMAP_FA)
		return 1;
	/* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
	 * CMD_STAT_REG_BOUND.
	 * This status indicates that the memory region has memory windows
	 * bound to it, which may result from invalid user space usage and
	 * is not fatal.
	 */
	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
		return 1;
	return 0;
}
static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
			       int err)
{
	/* Only if the reset flow is really active is the return code based
	 * on the command; otherwise the current error code is returned.
	 */
	if (mlx4_internal_err_reset) {
		mlx4_enter_error_state(dev->persist);
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
	}

	return err;
}
static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}
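/*
 * Note on the comm-channel protocol (as implemented by comm_pending()
 * above and mlx4_comm_cmd_post() below): each 32-bit comm-channel word
 * carries the command in bits 16-23, its parameter in bits 0-15, and a
 * toggle in bit 31.  The slave flips its cached toggle on every post; the
 * master echoes the new toggle back into slave_read once it has consumed
 * the command, so comm_pending() returns true exactly while a posted
 * command is still outstanding.
 */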
static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the function was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	mutex_lock(&dev->persist->device_state_mutex);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mutex_unlock(&dev->persist->device_state_mutex);
		return -EIO;
	}

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	mutex_unlock(&dev->persist->device_state_mutex);

	return 0;
}
static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
			      unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long end;
	int err = 0;
	int ret_from_pending = 0;

	/* First, verify that the master reports correct status */
	if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

	/* Write command */
	down(&priv->cmd.poll_sem);
	if (mlx4_comm_cmd_post(dev, cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR does
		 * mlx4_comm_cmd_post return with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();
	ret_from_pending = comm_pending(dev);
	if (ret_from_pending) {
		/* check if the slave is trying to boot in the middle of
		 * the FLR process. The only non-zero result in the RESET
		 * command is MLX4_DELAY_RESET_SLAVE.
		 */
		if ((MLX4_COMM_CMD_RESET == cmd)) {
			err = MLX4_DELAY_RESET_SLAVE;
			goto out;
		} else {
			mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
				  cmd);
			err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		}
	}

	if (err)
		mlx4_enter_error_state(dev->persist);
out:
	up(&priv->cmd.poll_sem);
	return err;
}
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
			      u16 param, u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	reinit_completion(&context->done);

	if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR does
		 * mlx4_comm_cmd_post return with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
			  vhcr_cmd, op);
		goto out_reset;
	}

	err = context->result;
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 vhcr_cmd, context->fw_status);
		if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;
	}

	/* wait for comm channel ready
	 * this is necessary to prevent a race when switching between
	 * event and polling mode.
	 * Skip this section in case the device is in FATAL_ERROR state;
	 * in that state no commands are sent via the comm channel until
	 * the device has returned from reset.
	 */
	if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		end = msecs_to_jiffies(timeout) + jiffies;
		while (comm_pending(dev) && time_before(jiffies, end))
			cond_resched();
	}
	goto out;

out_reset:
	err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	mlx4_enter_error_state(dev->persist);
out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  u16 op, unsigned long timeout)
{
	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	if (mlx4_priv(dev)->cmd.use_events)
		return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}
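/*
 * Note (added for clarity): mlx4_comm_cmd() is the single entry point for
 * slave-to-master traffic.  Once the device is flagged INTERNAL_ERROR no
 * channel I/O is attempted at all; otherwise cmd.use_events selects
 * between the completion-based path (mlx4_comm_cmd_wait) and the
 * busy-poll path (mlx4_comm_cmd_poll).
 */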
static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status;

	if (pci_channel_offline(dev->persist->pdev))
		return -EIO;

	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}
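/*
 * Sketch (added for clarity) of the HCR layout driven by mlx4_cmd_post()
 * below, reconstructed from the HCR_*_OFFSET constants at the top of this
 * file (32-bit dword indices, big-endian on the wire):
 *
 *	hcr[0..1]  in_param (hi, lo)	HCR_IN_PARAM_OFFSET	(0x00)
 *	hcr[2]	   in_modifier		HCR_IN_MODIFIER_OFFSET	(0x08)
 *	hcr[3..4]  out_param (hi, lo)	HCR_OUT_PARAM_OFFSET	(0x0c)
 *	hcr[5]	   token << 16		HCR_TOKEN_OFFSET	(0x14)
 *	hcr[6]	   go/toggle/event bits, op_modifier and opcode
 *					HCR_STATUS_OFFSET	(0x18)
 *
 * The go bit must become visible last, hence the wmb() between the
 * parameter dwords and the final dword.
 */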
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EIO;
	unsigned long end;

	mutex_lock(&dev->persist->device_state_mutex);
	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the chip was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	if (pci_channel_offline(dev->persist->pdev) ||
	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		goto out;
	}

	end = jiffies;
	end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	if (ret)
		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
			  op, ret, in_param, in_modifier, op_modifier);
	mutex_unlock(&dev->persist->device_state_mutex);

	return ret;
}
static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			  int out_is_imm, u32 in_modifier, u8 op_modifier,
			  u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
	int ret;

	mutex_lock(&priv->cmd.slave_cmd_mutex);

	vhcr->in_param = cpu_to_be64(in_param);
	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
	vhcr->in_modifier = cpu_to_be32(in_modifier);
	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
	vhcr->status = 0;
	vhcr->flags = !!(priv->cmd.use_events) << 6;

	if (mlx4_is_master(dev)) {
		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		}
		if (ret &&
		    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
	} else {
		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
				    MLX4_COMM_TIME + timeout);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		} else {
			if (dev->persist->state &
			    MLX4_DEVICE_STATE_INTERNAL_ERROR)
				ret = mlx4_internal_err_ret_value(dev, op,
								  op_modifier);
			else
				mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
		}
	}

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return ret;
}
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

	down(&priv->cmd.poll_sem);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		goto out;
	}

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out_reset;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			err = -EIO;
			goto out_reset;
		}

		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
			goto out;
		}

		cond_resched();
	}

	if (cmd_pending(dev)) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EIO;
		goto out_reset;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
	stat = be32_to_cpu((__force __be32)
			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
	err = mlx4_status_to_errno(stat);
	if (err) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, stat);
		if (mlx4_closing_cmd_fatal_error(op, stat))
			goto out_reset;
	}
	goto out;

out_reset:
	err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	up(&priv->cmd.poll_sem);
	return err;
}
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->fw_status = status;
	context->result    = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}
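/*
 * Note (added for clarity): command contexts live in a free list threaded
 * through context->next.  Each time a context is taken, its token is
 * advanced by token_mask + 1, so the low bits still index the context
 * array while the high bits act as a generation count.  That is what lets
 * mlx4_cmd_event() above silently discard a completion whose token no
 * longer matches - i.e. a command that already timed out and whose
 * context has since been recycled.
 */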
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	long ret_wait;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	reinit_completion(&context->done);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, context->token, 1);
	if (err)
		goto out_reset;

	if (op == MLX4_CMD_SENSE_PORT) {
		ret_wait =
			wait_for_completion_interruptible_timeout(&context->done,
								  msecs_to_jiffies(timeout));
		if (ret_wait < 0) {
			context->fw_status = 0;
			context->out_param = 0;
			context->result = 0;
		}
	} else {
		ret_wait = (long)wait_for_completion_timeout(&context->done,
							     msecs_to_jiffies(timeout));
	}
	if (!ret_wait) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		if (op == MLX4_CMD_NOP) {
			err = -EBUSY;
			goto out;
		} else {
			err = -EIO;
			goto out_reset;
		}
	}

	err = context->result;
	if (err) {
		/* Since we do not want to have this error message always
		 * displayed at driver start when there are ConnectX2 HCAs
		 * on the host, we deprecate the error message for this
		 * specific command/input_mod/opcode_mod/fw-status to be debug.
		 */
		if (op == MLX4_CMD_SET_PORT &&
		    (in_modifier == 1 || in_modifier == 2) &&
		    op_modifier == MLX4_SET_PORT_IB_OPCODE &&
		    context->fw_status == CMD_STAT_BAD_SIZE)
			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		else
			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;

		goto out;
	}

	if (out_is_imm)
		*out_param = context->out_param;

out_reset:
	err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
	       int out_is_imm, u32 in_modifier, u8 op_modifier,
	       u16 op, unsigned long timeout, int native)
{
	if (pci_channel_offline(dev->persist->pdev))
		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);

	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			return mlx4_internal_err_ret_value(dev, op,
							   op_modifier);
		if (mlx4_priv(dev)->cmd.use_events)
			return mlx4_cmd_wait(dev, in_param, out_param,
					     out_is_imm, in_modifier,
					     op_modifier, op, timeout);
		else
			return mlx4_cmd_poll(dev, in_param, out_param,
					     out_is_imm, in_modifier,
					     op_modifier, op, timeout);
	}
	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
			      in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
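/*
 * Usage sketch (illustrative only, not part of the driver's control
 * flow): callers normally reach __mlx4_cmd() through the static inline
 * wrappers in <linux/mlx4/cmd.h> - mlx4_cmd() for commands with no
 * immediate output, mlx4_cmd_box() for mailbox output and mlx4_cmd_imm()
 * for an immediate 64-bit result.  A minimal mailbox-less command looks
 * like:
 *
 *	err = mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP,
 *		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
 *
 * MLX4_CMD_NATIVE forces the command onto the local HCR even on a
 * multi-function device, while MLX4_CMD_WRAPPED lets a slave tunnel it to
 * the master through mlx4_slave_cmd().
 */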
int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
			   int slave, u64 slave_addr,
			   int size, int is_read)
{
	u64 in_param;
	u64 out_param;

	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
			 slave_addr, master_addr, slave, size);
		return -EINVAL;
	}

	if (is_read) {
		in_param = (u64) slave | slave_addr;
		out_param = (u64) dev->caps.function | master_addr;
	} else {
		in_param = (u64) dev->caps.function | master_addr;
		out_param = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
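/*
 * Note (added for clarity): ACCESS_MEM packs the 7-bit function id into
 * the low (page-offset) bits of the guest physical address, which is why
 * the sanity check above insists that both addresses are 4KB-aligned,
 * that the slave id fits in 0x7f, and that the size is a multiple of 256.
 * Read and write directions simply swap which side is encoded into
 * in_param versus out_param.
 */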
static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox)
{
	struct ib_smp *in_mad  = (struct ib_smp *)(inbox->buf);
	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
	int err;
	int i;

	if (index & 0x1f)
		return -EINVAL;

	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	for (i = 0; i < 32; ++i)
		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);

	return err;
}
static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox)
{
	int i;
	int err;

	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
		if (err)
			return err;
	}

	return 0;
}
#define PORT_CAPABILITY_LOCATION_IN_SMP 20
#define PORT_STATE_OFFSET 32

static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
{
	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
		return IB_PORT_ACTIVE;
	else
		return IB_PORT_DOWN;
}
static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct ib_smp *smp = inbox->buf;
	u32 index;
	u8 port, slave_port;
	u8 opcode_modifier;
	u16 *table;
	int err;
	int vidx, pidx;
	int network_view;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct ib_smp *outsmp = outbox->buf;
	__be16 *outtab = (__be16 *)(outsmp->data);
	__be32 slave_cap_mask;
	__be64 slave_node_guid;

	slave_port = vhcr->in_modifier;
	port = mlx4_slave_convert_port(dev, slave, slave_port);

	/* network-view bit is for driver use only, and should not be passed to FW */
	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
	network_view = !!(vhcr->op_modifier & 0x8);

	if (smp->base_version == 1 &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    smp->class_version == 1) {
		/* host view is paravirtualized */
		if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
				index = be32_to_cpu(smp->attr_mod);
				if (port < 1 || port > dev->caps.num_ports)
					return -EINVAL;
				table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
						sizeof(*table) * 32, GFP_KERNEL);
				if (!table)
					return -ENOMEM;
				/* need to get the full pkey table because the paravirtualized
				 * pkeys may be scattered among several pkey blocks.
				 */
				err = get_full_pkey_table(dev, port, table, inbox, outbox);
				if (!err) {
					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
					}
				}
				kfree(table);
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
				/* get the slave specific caps */
				/* do the command */
				smp->attr_mod = cpu_to_be32(port);
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   port, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				/* modify the response for slaves */
				if (!err && slave != mlx4_master_func_num(dev)) {
					u8 *state = outsmp->data + PORT_STATE_OFFSET;

					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
				__be64 guid = mlx4_get_admin_guid(dev, slave,
								  port);

				/* set the PF admin guid to the FW/HW burned
				 * GUID, if it wasn't yet set
				 */
				if (slave == 0 && guid == 0) {
					smp->attr_mod = 0;
					err = mlx4_cmd_box(dev,
							   inbox->dma,
							   outbox->dma,
							   vhcr->in_modifier,
							   opcode_modifier,
							   vhcr->op,
							   MLX4_CMD_TIME_CLASS_C,
							   MLX4_CMD_NATIVE);
					if (err)
						return err;
					mlx4_set_admin_guid(dev,
							    *(__be64 *)outsmp->data,
							    slave, port);
				} else {
					memcpy(outsmp->data, &guid, 8);
				}

				/* clean all other gids */
				memset(outsmp->data + 8, 0, 56);
				return 0;
			}
			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
						   port, opcode_modifier,
						   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
					memcpy(outsmp->data + 12, &slave_node_guid, 8);
				}
				return err;
			}
		}
	}

	/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
	 * These are the MADs used by ib verbs (such as ib_query_gids).
	 */
	if (slave != mlx4_master_func_num(dev) &&
	    !mlx4_vf_smi_enabled(dev, slave, port)) {
		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
				 slave, smp->mgmt_class, smp->method,
				 network_view ? "Network" : "Host",
				 be16_to_cpu(smp->attr_id));
			return -EPERM;
		}
	}

	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
			    vhcr->in_modifier, opcode_modifier,
			    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
				  struct mlx4_vhcr *vhcr,
				  struct mlx4_cmd_mailbox *inbox,
				  struct mlx4_cmd_mailbox *outbox,
				  struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	u64 in_param;
	u64 out_param;
	int err;

	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
	if (cmd->encode_slave_id) {
		in_param &= 0xffffffffffffff00ll;
		in_param |= slave;
	}

	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	if (cmd->out_is_imm)
		vhcr->out_param = out_param;

	return err;
}
static struct mlx4_cmd_info cmd_info[] = {
	{
		.opcode = MLX4_CMD_QUERY_FW,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FW_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_HCA,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_QUERY_DEV_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_ADAPTER,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_INIT_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_CLOSE_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CLOSE_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_PORT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_PORT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_MAP_EQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAP_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_EQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_NOP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_CONFIG_DEV,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CONFIG_DEV_wrapper
	},
	{
		.opcode = MLX4_CMD_ALLOC_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ALLOC_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_FREE_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_FREE_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_MPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_MPT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_MPT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_READ_MTT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_WRITE_MTT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_WRITE_MTT_wrapper
	},
	{
		.opcode = MLX4_CMD_SYNC_TPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_HW2SW_EQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_EQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_QUERY_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_CQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_CQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_MODIFY_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MODIFY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_SRQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_SRQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_ARM_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ARM_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_RST2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_RST2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2RTR_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2RTR_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTS2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQERR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQERR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2ERR_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2SQD_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2SQD_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2SQD_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2RST_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_2RST_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_QP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UNSUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UPDATE_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_UPDATE_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_GET_OP_REQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	{
		.opcode = MLX4_CMD_ALLOCATE_VPP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	{
		.opcode = MLX4_CMD_SET_VPORT_QOS,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	{
		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL, /* XXX verify: only demux can do this */
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_MAD_IFC,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAD_IFC_wrapper
	},
	{
		.opcode = MLX4_CMD_MAD_DEMUX,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_IF_STAT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_IF_STAT_wrapper
	},
	{
		.opcode = MLX4_CMD_ACCESS_REG,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ACCESS_REG_wrapper,
	},
	{
		.opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	/* Native multicast commands are not available for guests */
	{
		.opcode = MLX4_CMD_QP_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_ATTACH_wrapper
	},
	{
		.opcode = MLX4_CMD_PROMISC,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_PROMISC_wrapper
	},
	/* Ethernet specific commands */
	{
		.opcode = MLX4_CMD_SET_VLAN_FLTR,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_MCAST_FLTR,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_DUMP_ETH_STATS,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
	},
	{
		.opcode = MLX4_CMD_INFORM_FLR_DONE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	/* flow steering commands */
	{
		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
	},
	{
		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
	},
	{
		.opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
	{
		.opcode = MLX4_CMD_VIRT_PORT_MAP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
};
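/*
 * Note (added for clarity): cmd_info[] is the master's policy table for
 * commands arriving from slaves.  mlx4_master_process_vhcr() below does a
 * linear opcode lookup here; entries with a wrapper get paravirtualized
 * handling (or an unconditional -EPERM via mlx4_CMD_EPERM_wrapper), while
 * wrapper-less entries are passed straight through to firmware via
 * __mlx4_cmd() with the mailbox DMA addresses substituted for the guest
 * parameters.
 */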
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_info *cmd = NULL;
	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
	struct mlx4_vhcr *vhcr;
	struct mlx4_cmd_mailbox *inbox = NULL;
	struct mlx4_cmd_mailbox *outbox = NULL;
	u64 in_param;
	u64 out_param;
	int ret = 0;
	int i;
	int err = 0;

	/* Create sw representation of Virtual HCR */
	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
	if (!vhcr)
		return -ENOMEM;

	/* DMA in the vHCR */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
					    MLX4_ACCESS_MEM_ALIGN), 1);
		if (ret) {
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
					 __func__, ret);
			kfree(vhcr);
			return ret;
		}
	}

	/* Fill SW VHCR fields */
	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
	vhcr->token = be16_to_cpu(vhcr_cmd->token);
	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

	/* Lookup command */
	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
		if (vhcr->op == cmd_info[i].opcode) {
			cmd = &cmd_info[i];
			break;
		}
	}
	if (!cmd) {
		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
			 vhcr->op, slave);
		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
		goto out_status;
	}

	/* Read inbox */
	if (cmd->has_inbox) {
		vhcr->in_param &= INBOX_MASK;
		inbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(inbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			inbox = NULL;
			goto out_status;
		}

		ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
				      vhcr->in_param,
				      MLX4_MAILBOX_SIZE, 1);
		if (ret) {
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
					 __func__, cmd->opcode);
			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
			goto out_status;
		}
	}

	/* Apply permission and bound checks if applicable */
	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
			  vhcr->op, slave, vhcr->in_modifier);
		vhcr_cmd->status = CMD_STAT_BAD_OP;
		goto out_status;
	}

	/* Allocate outbox */
	if (cmd->has_outbox) {
		outbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(outbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			outbox = NULL;
			goto out_status;
		}
	}

	/* Execute the command! */
	if (cmd->wrapper) {
		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
				   cmd);
		if (cmd->out_is_imm)
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
	} else {
		in_param = cmd->has_inbox ? (u64) inbox->dma :
			vhcr->in_param;
		out_param = cmd->has_outbox ? (u64) outbox->dma :
			vhcr->out_param;
		err = __mlx4_cmd(dev, in_param, &out_param,
				 cmd->out_is_imm, vhcr->in_modifier,
				 vhcr->op_modifier, vhcr->op,
				 MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_NATIVE);

		if (cmd->out_is_imm) {
			vhcr->out_param = out_param;
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
		}
	}

	if (err) {
		if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
			mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
				  vhcr->op, slave, vhcr->errno, err);
		vhcr_cmd->status = mlx4_errno_to_status(err);
		goto out_status;
	}

	/* Write outbox if command completed successfully */
	if (cmd->has_outbox && !vhcr_cmd->status) {
		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
				      vhcr->out_param,
				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
		if (ret) {
			/* If we failed to write back the outbox after the
			 * command was successfully executed, we must fail this
			 * slave, as it is now in an undefined state.
			 */
			if (!(dev->persist->state &
			      MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
			ret = -EINVAL;
			goto out;
		}
	}

out_status:
	/* DMA back vhcr result */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr),
					    MLX4_ACCESS_MEM_ALIGN),
				      MLX4_CMD_WRAPPED);
		if (ret)
			mlx4_err(dev, "%s:Failed writing vhcr result\n",
				 __func__);
		else if (vhcr->e_bit &&
			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
			mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
				  slave);
	}

out:
	kfree(vhcr);
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
						   int slave, int port)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vf_immed_vlan_work *work;
	struct mlx4_dev *dev = &(priv->dev);
	int err;
	int admin_vlan_ix = NO_INDX;

	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
	    vp_oper->state.default_qos == vp_admin->default_qos &&
	    vp_oper->state.link_state == vp_admin->link_state &&
	    vp_oper->state.qos_vport == vp_admin->qos_vport)
		return 0;

	if (!(priv->mfunc.master.slave_state[slave].active &&
	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
		/* even if the UPDATE_QP command isn't supported, we still want
		 * to set this VF link according to the admin directive
		 */
		vp_oper->state.link_state = vp_admin->link_state;
		return -1;
	}

	mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
		 slave, port);
	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
		 vp_admin->default_vlan, vp_admin->default_qos,
		 vp_admin->link_state);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan,
						   &admin_vlan_ix);
			if (err) {
				kfree(work);
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
		} else {
			admin_vlan_ix = NO_INDX;
		}
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
		mlx4_dbg(&priv->dev,
			 "alloc vlan %d idx %d slave %d port %d\n",
			 (int)(vp_admin->default_vlan),
			 admin_vlan_ix, slave, port);
	}

	/* save original vlan ix and vlan id */
	work->orig_vlan_id = vp_oper->state.default_vlan;
	work->orig_vlan_ix = vp_oper->vlan_idx;

	/* handle new qos */
	if (vp_oper->state.default_qos != vp_admin->default_qos)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;

	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
		vp_oper->vlan_idx = admin_vlan_ix;

	vp_oper->state.default_vlan = vp_admin->default_vlan;
	vp_oper->state.default_qos = vp_admin->default_qos;
	vp_oper->state.link_state = vp_admin->link_state;
	vp_oper->state.qos_vport = vp_admin->qos_vport;

	if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;

	/* iterate over QPs owned by this slave, using UPDATE_QP */
	work->port = port;
	work->slave = slave;
	work->qos = vp_oper->state.default_qos;
	work->qos_vport = vp_oper->state.qos_vport;
	work->vlan_id = vp_oper->state.default_vlan;
	work->vlan_ix = vp_oper->vlan_idx;
	work->priv = priv;
	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
	queue_work(priv->mfunc.master.comm_wq, &work->work);

	return 0;
}
static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
{
	struct mlx4_qos_manager *port_qos_ctl;
	struct mlx4_priv *priv = mlx4_priv(dev);

	port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
	bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);

	/* Enable only default prio at PF init routine */
	set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
}
static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
{
	int i;
	int err;
	int num_vfs;
	u16 available_vpp;
	u8 vpp_param[MLX4_NUM_UP];
	struct mlx4_qos_manager *port_qos;
	struct mlx4_priv *priv = mlx4_priv(dev);

	err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
	if (err) {
		mlx4_info(dev, "Failed querying available VPPs\n");
		return;
	}

	port_qos = &priv->mfunc.master.qos_ctl[port];
	num_vfs = (available_vpp /
		   bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));

	for (i = 0; i < MLX4_NUM_UP; i++) {
		if (test_bit(i, port_qos->priority_bm))
			vpp_param[i] = num_vfs;
		else
			vpp_param[i] = 0;
	}

	err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
	if (err) {
		mlx4_info(dev, "Failed allocating VPPs\n");
		return;
	}

	/* Query actual allocated VPP, just to make sure */
	err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
	if (err) {
		mlx4_info(dev, "Failed querying available VPPs\n");
		return;
	}

	port_qos->num_of_qos_vfs = num_vfs;
	mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp);

	for (i = 0; i < MLX4_NUM_UP; i++)
		mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
			 vpp_param[i]);
}
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
	int port, err;
	struct mlx4_vport_state *vp_admin;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	for (port = min_port; port <= max_port; port++) {
		if (!test_bit(port - 1, actv_ports.ports))
			continue;
		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
			priv->mfunc.master.vf_admin[slave].enable_smi[port];
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
		vp_oper->state = *vp_admin;
		if (MLX4_VGT != vp_admin->default_vlan) {
			err = __mlx4_register_vlan(&priv->dev, port,
						   vp_admin->default_vlan, &(vp_oper->vlan_idx));
			if (err) {
				vp_oper->vlan_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No vlan resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
				 (int)(vp_oper->state.default_vlan),
				 vp_oper->vlan_idx, slave, port);
		}
		if (vp_admin->spoofchk) {
			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
							       port,
							       vp_admin->mac);
			if (0 > vp_oper->mac_idx) {
				err = vp_oper->mac_idx;
				vp_oper->mac_idx = NO_INDX;
				mlx4_warn(&priv->dev,
					  "No mac resources slave %d, port %d\n",
					  slave, port);
				return err;
			}
			mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
		}
	}
	return 0;
}
static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
{
	int port;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	for (port = min_port; port <= max_port; port++) {
		if (!test_bit(port - 1, actv_ports.ports))
			continue;
		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
			MLX4_VF_SMI_DISABLED;
		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
		if (NO_INDX != vp_oper->vlan_idx) {
			__mlx4_unregister_vlan(&priv->dev,
					       port, vp_oper->state.default_vlan);
			vp_oper->vlan_idx = NO_INDX;
		}
		if (NO_INDX != vp_oper->mac_idx) {
			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
			vp_oper->mac_idx = NO_INDX;
		}
	}
}
static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
			       u16 param, u8 toggle)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	u32 reply;
	u8 is_going_down = 0;
	int i;
	unsigned long flags;

	slave_state[slave].comm_toggle ^= 1;
	reply = (u32) slave_state[slave].comm_toggle << 31;
	if (toggle != slave_state[slave].comm_toggle) {
		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
			  toggle, slave);
		goto reset_slave;
	}
	if (cmd == MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
		slave_state[slave].active = false;
		slave_state[slave].old_vlan_api = false;
		mlx4_master_deactivate_admin_state(priv, slave);
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
			slave_state[slave].event_eq[i].eqn = -1;
			slave_state[slave].event_eq[i].token = 0;
		}
		/* check if we are in the middle of the FLR process;
		 * if so, return "retry" status to the slave.
		 */
		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
			goto inform_slave_state;

		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);

		/* write the version in the event field */
		reply |= mlx4_comm_get_version();

		goto reset_slave;
	}
	/* command from slave in the middle of FLR */
	if (cmd != MLX4_COMM_CMD_RESET &&
	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
		mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
			  slave, cmd);
		return;
	}

	switch (cmd) {
	case MLX4_COMM_CMD_VHCR0:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
			goto reset_slave;
		slave_state[slave].vhcr_dma = ((u64) param) << 48;
		priv->mfunc.master.slave_state[slave].cookie = 0;
		break;
	case MLX4_COMM_CMD_VHCR1:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
		break;
	case MLX4_COMM_CMD_VHCR2:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
		break;
	case MLX4_COMM_CMD_VHCR_EN:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= param;
		if (mlx4_master_activate_admin_state(priv, slave))
			goto reset_slave;
		slave_state[slave].active = true;
		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
		break;
	case MLX4_COMM_CMD_VHCR_POST:
		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
			mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
				  slave, cmd, slave_state[slave].last_cmd);
			goto reset_slave;
		}

		mutex_lock(&priv->cmd.slave_cmd_mutex);
		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
				 slave);
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			goto reset_slave;
		}
		mutex_unlock(&priv->cmd.slave_cmd_mutex);
		break;
	default:
		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
		goto reset_slave;
	}
	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = cmd;
	else
		is_going_down = 1;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	if (is_going_down) {
		mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
			  cmd, slave);
		return;
	}
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);

	return;

reset_slave:
	/* cleanup any slave resources */
	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_delete_all_resources_for_slave(dev, slave);

	if (cmd != MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
			  slave, cmd);
		/* Turn on internal error, letting the slave reset itself
		 * immediately; otherwise it might wait until the command
		 * timeout has passed.
		 */
		reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
	}

	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	/* with slave in the middle of flr, no need to clean resources again. */
inform_slave_state:
	memset(&slave_state[slave].event_eq, 0,
	       sizeof(struct mlx4_slave_event_eq_info));
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	wmb();
}
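/*
 * Note on the bring-up handshake handled above (added for clarity): a
 * slave delivers its 64-bit VHCR DMA address in four 16-bit pieces -
 * VHCR0 (bits 63-48), VHCR1 (bits 47-32), VHCR2 (bits 31-16) and VHCR_EN
 * (bits 15-0) - and the master enforces that strict ordering via
 * last_cmd, resetting the slave on any out-of-sequence command.  Only
 * after VHCR_EN has activated the admin state may VHCR_POST trigger
 * actual command processing.
 */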
/* master command processing */
void mlx4_master_comm_channel(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work,
			     struct mlx4_mfunc_master_ctx,
			     comm_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	__be32 *bit_vec;
	u32 comm_cmd;
	u32 vec;
	int i, j, slave;
	int toggle;
	int served = 0;
	int reported = 0;
	u32 slt;

	bit_vec = master->comm_arm_bit_vector;
	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
		vec = be32_to_cpu(bit_vec[i]);
		for (j = 0; j < 32; j++) {
			if (!(vec & (1 << j)))
				continue;
			++reported;
			slave = (i * 32) + j;
			comm_cmd = swab32(readl(
					  &mfunc->comm[slave].slave_write));
			slt = swab32(readl(&mfunc->comm[slave].slave_read))
				>> 31;
			toggle = comm_cmd >> 31;
			if (toggle != slt) {
				if (master->slave_state[slave].comm_toggle
				    != slt) {
					pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
						slave, slt,
						master->slave_state[slave].comm_toggle);
					master->slave_state[slave].comm_toggle =
						slt;
				}
				mlx4_master_do_cmd(dev, slave,
						   comm_cmd >> 16 & 0xff,
						   comm_cmd & 0xffff, toggle);
				++served;
			}
		}
	}

	if (reported && reported != served)
		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
			  reported, served);

	if (mlx4_ARM_COMM_CHANNEL(dev))
		mlx4_warn(dev, "Failed to arm comm channel events\n");
}
static int sync_toggles(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 wr_toggle;
	u32 rd_toggle;
	unsigned long end;

	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
	if (wr_toggle == 0xffffffff)
		end = jiffies + msecs_to_jiffies(30000);
	else
		end = jiffies + msecs_to_jiffies(5000);

	while (time_before(jiffies, end)) {
		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
			/* PCI might be offline */
			msleep(100);
			wr_toggle = swab32(readl(&priv->mfunc.comm->
					   slave_write));
			continue;
		}

		if (rd_toggle >> 31 == wr_toggle >> 31) {
			priv->cmd.comm_toggle = rd_toggle >> 31;
			return 0;
		}

		cond_resched();
	}

	/*
	 * we could reach here if for example the previous VM using this
	 * function misbehaved and left the channel with unsynced state. We
	 * should fix this here and give this VM a chance to use a properly
	 * synced channel
	 */
	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
	priv->cmd.comm_toggle = 0;

	return 0;
}
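/*
 * Note (added for clarity): a reading of 0xffffffff from the comm channel
 * is how a surprise-removed or offline PCI device manifests, which is why
 * sync_toggles() above treats it as "keep waiting" (with the longer 30
 * second budget) rather than as a real toggle value.
 */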
int mlx4_multi_func_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i, j, err, port;

	if (mlx4_is_master(dev))
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev,
						   priv->fw.comm_bar) +
				priv->fw.comm_base, MLX4_COMM_PAGESIZE);
	else
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev, 2) +
				MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
	if (!priv->mfunc.comm) {
		mlx4_err(dev, "Couldn't map communication vector\n");
		goto err_vhcr;
	}

	if (mlx4_is_master(dev)) {
		struct mlx4_vf_oper_state *vf_oper;
		struct mlx4_vf_admin_state *vf_admin;

		priv->mfunc.master.slave_state =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_slave_state), GFP_KERNEL);
		if (!priv->mfunc.master.slave_state)
			goto err_comm;

		priv->mfunc.master.vf_admin =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
		if (!priv->mfunc.master.vf_admin)
			goto err_comm_admin;

		priv->mfunc.master.vf_oper =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
		if (!priv->mfunc.master.vf_oper)
			goto err_comm_oper;

		for (i = 0; i < dev->num_slaves; ++i) {
			vf_admin = &priv->mfunc.master.vf_admin[i];
			vf_oper = &priv->mfunc.master.vf_oper[i];
			s_state = &priv->mfunc.master.slave_state[i];
			s_state->last_cmd = MLX4_COMM_CMD_RESET;
			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
				s_state->event_eq[j].eqn = -1;
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_write);
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_read);
			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
				struct mlx4_vport_state *admin_vport;
				struct mlx4_vport_state *oper_vport;

				s_state->vlan_filter[port] =
					kzalloc(sizeof(struct mlx4_vlan_fltr),
						GFP_KERNEL);
				if (!s_state->vlan_filter[port]) {
					if (--port)
						kfree(s_state->vlan_filter[port]);
					goto err_slaves;
				}

				admin_vport = &vf_admin->vport[port];
				oper_vport = &vf_oper->vport[port].state;
				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
				admin_vport->default_vlan = MLX4_VGT;
				oper_vport->default_vlan = MLX4_VGT;
				admin_vport->qos_vport =
						MLX4_VPP_DEFAULT_VPORT;
				oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
				vf_oper->vport[port].vlan_idx = NO_INDX;
				vf_oper->vport[port].mac_idx = NO_INDX;
				mlx4_set_random_admin_guid(dev, i, port);
			}
			spin_lock_init(&s_state->lock);
		}

		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
			for (port = 1; port <= dev->caps.num_ports; port++) {
				if (mlx4_is_eth(dev, port)) {
					mlx4_set_default_port_qos(dev, port);
					mlx4_allocate_port_vpps(dev, port);
				}
			}
		}

		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
		INIT_WORK(&priv->mfunc.master.comm_work,
			  mlx4_master_comm_channel);
		INIT_WORK(&priv->mfunc.master.slave_event_work,
			  mlx4_gen_slave_eqe);
		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
			  mlx4_master_handle_slave_flr);
		spin_lock_init(&priv->mfunc.master.slave_state_lock);
		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
		priv->mfunc.master.comm_wq =
			create_singlethread_workqueue("mlx4_comm");
		if (!priv->mfunc.master.comm_wq)
			goto err_slaves;

		if (mlx4_init_resource_tracker(dev))
			goto err_thread;
	} else {
		err = sync_toggles(dev);
		if (err) {
			mlx4_err(dev, "Couldn't sync toggles\n");
			goto err_comm;
		}
	}
	return 0;

err_thread:
	flush_workqueue(priv->mfunc.master.comm_wq);
	destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
	while (i--) {
		for (port = 1; port <= MLX4_MAX_PORTS; port++)
			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
	}
	kfree(priv->mfunc.master.vf_oper);
err_comm_oper:
	kfree(priv->mfunc.master.vf_admin);
err_comm_admin:
	kfree(priv->mfunc.master.slave_state);
err_comm:
	iounmap(priv->mfunc.comm);
err_vhcr:
	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
			  priv->mfunc.vhcr,
			  priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;
	return -ENOMEM;
}

int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int flags = 0;

	if (!priv->cmd.initialized) {
		mutex_init(&priv->cmd.slave_cmd_mutex);
		sema_init(&priv->cmd.poll_sem, 1);
		priv->cmd.use_events = 0;
		priv->cmd.toggle = 1;
		priv->cmd.initialized = 1;
		flags |= MLX4_CMD_CLEANUP_STRUCT;
	}

	if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
		priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
					0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
		if (!priv->cmd.hcr) {
			mlx4_err(dev, "Couldn't map command register\n");
			goto err;
		}
		flags |= MLX4_CMD_CLEANUP_HCR;
	}

	if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
		priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
						      PAGE_SIZE,
						      &priv->mfunc.vhcr_dma,
						      GFP_KERNEL);
		if (!priv->mfunc.vhcr)
			goto err;

		flags |= MLX4_CMD_CLEANUP_VHCR;
	}

	if (!priv->cmd.pool) {
		priv->cmd.pool = pci_pool_create("mlx4_cmd",
						 dev->persist->pdev,
						 MLX4_MAILBOX_SIZE,
						 MLX4_MAILBOX_SIZE, 0);
		if (!priv->cmd.pool)
			goto err;

		flags |= MLX4_CMD_CLEANUP_POOL;
	}

	return 0;

err:
	mlx4_cmd_cleanup(dev, flags);
	return -ENOMEM;
}

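/*
 * The flags mask built up above records exactly which resources this call
 * created, so the error path unwinds only those. The same masks let
 * callers clean up selectively; for example, a caller that only needs to
 * release the virtual HCR can use
 * mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR) and keep the mailbox pool
 * and HCR mapping alive.
 */
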
void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int slave;
	u32 slave_read;

	/* Report an internal error event to all
	 * communication channels.
	 */
	for (slave = 0; slave < dev->num_slaves; slave++) {
		slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
		__raw_writel((__force u32)cpu_to_be32(slave_read),
			     &priv->mfunc.comm[slave].slave_read);
		/* Make sure that our comm channel write doesn't
		 * get mixed in with writes from another CPU.
		 */
		mmiowb();
	}
}

void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, port;

	if (mlx4_is_master(dev)) {
		flush_workqueue(priv->mfunc.master.comm_wq);
		destroy_workqueue(priv->mfunc.master.comm_wq);
		for (i = 0; i < dev->num_slaves; i++) {
			for (port = 1; port <= MLX4_MAX_PORTS; port++)
				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
		}
		kfree(priv->mfunc.master.slave_state);
		kfree(priv->mfunc.master.vf_admin);
		kfree(priv->mfunc.master.vf_oper);
		dev->num_slaves = 0;
	}

	iounmap(priv->mfunc.comm);
}

void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
		pci_pool_destroy(priv->cmd.pool);
		priv->cmd.pool = NULL;
	}

	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
		iounmap(priv->cmd.hcr);
		priv->cmd.hcr = NULL;
	}
	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
		priv->mfunc.vhcr = NULL;
	}
	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
		priv->cmd.initialized = 0;
}

/*
 * Switch to using events to issue FW commands (can only be called
 * after the event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
				    sizeof(struct mlx4_cmd_context),
				    GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next = i + 1;
		/* To support fatal error flow, initialize all
		 * cmd contexts to allow simulating completions
		 * with complete() at any time.
		 */
		init_completion(&priv->cmd.context[i].done);
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
	spin_lock_init(&priv->cmd.context_lock);

	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	down(&priv->cmd.poll_sem);
	priv->cmd.use_events = 1;

	return 0;
}

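/*
 * The loop above rounds max_cmds up to a power of two and subtracts one,
 * so token_mask is a contiguous low-bit mask (e.g. max_cmds == 10 yields
 * token_mask == 0xf). The completion path can then map an event token
 * back to its context with (token & priv->cmd.token_mask).
 */
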
/*
 * Switch back to polling (used when shutting down the device).
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);

	up(&priv->cmd.poll_sem);
}

struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
			   struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);

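/*
 * The usual mailbox life cycle around a mailbox-based command, as a
 * sketch (mlx4_get_counter_stats() below is a concrete instance):
 *
 *	mailbox = mlx4_alloc_cmd_mailbox(dev);
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	err = mlx4_cmd_box(dev, 0, mailbox->dma, in_mod, op_mod,
 *			   op, timeout, native);
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */
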
u32 mlx4_comm_get_version(void)
{
	return ((u32)CMD_CHAN_IF_REV << 8) | (u32)CMD_CHAN_VER;
}

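/*
 * The returned word packs the channel interface revision into the high
 * byte and the channel version into the low byte, so a peer can validate
 * both fields with a single comparison.
 */
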
static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
			 vf, dev->persist->num_vfs);
		return -EINVAL;
	}

	return vf + 1;
}

int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
{
	if (slave < 1 || slave > dev->persist->num_vfs) {
		mlx4_err(dev,
			 "Bad slave number:%d (number of activated slaves: %lu)\n",
			 slave, dev->num_slaves);
		return -EINVAL;
	}
	return slave - 1;
}

void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context;
	int i;

	spin_lock(&priv->cmd.context_lock);
	if (priv->cmd.context) {
		for (i = 0; i < priv->cmd.max_cmds; ++i) {
			context = &priv->cmd.context[i];
			context->fw_status = CMD_STAT_INTERNAL_ERR;
			context->result =
				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
			complete(&context->done);
		}
	}
	spin_unlock(&priv->cmd.context_lock);
}

struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv_ports;
	int vf;

	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);

	if (slave == 0) {
		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
		return actv_ports;
	}

	vf = mlx4_get_vf_indx(dev, slave);
	if (vf < 0)
		return actv_ports;

	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
		   min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
		       dev->caps.num_ports));

	return actv_ports;
}
EXPORT_SYMBOL_GPL(mlx4_get_active_ports);

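/*
 * Example reading of the bitmap: a VF that owns only the second port of
 * a dual-port HCA has dev_vfs[vf].min_port == 2 and n_ports == 1, so only
 * bit 1 is set; slave 0 (the PF) always gets every physical port.
 */
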
int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
{
	int n;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port <= 0 || port > m)
		return -EINVAL;

	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	if (port <= n)
		port = n + 1;

	return port;
}
EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);

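/*
 * Slaves number their ports 1..m independently of the physical numbering;
 * e.g. a single-port VF sitting on physical port 2 passes port 1 here and
 * gets 2 back, while a slave port outside 1..m yields -EINVAL.
 */
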
int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);

	if (test_bit(port - 1, actv_ports.ports))
		return port -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);

	return -1;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);

struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
						   int port)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	if (port <= 0 || port > dev->caps.num_ports)
		return slaves_pport;

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (test_bit(port - 1, actv_ports.ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);

struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
		struct mlx4_dev *dev,
		const struct mlx4_active_ports *crit_ports)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
				 dev->caps.num_ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);

static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
			+ 1;
	int max_port = min_port +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port < min_port)
		port = min_port;
	else if (port >= max_port)
		port = max_port - 1;

	return port;
}

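/*
 * Unlike mlx4_slave_convert_port(), this helper cannot fail: a port
 * outside the slave's range is clamped to the closest port the slave
 * owns, which is why the mlx4_set_vf_*() entry points below tolerate
 * either numbering.
 */
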
static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
			      int max_tx_rate)
{
	int i;
	int err;
	struct mlx4_qos_manager *port_qos;
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];

	port_qos = &priv->mfunc.master.qos_ctl[port];
	memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);

	if (slave > port_qos->num_of_qos_vfs) {
		mlx4_info(dev, "No available VPP resources for this VF\n");
		return -EINVAL;
	}

	/* Query for default QoS values from Vport 0 is needed */
	err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
		return err;
	}

	for (i = 0; i < MLX4_NUM_UP; i++) {
		if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
			vpp_qos[i].max_avg_bw = max_tx_rate;
			vpp_qos[i].enable = 1;
		} else {
			/* If the user supplied tx_rate == 0, no rate limit
			 * configuration is required, so we leave the value
			 * of max_avg_bw as queried from Vport 0.
			 */
			vpp_qos[i].enable = 0;
		}
	}

	err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
		return err;
	}

	return 0;
}

static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
					struct mlx4_vport_state *vf_admin)
{
	struct mlx4_qos_manager *info;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
		return false;

	info = &priv->mfunc.master.qos_ctl[port];

	if (vf_admin->default_vlan != MLX4_VGT &&
	    test_bit(vf_admin->default_qos, info->priority_bm))
		return true;

	return false;
}

static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
				       struct mlx4_vport_state *vf_admin,
				       int vlan, int qos)
{
	struct mlx4_vport_state dummy_admin = {0};

	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
	    !vf_admin->tx_rate)
		return true;

	dummy_admin.default_qos = qos;
	dummy_admin.default_vlan = vlan;

	/* The VF wants to move to another VST state which is valid with the
	 * current rate limit: either a different default vlan in VST or
	 * another supported QoS priority. Otherwise we don't allow this
	 * change while the TX rate is still configured.
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
		return true;

	mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
		  (vlan == MLX4_VGT) ? "VGT" : "VST");

	if (vlan != MLX4_VGT)
		mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);

	mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");

	return false;
}

int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->mac = mac;
	mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
		  vf, port, s_info->mac);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);

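/*
 * Only the admin (requested) vport state changes here; it is applied to
 * the operational state when the VF next initializes, hence the message
 * above about requiring a VF restart.
 */
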
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *vf_admin;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
		return -EPROTONOSUPPORT;

	if ((vlan > 4095) || (qos > 7))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
		return -EPERM;

	if ((0 == vlan) && (0 == qos))
		vf_admin->default_vlan = MLX4_VGT;
	else
		vf_admin->default_vlan = vlan;
	vf_admin->default_qos = qos;

	/* If a rate was configured prior to VST, we saved the configured
	 * rate in vf_admin->tx_rate; now, if the priority is supported,
	 * we enforce the QoS.
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
	    vf_admin->tx_rate)
		vf_admin->qos_vport = slave;

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_info(dev,
			  "updating vf %d port %d config will take effect on next VF restart\n",
			  vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);

int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
		     int max_tx_rate)
{
	int err;
	int slave;
	struct mlx4_vport_state *vf_admin;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
		return -EPROTONOSUPPORT;

	if (min_tx_rate) {
		mlx4_info(dev, "Minimum BW share not supported\n");
		return -EPROTONOSUPPORT;
	}

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
	if (err) {
		mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
			  max_tx_rate);
		return err;
	}

	vf_admin->tx_rate = max_tx_rate;
	/* If the VF is not in a supported mode (VST with supported prio),
	 * we do not change the vport configuration for its QPs, but save
	 * the rate, so it will be enforced when the VF moves to a supported
	 * mode next time.
	 */
	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
		mlx4_info(dev,
			  "rate set for VF %d when not in valid state\n", vf);

		if (vf_admin->default_vlan != MLX4_VGT)
			mlx4_info(dev, "VST priority not supported by QoS\n");
		else
			mlx4_info(dev, "VF in VGT mode (needed VST)\n");

		mlx4_info(dev,
			  "rate %d takes effect when VF moves to valid state\n",
			  max_tx_rate);
		return 0;
	}

	/* If the user sets rate 0, assign the default vport for its QPs */
	vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;

	if (priv->mfunc.master.slave_state[slave].active &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);

/* mlx4_get_slave_default_vlan -
 * return true if the slave is in VST mode (i.e. a default vlan is set);
 * if so, also return the vlan and qos through the pointers (if not NULL)
 */
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
				 u16 *vlan, u8 *qos)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;

	priv = mlx4_priv(dev);
	port = mlx4_slaves_closest_port(dev, slave, port);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		if (vlan)
			*vlan = vp_oper->state.default_vlan;
		if (qos)
			*qos = vp_oper->state.default_qos;
		return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);

int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->spoofchk = setting;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);

int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf,
		       struct ifla_vf_info *ivf)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	ivf->vf = vf;

	/* need to convert it to a func */
	ivf->mac[0] = ((s_info->mac >> (5 * 8)) & 0xff);
	ivf->mac[1] = ((s_info->mac >> (4 * 8)) & 0xff);
	ivf->mac[2] = ((s_info->mac >> (3 * 8)) & 0xff);
	ivf->mac[3] = ((s_info->mac >> (2 * 8)) & 0xff);
	ivf->mac[4] = ((s_info->mac >> (1 * 8)) & 0xff);
	ivf->mac[5] = ((s_info->mac) & 0xff);

	ivf->vlan = s_info->default_vlan;
	ivf->qos = s_info->default_qos;

	if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
		ivf->max_tx_rate = s_info->tx_rate;
	else
		ivf->max_tx_rate = 0;

	ivf->min_tx_rate = 0;
	ivf->spoofchk = s_info->spoofchk;
	ivf->linkstate = s_info->link_state;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_config);

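/*
 * The admin MAC is kept as a host-order u64, so the extraction above
 * emits it most-significant byte first: ivf->mac[0] receives bits 47:40
 * and ivf->mac[5] receives bits 7:0.
 */
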
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf,
			   int link_state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 link_stat_event;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* get current link state */
		if (!priv->sense.do_sense_port[port])
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		else
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	default:
		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
			  link_state, slave, port);
		return -EINVAL;
	}
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->link_state = link_state;

	/* send event */
	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_dbg(dev,
			 "updating vf %d port %d no link state HW enforcement\n",
			 vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);

int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
			   struct mlx4_counter *counter_stats, int reset)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	struct mlx4_counter *tmp_counter;
	int err;
	u32 if_stat_in_mod;

	if (!counter_stats)
		return -EINVAL;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
	if_stat_in_mod = counter_index;
	if (reset)
		if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
	err = mlx4_cmd_box(dev, 0, mailbox->dma,
			   if_stat_in_mod, 0,
			   MLX4_CMD_QUERY_IF_STAT,
			   MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err) {
		mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
			 __func__, counter_index);
		goto if_stat_out;
	}
	tmp_counter = (struct mlx4_counter *)mailbox->buf;
	counter_stats->counter_mode = tmp_counter->counter_mode;
	if (counter_stats->counter_mode == 0) {
		counter_stats->rx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
				    be64_to_cpu(tmp_counter->rx_frames));
		counter_stats->tx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
				    be64_to_cpu(tmp_counter->tx_frames));
		counter_stats->rx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
				    be64_to_cpu(tmp_counter->rx_bytes));
		counter_stats->tx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
				    be64_to_cpu(tmp_counter->tx_bytes));
	}

if_stat_out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);

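/*
 * Note that the statistics accumulate: each call adds the newly queried
 * values to whatever the caller already holds in *counter_stats, with all
 * fields kept big-endian. Callers wanting a fresh snapshot should zero
 * the structure first (and may pass reset != 0 to also clear the HW
 * counter).
 */
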
int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
		      struct ifla_vf_stats *vf_stats)
{
	struct mlx4_counter tmp_vf_stats;
	int slave;
	int err = 0;

	if (!vf_stats)
		return -EINVAL;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf_idx);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
	if (!err && tmp_vf_stats.counter_mode == 0) {
		vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
		vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
		vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
		vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);

int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);

int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 1;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);

int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
				 int enabled)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	if (slave == mlx4_master_func_num(dev))
		return 0;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS ||
	    enabled < 0 || enabled > 1)
		return -EINVAL;

	if (min_port == max_port && dev->caps.num_ports > 1) {
		mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
		return -EPROTONOSUPPORT;
	}

	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);