/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "fw_qos.h"
#include "mlx4_stats.h"
#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK     0xffffffffffffff00ULL

#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1
enum {
        /* command completed successfully: */
        CMD_STAT_OK             = 0x00,
        /* Internal error (such as a bus error) occurred while processing command: */
        CMD_STAT_INTERNAL_ERR   = 0x01,
        /* Operation/command not supported or opcode modifier not supported: */
        CMD_STAT_BAD_OP         = 0x02,
        /* Parameter not supported or parameter out of range: */
        CMD_STAT_BAD_PARAM      = 0x03,
        /* System not enabled or bad system state: */
        CMD_STAT_BAD_SYS_STATE  = 0x04,
        /* Attempt to access reserved or unallocated resource: */
        CMD_STAT_BAD_RESOURCE   = 0x05,
        /* Requested resource is currently executing a command, or is otherwise busy: */
        CMD_STAT_RESOURCE_BUSY  = 0x06,
        /* Required capability exceeds device limits: */
        CMD_STAT_EXCEED_LIM     = 0x08,
        /* Resource is not in the appropriate state or ownership: */
        CMD_STAT_BAD_RES_STATE  = 0x09,
        /* Index out of range: */
        CMD_STAT_BAD_INDEX      = 0x0a,
        /* FW image corrupted: */
        CMD_STAT_BAD_NVMEM      = 0x0b,
        /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
        CMD_STAT_ICM_ERROR      = 0x0c,
        /* Attempt to modify a QP/EE which is not in the presumed state: */
        CMD_STAT_BAD_QP_STATE   = 0x10,
        /* Bad segment parameters (Address/Size): */
        CMD_STAT_BAD_SEG_PARAM  = 0x20,
        /* Memory Region has Memory Windows bound to it: */
        CMD_STAT_REG_BOUND      = 0x21,
        /* HCA local attached memory not present: */
        CMD_STAT_LAM_NOT_PRE    = 0x22,
        /* Bad management packet (silently discarded): */
        CMD_STAT_BAD_PKT        = 0x30,
        /* More outstanding CQEs in CQ than new CQ size: */
        CMD_STAT_BAD_SIZE       = 0x40,
        /* Multi Function device support required: */
        CMD_STAT_MULTI_FUNC_REQ = 0x50,
};
enum {
        HCR_IN_PARAM_OFFSET     = 0x00,
        HCR_IN_MODIFIER_OFFSET  = 0x08,
        HCR_OUT_PARAM_OFFSET    = 0x0c,
        HCR_TOKEN_OFFSET        = 0x14,
        HCR_STATUS_OFFSET       = 0x18,

        HCR_OPMOD_SHIFT         = 12,
        HCR_T_BIT               = 21,
        HCR_E_BIT               = 22,
        HCR_GO_BIT              = 23
};

enum {
        GO_BIT_TIMEOUT_MSECS    = 10000
};
enum mlx4_vlan_transition {
        MLX4_VLAN_TRANSITION_VST_VST = 0,
        MLX4_VLAN_TRANSITION_VST_VGT = 1,
        MLX4_VLAN_TRANSITION_VGT_VST = 2,
        MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};
struct mlx4_cmd_context {
        struct completion       done;
        int                     result;
        int                     next;
        u64                     out_param;
        u16                     token;
        u8                      fw_status;
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
                                    struct mlx4_vhcr_cmd *in_vhcr);
static int mlx4_status_to_errno(u8 status)
{
        static const int trans_table[] = {
                [CMD_STAT_INTERNAL_ERR]   = -EIO,
                [CMD_STAT_BAD_OP]         = -EPERM,
                [CMD_STAT_BAD_PARAM]      = -EINVAL,
                [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
                [CMD_STAT_BAD_RESOURCE]   = -EBADF,
                [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
                [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
                [CMD_STAT_BAD_RES_STATE]  = -EBADF,
                [CMD_STAT_BAD_INDEX]      = -EBADF,
                [CMD_STAT_BAD_NVMEM]      = -EFAULT,
                [CMD_STAT_ICM_ERROR]      = -ENFILE,
                [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
                [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
                [CMD_STAT_REG_BOUND]      = -EBUSY,
                [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
                [CMD_STAT_BAD_PKT]        = -EINVAL,
                [CMD_STAT_BAD_SIZE]       = -ENOMEM,
                [CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
        };

        if (status >= ARRAY_SIZE(trans_table) ||
            (status != CMD_STAT_OK && trans_table[status] == 0))
                return -EIO;

        return trans_table[status];
}
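/*
 * Illustrative note (not part of the original file): a FW status such as
 * CMD_STAT_BAD_PARAM (0x03) maps through trans_table to -EINVAL, while a
 * status that is out of range, or non-OK but unmapped, falls back to -EIO:
 *
 *      mlx4_status_to_errno(CMD_STAT_BAD_PARAM);       returns -EINVAL
 *      mlx4_status_to_errno(0x7f);                     returns -EIO
 */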
static u8 mlx4_errno_to_status(int errno)
{
        switch (errno) {
        case -EPERM:
                return CMD_STAT_BAD_OP;
        case -EINVAL:
                return CMD_STAT_BAD_PARAM;
        case -ENXIO:
                return CMD_STAT_BAD_SYS_STATE;
        case -EBUSY:
                return CMD_STAT_RESOURCE_BUSY;
        case -ENOMEM:
                return CMD_STAT_EXCEED_LIM;
        case -ENFILE:
                return CMD_STAT_ICM_ERROR;
        default:
                return CMD_STAT_INTERNAL_ERR;
        }
}
static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
                                       u8 op_modifier)
{
        switch (op) {
        case MLX4_CMD_UNMAP_ICM:
        case MLX4_CMD_UNMAP_ICM_AUX:
        case MLX4_CMD_UNMAP_FA:
        case MLX4_CMD_2RST_QP:
        case MLX4_CMD_HW2SW_EQ:
        case MLX4_CMD_HW2SW_CQ:
        case MLX4_CMD_HW2SW_SRQ:
        case MLX4_CMD_HW2SW_MPT:
        case MLX4_CMD_CLOSE_HCA:
        case MLX4_QP_FLOW_STEERING_DETACH:
        case MLX4_CMD_FREE_RES:
        case MLX4_CMD_CLOSE_PORT:
                return CMD_STAT_OK;

        case MLX4_CMD_QP_ATTACH:
                /* On Detach case return success */
                if (op_modifier == 0)
                        return CMD_STAT_OK;
                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

        default:
                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
        }
}
static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
{
        /* Any error during the closing commands below is considered fatal */
        if (op == MLX4_CMD_CLOSE_HCA ||
            op == MLX4_CMD_HW2SW_EQ ||
            op == MLX4_CMD_HW2SW_CQ ||
            op == MLX4_CMD_2RST_QP ||
            op == MLX4_CMD_HW2SW_SRQ ||
            op == MLX4_CMD_SYNC_TPT ||
            op == MLX4_CMD_UNMAP_ICM ||
            op == MLX4_CMD_UNMAP_ICM_AUX ||
            op == MLX4_CMD_UNMAP_FA)
                return 1;
        /* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
         * CMD_STAT_REG_BOUND.
         * This status indicates that the memory region has memory windows
         * bound to it, which may result from invalid user space usage and
         * is not fatal.
         */
        if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
                return 1;
        return 0;
}
static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
                               int err)
{
        /* Only if the reset flow is really active is the return code based
         * on the command; otherwise the current error code is returned.
         */
        if (mlx4_internal_err_reset) {
                mlx4_enter_error_state(dev->persist);
                err = mlx4_internal_err_ret_value(dev, op, op_modifier);
        }

        return err;
}
static int comm_pending(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        u32 status = readl(&priv->mfunc.comm->slave_read);

        return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}
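/*
 * Note (editorial): the comm channel carries a single toggle bit in the
 * top bit of each 32-bit word.  comm_pending() reads as true while the
 * command most recently posted by this function has not yet been
 * acknowledged by the master, i.e. while the toggle in slave_read still
 * differs from the cached priv->cmd.comm_toggle.
 */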
static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        u32 val;

        /* To avoid writing to unknown addresses after the device state was
         * changed to internal error and the function was reset,
         * check the INTERNAL_ERROR flag which is updated under
         * device_state_mutex lock.
         */
        mutex_lock(&dev->persist->device_state_mutex);

        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                mutex_unlock(&dev->persist->device_state_mutex);
                return -EIO;
        }

        priv->cmd.comm_toggle ^= 1;
        val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
        __raw_writel((__force u32) cpu_to_be32(val),
                     &priv->mfunc.comm->slave_write);
        mutex_unlock(&dev->persist->device_state_mutex);
        return 0;
}
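/*
 * Layout of the 32-bit word assembled in mlx4_comm_cmd_post() above
 * (before the big-endian conversion):
 *
 *      bits [15:0]  - param (e.g. a 16-bit chunk of the VHCR DMA address)
 *      bits [23:16] - comm channel command (MLX4_COMM_CMD_*)
 *      bit      31  - toggle, flipped on every post
 */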
static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
                              unsigned long timeout)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        unsigned long end;
        int err = 0;
        int ret_from_pending = 0;

        /* First, verify that the master reports correct status */
        if (comm_pending(dev)) {
                mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
                          priv->cmd.comm_toggle, cmd);
                return -EAGAIN;
        }

        /* Write command */
        down(&priv->cmd.poll_sem);
        if (mlx4_comm_cmd_post(dev, cmd, param)) {
                /* Only in case the device state is INTERNAL_ERROR,
                 * mlx4_comm_cmd_post returns with an error
                 */
                err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
                goto out;
        }

        end = msecs_to_jiffies(timeout) + jiffies;
        while (comm_pending(dev) && time_before(jiffies, end))
                cond_resched();
        ret_from_pending = comm_pending(dev);
        if (ret_from_pending) {
                /* check if the slave is trying to boot in the middle of
                 * FLR process. The only non-zero result in the RESET command
                 * is MLX4_DELAY_RESET_SLAVE*/
                if ((MLX4_COMM_CMD_RESET == cmd)) {
                        err = MLX4_DELAY_RESET_SLAVE;
                        goto out;
                } else {
                        mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
                                  cmd);
                        err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
                }
        }

        if (err)
                mlx4_enter_error_state(dev->persist);
out:
        up(&priv->cmd.poll_sem);
        return err;
}
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
                              u16 param, u16 op, unsigned long timeout)
{
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        struct mlx4_cmd_context *context;
        unsigned long end;
        int err = 0;

        down(&cmd->event_sem);

        spin_lock(&cmd->context_lock);
        BUG_ON(cmd->free_head < 0);
        context = &cmd->context[cmd->free_head];
        context->token += cmd->token_mask + 1;
        cmd->free_head = context->next;
        spin_unlock(&cmd->context_lock);

        reinit_completion(&context->done);

        if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
                /* Only in case the device state is INTERNAL_ERROR,
                 * mlx4_comm_cmd_post returns with an error
                 */
                err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
                goto out;
        }

        if (!wait_for_completion_timeout(&context->done,
                                         msecs_to_jiffies(timeout))) {
                mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
                          vhcr_cmd, op);
                goto out_reset;
        }

        err = context->result;
        if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
                         vhcr_cmd, context->fw_status);
                if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
                        goto out_reset;
        }

        /* wait for comm channel ready
         * this is necessary to prevent a race
         * when switching between event and polling mode.
         * Skipping this section in case the device is in FATAL_ERROR state;
         * in this state, no commands are sent via the comm channel until
         * the device has returned from reset.
         */
        if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
                end = msecs_to_jiffies(timeout) + jiffies;
                while (comm_pending(dev) && time_before(jiffies, end))
                        cond_resched();
        }
        goto out;

out_reset:
        err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
        mlx4_enter_error_state(dev->persist);
out:
        spin_lock(&cmd->context_lock);
        context->next = cmd->free_head;
        cmd->free_head = context - cmd->context;
        spin_unlock(&cmd->context_lock);

        up(&cmd->event_sem);
        return err;
}
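/*
 * Note (editorial): the event-mode paths above and in mlx4_cmd_wait()
 * further below draw command contexts from a free list protected by
 * cmd->context_lock; the token is advanced by token_mask + 1 on each
 * allocation so that a completion for a previously timed-out use of the
 * same slot can be recognized and discarded (see the token check in
 * mlx4_cmd_event()).
 */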
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
                  u16 op, unsigned long timeout)
{
        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
                return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

        if (mlx4_priv(dev)->cmd.use_events)
                return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
        return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}
static int cmd_pending(struct mlx4_dev *dev)
{
        u32 status;

        if (pci_channel_offline(dev->persist->pdev))
                return -EIO;

        status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

        return (status & swab32(1 << HCR_GO_BIT)) ||
                (mlx4_priv(dev)->cmd.toggle ==
                 !!(status & swab32(1 << HCR_T_BIT)));
}
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                         u32 in_modifier, u8 op_modifier, u16 op, u16 token,
                         int event)
{
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        u32 __iomem *hcr = cmd->hcr;
        int ret = -EIO;
        unsigned long end;

        mutex_lock(&dev->persist->device_state_mutex);
        /* To avoid writing to unknown addresses after the device state was
         * changed to internal error and the chip was reset,
         * check the INTERNAL_ERROR flag which is updated under
         * device_state_mutex lock.
         */
        if (pci_channel_offline(dev->persist->pdev) ||
            (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
                /*
                 * Device is going through error recovery
                 * and cannot accept commands.
                 */
                goto out;
        }

        end = jiffies;
        end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

        while (cmd_pending(dev)) {
                if (pci_channel_offline(dev->persist->pdev)) {
                        /*
                         * Device is going through error recovery
                         * and cannot accept commands.
                         */
                        goto out;
                }

                if (time_after_eq(jiffies, end)) {
                        mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
                        goto out;
                }
                cond_resched();
        }

        /*
         * We use writel (instead of something like memcpy_toio)
         * because writes of less than 32 bits to the HCR don't work
         * (and some architectures such as ia64 implement memcpy_toio
         * in terms of writeb).
         */
        __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
        __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
        __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
        __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
        __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
        __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);

        /* __raw_writel may not order writes. */
        wmb();

        __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
                                               (cmd->toggle << HCR_T_BIT)       |
                                               (event ? (1 << HCR_E_BIT) : 0)   |
                                               (op_modifier << HCR_OPMOD_SHIFT) |
                                               op), hcr + 6);

        cmd->toggle = cmd->toggle ^ 1;

        ret = 0;

out:
        if (ret)
                mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
                          op, ret, in_param, in_modifier, op_modifier);
        mutex_unlock(&dev->persist->device_state_mutex);

        return ret;
}
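/*
 * HCR register layout used by mlx4_cmd_post() (one big-endian dword each,
 * at hcr + 0 .. hcr + 6):
 *
 *      hcr[0..1] - input parameter (high dword, then low dword)
 *      hcr[2]    - input modifier
 *      hcr[3..4] - output parameter (high dword, then low dword)
 *      hcr[5]    - token, in the upper 16 bits
 *      hcr[6]    - go bit, toggle, event bit, opcode modifier and opcode;
 *                  writing this dword hands the command to firmware
 */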
static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                          int out_is_imm, u32 in_modifier, u8 op_modifier,
                          u16 op, unsigned long timeout)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
        int ret;

        mutex_lock(&priv->cmd.slave_cmd_mutex);

        vhcr->in_param = cpu_to_be64(in_param);
        vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
        vhcr->in_modifier = cpu_to_be32(in_modifier);
        vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
        vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
        vhcr->status = 0;
        vhcr->flags = !!(priv->cmd.use_events) << 6;

        if (mlx4_is_master(dev)) {
                ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
                if (!ret) {
                        if (out_is_imm) {
                                if (out_param)
                                        *out_param =
                                                be64_to_cpu(vhcr->out_param);
                                else {
                                        mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
                                                 op);
                                        vhcr->status = CMD_STAT_BAD_PARAM;
                                }
                        }
                        ret = mlx4_status_to_errno(vhcr->status);
                }
                if (ret &&
                    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
                        ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
        } else {
                ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
                                    MLX4_COMM_TIME + timeout);
                if (!ret) {
                        if (out_is_imm) {
                                if (out_param)
                                        *out_param =
                                                be64_to_cpu(vhcr->out_param);
                                else {
                                        mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
                                                 op);
                                        vhcr->status = CMD_STAT_BAD_PARAM;
                                }
                        }
                        ret = mlx4_status_to_errno(vhcr->status);
                } else {
                        if (dev->persist->state &
                            MLX4_DEVICE_STATE_INTERNAL_ERROR)
                                ret = mlx4_internal_err_ret_value(dev, op,
                                                                  op_modifier);
                        else
                                mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
                }
        }

        mutex_unlock(&priv->cmd.slave_cmd_mutex);
        return ret;
}
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        void __iomem *hcr = priv->cmd.hcr;
        int err = 0;
        unsigned long end;
        u32 stat;

        down(&priv->cmd.poll_sem);

        if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                /*
                 * Device is going through error recovery
                 * and cannot accept commands.
                 */
                err = mlx4_internal_err_ret_value(dev, op, op_modifier);
                goto out;
        }

        if (out_is_imm && !out_param) {
                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
                         op);
                err = -EINVAL;
                goto out;
        }

        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
        if (err)
                goto out_reset;

        end = msecs_to_jiffies(timeout) + jiffies;
        while (cmd_pending(dev) && time_before(jiffies, end)) {
                if (pci_channel_offline(dev->persist->pdev)) {
                        /*
                         * Device is going through error recovery
                         * and cannot accept commands.
                         */
                        err = -EIO;
                        goto out_reset;
                }

                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                        err = mlx4_internal_err_ret_value(dev, op, op_modifier);
                        goto out;
                }

                cond_resched();
        }

        if (cmd_pending(dev)) {
                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
                          op);
                err = -EIO;
                goto out_reset;
        }

        if (out_is_imm)
                *out_param =
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
        stat = be32_to_cpu((__force __be32)
                           __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
        err = mlx4_status_to_errno(stat);
        if (err) {
                mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
                         op, stat);
                if (mlx4_closing_cmd_fatal_error(op, stat))
                        goto out_reset;
        }
        goto out;

out_reset:
        if (err)
                err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
        up(&priv->cmd.poll_sem);
        return err;
}
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_context *context =
                &priv->cmd.context[token & priv->cmd.token_mask];

        /* previously timed out command completing at long last */
        if (token != context->token)
                return;

        context->fw_status = status;
        context->result    = mlx4_status_to_errno(status);
        context->out_param = out_param;

        complete(&context->done);
}
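/*
 * Note (editorial): a completion whose token does not match context->token
 * belongs to an earlier command that already timed out and returned its
 * context to the free list; mlx4_cmd_event() deliberately ignores it.
 */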
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
{
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        struct mlx4_cmd_context *context;
        long ret_wait;
        int err = 0;

        down(&cmd->event_sem);

        spin_lock(&cmd->context_lock);
        BUG_ON(cmd->free_head < 0);
        context = &cmd->context[cmd->free_head];
        context->token += cmd->token_mask + 1;
        cmd->free_head = context->next;
        spin_unlock(&cmd->context_lock);

        if (out_is_imm && !out_param) {
                mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
                         op);
                err = -EINVAL;
                goto out;
        }

        reinit_completion(&context->done);

        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, context->token, 1);
        if (err)
                goto out_reset;

        if (op == MLX4_CMD_SENSE_PORT) {
                ret_wait =
                        wait_for_completion_interruptible_timeout(&context->done,
                                                                  msecs_to_jiffies(timeout));
                if (ret_wait < 0) {
                        context->fw_status = 0;
                        context->out_param = 0;
                        context->result = 0;
                }
        } else {
                ret_wait = (long)wait_for_completion_timeout(&context->done,
                                                             msecs_to_jiffies(timeout));
        }
        if (!ret_wait) {
                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
                          op);
                if (op == MLX4_CMD_NOP) {
                        err = -EBUSY;
                        goto out;
                } else {
                        err = -EIO;
                        goto out_reset;
                }
        }

        err = context->result;
        if (err) {
                /* Since we do not want to have this error message always
                 * displayed at driver start when there are ConnectX2 HCAs
                 * on the host, we deprecate the error message for this
                 * specific command/input_mod/opcode_mod/fw-status to be debug.
                 */
                if (op == MLX4_CMD_SET_PORT &&
                    (in_modifier == 1 || in_modifier == 2) &&
                    op_modifier == MLX4_SET_PORT_IB_OPCODE &&
                    context->fw_status == CMD_STAT_BAD_SIZE)
                        mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
                                 op, context->fw_status);
                else
                        mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
                                 op, context->fw_status);
                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
                        err = mlx4_internal_err_ret_value(dev, op, op_modifier);
                else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
                        goto out_reset;

                goto out;
        }

        if (out_is_imm)
                *out_param = context->out_param;

out_reset:
        if (err)
                err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
        spin_lock(&cmd->context_lock);
        context->next = cmd->free_head;
        cmd->free_head = context - cmd->context;
        spin_unlock(&cmd->context_lock);

        up(&cmd->event_sem);
        return err;
}
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
               u16 op, unsigned long timeout, int native)
{
        if (pci_channel_offline(dev->persist->pdev))
                return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);

        if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
                int ret;

                if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
                        return mlx4_internal_err_ret_value(dev, op,
                                                           op_modifier);
                down_read(&mlx4_priv(dev)->cmd.switch_sem);
                if (mlx4_priv(dev)->cmd.use_events)
                        ret = mlx4_cmd_wait(dev, in_param, out_param,
                                            out_is_imm, in_modifier,
                                            op_modifier, op, timeout);
                else
                        ret = mlx4_cmd_poll(dev, in_param, out_param,
                                            out_is_imm, in_modifier,
                                            op_modifier, op, timeout);

                up_read(&mlx4_priv(dev)->cmd.switch_sem);
                return ret;
        }
        return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
                              in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
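/*
 * Note (editorial): __mlx4_cmd() is normally reached through the thin
 * wrappers declared in <linux/mlx4/cmd.h> (mlx4_cmd(), mlx4_cmd_box() and
 * mlx4_cmd_imm()), which fix up out_param/out_is_imm for the common cases.
 * A typical fire-and-forget invocation, exactly as used by
 * mlx4_ARM_COMM_CHANNEL() below:
 *
 *      err = mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
 *                     MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
 */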
int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
                        MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
                           int slave, u64 slave_addr,
                           int size, int is_read)
{
        u64 in_param;
        u64 out_param;

        if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
            (slave & ~0x7f) | (size & 0xff)) {
                mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
                         slave_addr, master_addr, slave, size);
                return -EINVAL;
        }

        if (is_read) {
                in_param = (u64) slave | slave_addr;
                out_param = (u64) dev->caps.function | master_addr;
        } else {
                in_param = (u64) dev->caps.function | master_addr;
                out_param = (u64) slave | slave_addr;
        }

        return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
                            MLX4_CMD_ACCESS_MEM,
                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
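/*
 * Note (editorial): both addresses passed to ACCESS_MEM must be page
 * aligned (low 12 bits clear), which is what allows the function/slave id
 * to be packed into the low bits of the same 64-bit parameter as the
 * address in the code above.
 */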
static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
                            struct mlx4_cmd_mailbox *inbox,
                            struct mlx4_cmd_mailbox *outbox)
{
        struct ib_smp *in_mad  = (struct ib_smp *)(inbox->buf);
        struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
        int err;
        int i;

        if (index & 0x1f)
                return -EINVAL;

        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
                           MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
                           MLX4_CMD_NATIVE);
        if (err)
                return err;

        for (i = 0; i < 32; ++i)
                pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);

        return err;
}
static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
                               struct mlx4_cmd_mailbox *inbox,
                               struct mlx4_cmd_mailbox *outbox)
{
        int i;
        int err;

        for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
                err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
                if (err)
                        return err;
        }

        return 0;
}
#define PORT_CAPABILITY_LOCATION_IN_SMP 20
#define PORT_STATE_OFFSET 32

static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
{
        if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
                return IB_PORT_ACTIVE;
        else
                return IB_PORT_DOWN;
}
static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
                                struct mlx4_vhcr *vhcr,
                                struct mlx4_cmd_mailbox *inbox,
                                struct mlx4_cmd_mailbox *outbox,
                                struct mlx4_cmd_info *cmd)
{
        struct ib_smp *smp = inbox->buf;
        u32 index;
        u8 port, slave_port;
        u8 opcode_modifier;
        u16 *table;
        int err;
        int vidx, pidx;
        int network_view;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct ib_smp *outsmp = outbox->buf;
        __be16 *outtab = (__be16 *)(outsmp->data);
        __be32 slave_cap_mask;
        __be64 slave_node_guid;

        slave_port = vhcr->in_modifier;
        port = mlx4_slave_convert_port(dev, slave, slave_port);

        /* network-view bit is for driver use only, and should not be passed to FW */
        opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
        network_view = !!(vhcr->op_modifier & 0x8);

        if (smp->base_version == 1 &&
            smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
            smp->class_version == 1) {
                /* host view is paravirtualized */
                if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
                        if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
                                index = be32_to_cpu(smp->attr_mod);
                                if (port < 1 || port > dev->caps.num_ports)
                                        return -EINVAL;
                                table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
                                                sizeof(*table) * 32, GFP_KERNEL);
                                if (!table)
                                        return -ENOMEM;
                                /* need to get the full pkey table because the paravirtualized
                                 * pkeys may be scattered among several pkey blocks.
                                 */
                                err = get_full_pkey_table(dev, port, table, inbox, outbox);
                                if (!err) {
                                        for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
                                                pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
                                                outtab[vidx % 32] = cpu_to_be16(table[pidx]);
                                        }
                                }
                                kfree(table);
                                return err;
                        }
                        if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
                                /* get the slave specific caps */
                                smp->attr_mod = cpu_to_be32(port);
                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
                                                   port, opcode_modifier,
                                                   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
                                /* modify the response for slaves */
                                if (!err && slave != mlx4_master_func_num(dev)) {
                                        u8 *state = outsmp->data + PORT_STATE_OFFSET;

                                        *state = (*state & 0xf0) | vf_port_state(dev, port, slave);
                                        slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
                                        memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
                                }
                                return err;
                        }
                        if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
                                __be64 guid = mlx4_get_admin_guid(dev, slave,
                                                                  port);

                                /* set the PF admin guid to the FW/HW burned
                                 * GUID, if it wasn't yet set
                                 */
                                if (slave == 0 && guid == 0) {
                                        smp->attr_mod = 0;
                                        err = mlx4_cmd_box(dev,
                                                           inbox->dma,
                                                           outbox->dma,
                                                           vhcr->in_modifier,
                                                           opcode_modifier,
                                                           vhcr->op,
                                                           MLX4_CMD_TIME_CLASS_C,
                                                           MLX4_CMD_NATIVE);
                                        if (err)
                                                return err;
                                        mlx4_set_admin_guid(dev,
                                                            *(__be64 *)outsmp->data,
                                                            slave, port);
                                } else {
                                        memcpy(outsmp->data, &guid, 8);
                                }

                                /* clean all other gids */
                                memset(outsmp->data + 8, 0, 56);
                                return 0;
                        }
                        if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
                                err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
                                                   port, opcode_modifier,
                                                   vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
                                if (!err) {
                                        slave_node_guid = mlx4_get_slave_node_guid(dev, slave);
                                        memcpy(outsmp->data + 12, &slave_node_guid, 8);
                                }
                                return err;
                        }
                }
        }

        /* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
         * These are the MADs used by ib verbs (such as ib_query_gids).
         */
        if (slave != mlx4_master_func_num(dev) &&
            !mlx4_vf_smi_enabled(dev, slave, port)) {
                if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
                      smp->method == IB_MGMT_METHOD_GET) || network_view) {
                        mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
                                 slave, smp->mgmt_class, smp->method,
                                 network_view ? "Network" : "Host",
                                 be16_to_cpu(smp->attr_id));
                        return -EPERM;
                }
        }

        return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
                            vhcr->in_modifier, opcode_modifier,
                            vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
                                  struct mlx4_vhcr *vhcr,
                                  struct mlx4_cmd_mailbox *inbox,
                                  struct mlx4_cmd_mailbox *outbox,
                                  struct mlx4_cmd_info *cmd)
{
        return -EPERM;
}
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
                     struct mlx4_vhcr *vhcr,
                     struct mlx4_cmd_mailbox *inbox,
                     struct mlx4_cmd_mailbox *outbox,
                     struct mlx4_cmd_info *cmd)
{
        u64 in_param;
        u64 out_param;
        int err;

        in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
        out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
        if (cmd->encode_slave_id) {
                /* the slave id is carried in the low byte of in_param so
                 * that resource wrappers can tell which function issued
                 * a SW2HW-style command */
                in_param &= 0xffffffffffffff00ll;
                in_param |= slave;
        }

        err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
                         vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
                         MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

        if (cmd->out_is_imm)
                vhcr->out_param = out_param;

        return err;
}

static struct mlx4_cmd_info cmd_info[] = {
        {
                .opcode = MLX4_CMD_QUERY_FW,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_QUERY_FW_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_HCA,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = NULL
        },
        {
                .opcode = MLX4_CMD_QUERY_DEV_CAP,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_QUERY_DEV_CAP_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_FUNC_CAP,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_ADAPTER,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = NULL
        },
        {
                .opcode = MLX4_CMD_INIT_PORT,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_INIT_PORT_wrapper
        },
        {
                .opcode = MLX4_CMD_CLOSE_PORT,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_CLOSE_PORT_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_PORT,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_QUERY_PORT_wrapper
        },
        {
                .opcode = MLX4_CMD_SET_PORT,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_SET_PORT_wrapper
        },
        {
                .opcode = MLX4_CMD_MAP_EQ,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_MAP_EQ_wrapper
        },
        {
                .opcode = MLX4_CMD_SW2HW_EQ,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = true,
                .verify = NULL,
                .wrapper = mlx4_SW2HW_EQ_wrapper
        },
        {
                .opcode = MLX4_CMD_HW_HEALTH_CHECK,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = NULL
        },
        {
                .opcode = MLX4_CMD_NOP,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = NULL
        },
        {
                .opcode = MLX4_CMD_CONFIG_DEV,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_CONFIG_DEV_wrapper
        },
        {
                .opcode = MLX4_CMD_ALLOC_RES,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = true,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_ALLOC_RES_wrapper
        },
        {
                .opcode = MLX4_CMD_FREE_RES,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_FREE_RES_wrapper
        },
        {
                .opcode = MLX4_CMD_SW2HW_MPT,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = true,
                .verify = NULL,
                .wrapper = mlx4_SW2HW_MPT_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_MPT,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_QUERY_MPT_wrapper
        },
        {
                .opcode = MLX4_CMD_HW2SW_MPT,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_HW2SW_MPT_wrapper
        },
        {
                .opcode = MLX4_CMD_READ_MTT,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = NULL
        },
        {
                .opcode = MLX4_CMD_WRITE_MTT,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_WRITE_MTT_wrapper
        },
        {
                .opcode = MLX4_CMD_SYNC_TPT,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = NULL
        },
        {
                .opcode = MLX4_CMD_HW2SW_EQ,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = true,
                .verify = NULL,
                .wrapper = mlx4_HW2SW_EQ_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_EQ,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = true,
                .verify = NULL,
                .wrapper = mlx4_QUERY_EQ_wrapper
        },
        {
                .opcode = MLX4_CMD_SW2HW_CQ,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = true,
                .verify = NULL,
                .wrapper = mlx4_SW2HW_CQ_wrapper
        },
        {
                .opcode = MLX4_CMD_HW2SW_CQ,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_HW2SW_CQ_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_CQ,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_QUERY_CQ_wrapper
        },
        {
                .opcode = MLX4_CMD_MODIFY_CQ,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = true,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_MODIFY_CQ_wrapper
        },
        {
                .opcode = MLX4_CMD_SW2HW_SRQ,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = true,
                .verify = NULL,
                .wrapper = mlx4_SW2HW_SRQ_wrapper
        },
        {
                .opcode = MLX4_CMD_HW2SW_SRQ,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_HW2SW_SRQ_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_SRQ,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_QUERY_SRQ_wrapper
        },
        {
                .opcode = MLX4_CMD_ARM_SRQ,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_ARM_SRQ_wrapper
        },
        {
                .opcode = MLX4_CMD_RST2INIT_QP,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = true,
                .verify = NULL,
                .wrapper = mlx4_RST2INIT_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_INIT2INIT_QP,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_INIT2INIT_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_INIT2RTR_QP,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_INIT2RTR_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_RTR2RTS_QP,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_RTR2RTS_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_RTS2RTS_QP,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_RTS2RTS_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_SQERR2RTS_QP,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_SQERR2RTS_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_2ERR_QP,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_GEN_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_RTS2SQD_QP,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_GEN_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_SQD2SQD_QP,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_SQD2SQD_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_SQD2RTS_QP,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_SQD2RTS_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_2RST_QP,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_2RST_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_QP,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_GEN_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_SUSPEND_QP,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_GEN_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_UNSUSPEND_QP,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_GEN_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_UPDATE_QP,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_UPDATE_QP_wrapper
        },
        {
                .opcode = MLX4_CMD_GET_OP_REQ,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_CMD_EPERM_wrapper,
        },
        {
                .opcode = MLX4_CMD_ALLOCATE_VPP,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_CMD_EPERM_wrapper,
        },
        {
                .opcode = MLX4_CMD_SET_VPORT_QOS,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_CMD_EPERM_wrapper,
        },
        {
                .opcode = MLX4_CMD_CONF_SPECIAL_QP,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL, /* XXX verify: only demux can do this */
                .wrapper = NULL
        },
        {
                .opcode = MLX4_CMD_MAD_IFC,
                .has_inbox = true,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_MAD_IFC_wrapper
        },
        {
                .opcode = MLX4_CMD_MAD_DEMUX,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_CMD_EPERM_wrapper
        },
        {
                .opcode = MLX4_CMD_QUERY_IF_STAT,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_QUERY_IF_STAT_wrapper
        },
        {
                .opcode = MLX4_CMD_ACCESS_REG,
                .has_inbox = true,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_ACCESS_REG_wrapper,
        },
        {
                .opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_CMD_EPERM_wrapper,
        },
        /* Native multicast commands are not available for guests */
        {
                .opcode = MLX4_CMD_QP_ATTACH,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_QP_ATTACH_wrapper
        },
        {
                .opcode = MLX4_CMD_PROMISC,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_PROMISC_wrapper
        },
        /* Ethernet specific commands */
        {
                .opcode = MLX4_CMD_SET_VLAN_FLTR,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_SET_VLAN_FLTR_wrapper
        },
        {
                .opcode = MLX4_CMD_SET_MCAST_FLTR,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_SET_MCAST_FLTR_wrapper
        },
        {
                .opcode = MLX4_CMD_DUMP_ETH_STATS,
                .has_inbox = false,
                .has_outbox = true,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_DUMP_ETH_STATS_wrapper
        },
        {
                .opcode = MLX4_CMD_INFORM_FLR_DONE,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = NULL
        },
        /* flow steering commands */
        {
                .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
                .has_inbox = true,
                .has_outbox = false,
                .out_is_imm = true,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
        },
        {
                .opcode = MLX4_QP_FLOW_STEERING_DETACH,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
        },
        {
                .opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_CMD_EPERM_wrapper
        },
        {
                .opcode = MLX4_CMD_VIRT_PORT_MAP,
                .has_inbox = false,
                .has_outbox = false,
                .out_is_imm = false,
                .encode_slave_id = false,
                .verify = NULL,
                .wrapper = mlx4_CMD_EPERM_wrapper
        },
};
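/*
 * Note (editorial): mlx4_master_process_vhcr() below resolves a slave's
 * opcode against this table with a linear scan; commands that are absent
 * from the table, or whose wrapper is mlx4_CMD_EPERM_wrapper, are
 * rejected for guests.
 */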
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
                                    struct mlx4_vhcr_cmd *in_vhcr)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_info *cmd = NULL;
        struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
        struct mlx4_vhcr *vhcr;
        struct mlx4_cmd_mailbox *inbox = NULL;
        struct mlx4_cmd_mailbox *outbox = NULL;
        u64 in_param;
        u64 out_param;
        int ret = 0;
        int i;
        int err = 0;

        /* Create sw representation of Virtual HCR */
        vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
        if (!vhcr)
                return -ENOMEM;

        /* DMA in the vHCR */
        if (!in_vhcr) {
                ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
                                      priv->mfunc.master.slave_state[slave].vhcr_dma,
                                      ALIGN(sizeof(struct mlx4_vhcr_cmd),
                                            MLX4_ACCESS_MEM_ALIGN), 1);
                if (ret) {
                        if (!(dev->persist->state &
                              MLX4_DEVICE_STATE_INTERNAL_ERROR))
                                mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
                                         __func__, ret);
                        kfree(vhcr);
                        return ret;
                }
        }

        /* Fill SW VHCR fields */
        vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
        vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
        vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
        vhcr->token = be16_to_cpu(vhcr_cmd->token);
        vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
        vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
        vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

        /* Lookup command */
        for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
                if (vhcr->op == cmd_info[i].opcode) {
                        cmd = &cmd_info[i];
                        break;
                }
        }
        if (!cmd) {
                mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
                         vhcr->op, slave);
                vhcr_cmd->status = CMD_STAT_BAD_PARAM;
                goto out_status;
        }

        /* Read inbox */
        if (cmd->has_inbox) {
                vhcr->in_param &= INBOX_MASK;
                inbox = mlx4_alloc_cmd_mailbox(dev);
                if (IS_ERR(inbox)) {
                        vhcr_cmd->status = CMD_STAT_BAD_SIZE;
                        inbox = NULL;
                        goto out_status;
                }

                ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
                                      vhcr->in_param,
                                      MLX4_MAILBOX_SIZE, 1);
                if (ret) {
                        if (!(dev->persist->state &
                              MLX4_DEVICE_STATE_INTERNAL_ERROR))
                                mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
                                         __func__, cmd->opcode);
                        vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
                        goto out_status;
                }
        }

        /* Apply permission and bound checks if applicable */
        if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
                mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
                          vhcr->op, slave, vhcr->in_modifier);
                vhcr_cmd->status = CMD_STAT_BAD_OP;
                goto out_status;
        }

        /* Allocate outbox */
        if (cmd->has_outbox) {
                outbox = mlx4_alloc_cmd_mailbox(dev);
                if (IS_ERR(outbox)) {
                        vhcr_cmd->status = CMD_STAT_BAD_SIZE;
                        outbox = NULL;
                        goto out_status;
                }
        }

        /* Execute the command! */
        if (cmd->wrapper) {
                err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
                                   cmd);
                if (cmd->out_is_imm)
                        vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
        } else {
                in_param = cmd->has_inbox ? (u64) inbox->dma :
                        vhcr->in_param;
                out_param = cmd->has_outbox ? (u64) outbox->dma :
                        vhcr->out_param;
                err = __mlx4_cmd(dev, in_param, &out_param,
                                 cmd->out_is_imm, vhcr->in_modifier,
                                 vhcr->op_modifier, vhcr->op,
                                 MLX4_CMD_TIME_CLASS_A,
                                 MLX4_CMD_NATIVE);

                if (cmd->out_is_imm) {
                        vhcr->out_param = out_param;
                        vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
                }
        }

        if (err) {
                if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
                        if (vhcr->op == MLX4_CMD_ALLOC_RES &&
                            (vhcr->in_modifier & 0xff) == RES_COUNTER &&
                            err == -EDQUOT)
                                mlx4_dbg(dev,
                                         "Unable to allocate counter for slave %d (%d)\n",
                                         slave, err);
                        else
                                mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
                                          vhcr->op, slave, vhcr->errno, err);
                }
                vhcr_cmd->status = mlx4_errno_to_status(err);
                goto out_status;
        }

        /* Write outbox if command completed successfully */
        if (cmd->has_outbox && !vhcr_cmd->status) {
                ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
                                      vhcr->out_param,
                                      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
                if (ret) {
                        /* If we failed to write back the outbox after the
                         * command was successfully executed, we must fail this
                         * slave, as it is now in an undefined state */
                        if (!(dev->persist->state &
                              MLX4_DEVICE_STATE_INTERNAL_ERROR))
                                mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
                        ret = -EIO;
                        goto out;
                }
        }

out_status:
        /* DMA back vhcr result */
        if (!in_vhcr) {
                ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
                                      priv->mfunc.master.slave_state[slave].vhcr_dma,
                                      ALIGN(sizeof(struct mlx4_vhcr),
                                            MLX4_ACCESS_MEM_ALIGN),
                                      MLX4_CMD_WRAPPED);
                if (ret)
                        mlx4_err(dev, "%s:Failed writing vhcr result\n",
                                 __func__);
                else if (vhcr->e_bit &&
                         mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
                        mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
                                  slave);
        }

out:
        kfree(vhcr);
        mlx4_free_cmd_mailbox(dev, inbox);
        mlx4_free_cmd_mailbox(dev, outbox);
        return ret;
}
static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
                                                   int slave, int port)
{
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_vport_state *vp_admin;
        struct mlx4_vf_immed_vlan_work *work;
        struct mlx4_dev *dev = &(priv->dev);
        int err;
        int admin_vlan_ix = NO_INDX;

        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

        if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
            vp_oper->state.default_qos == vp_admin->default_qos &&
            vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
            vp_oper->state.link_state == vp_admin->link_state &&
            vp_oper->state.qos_vport == vp_admin->qos_vport)
                return 0;

        if (!(priv->mfunc.master.slave_state[slave].active &&
              dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
                /* even if the UPDATE_QP command isn't supported, we still want
                 * to set this VF link according to the admin directive
                 */
                vp_oper->state.link_state = vp_admin->link_state;
                return -1;
        }

        mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
                 slave, port);
        mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
                 vp_admin->default_vlan, vp_admin->default_qos,
                 vp_admin->link_state);

        work = kzalloc(sizeof(*work), GFP_KERNEL);
        if (!work)
                return -ENOMEM;

        if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
                if (MLX4_VGT != vp_admin->default_vlan) {
                        err = __mlx4_register_vlan(&priv->dev, port,
                                                   vp_admin->default_vlan,
                                                   &admin_vlan_ix);
                        if (err) {
                                kfree(work);
                                mlx4_warn(&priv->dev,
                                          "No vlan resources slave %d, port %d\n",
                                          slave, port);
                                return err;
                        }
                } else {
                        admin_vlan_ix = NO_INDX;
                }
                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
                mlx4_dbg(&priv->dev,
                         "alloc vlan %d idx %d slave %d port %d\n",
                         (int)(vp_admin->default_vlan),
                         admin_vlan_ix, slave, port);
        }

        /* save original vlan ix and vlan id */
        work->orig_vlan_id = vp_oper->state.default_vlan;
        work->orig_vlan_ix = vp_oper->vlan_idx;

        /* handle new qos */
        if (vp_oper->state.default_qos != vp_admin->default_qos)
                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;

        if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
                vp_oper->vlan_idx = admin_vlan_ix;

        vp_oper->state.default_vlan = vp_admin->default_vlan;
        vp_oper->state.default_qos = vp_admin->default_qos;
        vp_oper->state.vlan_proto = vp_admin->vlan_proto;
        vp_oper->state.link_state = vp_admin->link_state;
        vp_oper->state.qos_vport = vp_admin->qos_vport;

        if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
                work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;

        /* iterate over QPs owned by this slave, using UPDATE_QP */
        work->port = port;
        work->slave = slave;
        work->qos = vp_oper->state.default_qos;
        work->qos_vport = vp_oper->state.qos_vport;
        work->vlan_id = vp_oper->state.default_vlan;
        work->vlan_ix = vp_oper->vlan_idx;
        work->vlan_proto = vp_oper->state.vlan_proto;
        work->priv = priv;
        INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
        queue_work(priv->mfunc.master.comm_wq, &work->work);

        return 0;
}
static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
{
        struct mlx4_qos_manager *port_qos_ctl;
        struct mlx4_priv *priv = mlx4_priv(dev);

        port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
        bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);

        /* Enable only default prio at PF init routine */
        set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
}
static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
{
        int i;
        int err;
        int num_vfs;
        u16 available_vpp;
        u8 vpp_param[MLX4_NUM_UP];
        struct mlx4_qos_manager *port_qos;
        struct mlx4_priv *priv = mlx4_priv(dev);

        err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
        if (err) {
                mlx4_info(dev, "Failed query available VPPs\n");
                return;
        }

        port_qos = &priv->mfunc.master.qos_ctl[port];
        num_vfs = (available_vpp /
                   bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));

        for (i = 0; i < MLX4_NUM_UP; i++) {
                if (test_bit(i, port_qos->priority_bm))
                        vpp_param[i] = num_vfs;
                else
                        vpp_param[i] = 0;
        }

        err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
        if (err) {
                mlx4_info(dev, "Failed allocating VPPs\n");
                return;
        }

        /* Query actual allocated VPP, just to make sure */
        err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
        if (err) {
                mlx4_info(dev, "Failed query available VPPs\n");
                return;
        }

        port_qos->num_of_qos_vfs = num_vfs;
        mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp);

        for (i = 0; i < MLX4_NUM_UP; i++)
                mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
                         vpp_param[i]);
}
static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
{
        int port, err;
        struct mlx4_vport_state *vp_admin;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_slave_state *slave_state =
                &priv->mfunc.master.slave_state[slave];
        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
                        &priv->dev, slave);
        int min_port = find_first_bit(actv_ports.ports,
                                      priv->dev.caps.num_ports) + 1;
        int max_port = min_port - 1 +
                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

        for (port = min_port; port <= max_port; port++) {
                if (!test_bit(port - 1, actv_ports.ports))
                        continue;
                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
                        priv->mfunc.master.vf_admin[slave].enable_smi[port];
                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
                vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
                if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
                    slave_state->vst_qinq_supported) {
                        vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
                        vp_oper->state.default_vlan = vp_admin->default_vlan;
                        vp_oper->state.default_qos  = vp_admin->default_qos;
                }
                vp_oper->state.link_state = vp_admin->link_state;
                vp_oper->state.mac        = vp_admin->mac;
                vp_oper->state.spoofchk   = vp_admin->spoofchk;
                vp_oper->state.tx_rate    = vp_admin->tx_rate;
                vp_oper->state.qos_vport  = vp_admin->qos_vport;
                vp_oper->state.guid       = vp_admin->guid;

                if (MLX4_VGT != vp_admin->default_vlan) {
                        err = __mlx4_register_vlan(&priv->dev, port,
                                                   vp_admin->default_vlan, &(vp_oper->vlan_idx));
                        if (err) {
                                vp_oper->vlan_idx = NO_INDX;
                                vp_oper->state.default_vlan = MLX4_VGT;
                                vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
                                mlx4_warn(&priv->dev,
                                          "No vlan resources slave %d, port %d\n",
                                          slave, port);
                                return err;
                        }
                        mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
                                 (int)(vp_oper->state.default_vlan),
                                 vp_oper->vlan_idx, slave, port);
                }
                if (vp_admin->spoofchk) {
                        vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
                                                               port,
                                                               vp_admin->mac);
                        if (0 > vp_oper->mac_idx) {
                                err = vp_oper->mac_idx;
                                vp_oper->mac_idx = NO_INDX;
                                mlx4_warn(&priv->dev,
                                          "No mac resources slave %d, port %d\n",
                                          slave, port);
                                return err;
                        }
                        mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
                                 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
                }
        }
        return 0;
}
static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
{
        int port;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
                        &priv->dev, slave);
        int min_port = find_first_bit(actv_ports.ports,
                                      priv->dev.caps.num_ports) + 1;
        int max_port = min_port - 1 +
                bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

        for (port = min_port; port <= max_port; port++) {
                if (!test_bit(port - 1, actv_ports.ports))
                        continue;
                priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
                        MLX4_VF_SMI_DISABLED;
                vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
                if (NO_INDX != vp_oper->vlan_idx) {
                        __mlx4_unregister_vlan(&priv->dev,
                                               port, vp_oper->state.default_vlan);
                        vp_oper->vlan_idx = NO_INDX;
                }
                if (NO_INDX != vp_oper->mac_idx) {
                        __mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
                        vp_oper->mac_idx = NO_INDX;
                }
        }
}
static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
                               u16 param, u8 toggle)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
        u32 reply;
        u8 is_going_down = 0;
        int i;
        unsigned long flags;

        slave_state[slave].comm_toggle ^= 1;
        reply = (u32) slave_state[slave].comm_toggle << 31;
        if (toggle != slave_state[slave].comm_toggle) {
                mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
                          toggle, slave);
                goto reset_slave;
        }
        if (cmd == MLX4_COMM_CMD_RESET) {
                mlx4_warn(dev, "Received reset from slave:%d\n", slave);
                slave_state[slave].active = false;
                slave_state[slave].old_vlan_api = false;
                slave_state[slave].vst_qinq_supported = false;
                mlx4_master_deactivate_admin_state(priv, slave);
                for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
                        slave_state[slave].event_eq[i].eqn = -1;
                        slave_state[slave].event_eq[i].token = 0;
                }
                /* check if we are in the middle of the FLR process;
                 * if so, return "retry" status to the slave */
                if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
                        goto inform_slave_state;

                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);

                /* write the version in the event field */
                reply |= mlx4_comm_get_version();

                goto reset_slave;
        }
        /* command from slave in the middle of FLR */
        if (cmd != MLX4_COMM_CMD_RESET &&
            MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
                mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
                          slave, cmd);
                return;
        }

        switch (cmd) {
        case MLX4_COMM_CMD_VHCR0:
                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
                        goto reset_slave;
                slave_state[slave].vhcr_dma = ((u64) param) << 48;
                priv->mfunc.master.slave_state[slave].cookie = 0;
                break;
        case MLX4_COMM_CMD_VHCR1:
                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
                        goto reset_slave;
                slave_state[slave].vhcr_dma |= ((u64) param) << 32;
                break;
        case MLX4_COMM_CMD_VHCR2:
                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
                        goto reset_slave;
                slave_state[slave].vhcr_dma |= ((u64) param) << 16;
                break;
        case MLX4_COMM_CMD_VHCR_EN:
                if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
                        goto reset_slave;
                slave_state[slave].vhcr_dma |= param;
                if (mlx4_master_activate_admin_state(priv, slave))
                        goto reset_slave;
                slave_state[slave].active = true;
                mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
                break;
        case MLX4_COMM_CMD_VHCR_POST:
                if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
                    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
                        mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
                                  slave, cmd, slave_state[slave].last_cmd);
                        goto reset_slave;
                }

                mutex_lock(&priv->cmd.slave_cmd_mutex);
                if (mlx4_master_process_vhcr(dev, slave, NULL)) {
                        mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
                                 slave);
                        mutex_unlock(&priv->cmd.slave_cmd_mutex);
                        goto reset_slave;
                }
                mutex_unlock(&priv->cmd.slave_cmd_mutex);
                break;
        default:
                mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
                goto reset_slave;
        }
        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
        if (!slave_state[slave].is_slave_going_down)
                slave_state[slave].last_cmd = cmd;
        else
                is_going_down = 1;
        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
        if (is_going_down) {
                mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
                          cmd, slave);
                return;
        }
        __raw_writel((__force u32) cpu_to_be32(reply),
                     &priv->mfunc.comm[slave].slave_read);

        return;

reset_slave:
        /* cleanup any slave resources */
        if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
                mlx4_delete_all_resources_for_slave(dev, slave);

        if (cmd != MLX4_COMM_CMD_RESET) {
                mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
                          slave, cmd);
                /* Turn on internal error, letting the slave reset itself
                 * immediately; otherwise it might wait until the command
                 * timeout has passed.
                 */
                reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
        }

        spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
        if (!slave_state[slave].is_slave_going_down)
                slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
        spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
        /* with the slave in the middle of flr, no need to clean resources again. */
inform_slave_state:
        memset(&slave_state[slave].event_eq, 0,
               sizeof(struct mlx4_slave_event_eq_info));
        __raw_writel((__force u32) cpu_to_be32(reply),
                     &priv->mfunc.comm[slave].slave_read);
        wmb();
}
/* master command processing */
void mlx4_master_comm_channel(struct work_struct *work)
{
        struct mlx4_mfunc_master_ctx *master =
                container_of(work,
                             struct mlx4_mfunc_master_ctx,
                             comm_work);
        struct mlx4_mfunc *mfunc =
                container_of(master, struct mlx4_mfunc, master);
        struct mlx4_priv *priv =
                container_of(mfunc, struct mlx4_priv, mfunc);
        struct mlx4_dev *dev = &priv->dev;
        __be32 *bit_vec;
        u32 comm_cmd;
        u32 vec;
        int i, j, slave;
        int toggle;
        int served = 0;
        int reported = 0;
        u32 slt;

        bit_vec = master->comm_arm_bit_vector;
        for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
                vec = be32_to_cpu(bit_vec[i]);
                for (j = 0; j < 32; j++) {
                        if (!(vec & (1 << j)))
                                continue;
                        ++reported;
                        slave = (i * 32) + j;
                        comm_cmd = swab32(readl(
                                          &mfunc->comm[slave].slave_write));
                        slt = swab32(readl(&mfunc->comm[slave].slave_read))
                                     >> 31;
                        toggle = comm_cmd >> 31;
                        if (toggle != slt) {
                                if (master->slave_state[slave].comm_toggle
                                    != slt) {
                                        pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
                                                slave, slt,
                                                master->slave_state[slave].comm_toggle);
                                        master->slave_state[slave].comm_toggle =
                                                slt;
                                }
                                mlx4_master_do_cmd(dev, slave,
                                                   comm_cmd >> 16 & 0xff,
                                                   comm_cmd & 0xffff, toggle);
                                ++served;
                        }
                }
        }

        if (reported && reported != served)
                mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
                          reported, served);

        if (mlx4_ARM_COMM_CHANNEL(dev))
                mlx4_warn(dev, "Failed to arm comm channel events\n");
}
static int sync_toggles(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 wr_toggle;
	u32 rd_toggle;
	unsigned long end;

	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
	if (wr_toggle == 0xffffffff)
		end = jiffies + msecs_to_jiffies(30000);
	else
		end = jiffies + msecs_to_jiffies(5000);

	while (time_before(jiffies, end)) {
		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
			/* PCI might be offline */

			/* If device removal has been requested,
			 * do not continue retrying.
			 */
			if (dev->persist->interface_state &
			    MLX4_INTERFACE_STATE_NOWAIT) {
				mlx4_warn(dev,
					  "communication channel is offline\n");
				return -EIO;
			}

			msleep(100);
			wr_toggle = swab32(readl(&priv->mfunc.comm->
					   slave_write));
			continue;
		}

		if (rd_toggle >> 31 == wr_toggle >> 31) {
			priv->cmd.comm_toggle = rd_toggle >> 31;
			return 0;
		}

		cond_resched();
	}

	/*
	 * We could reach here if, for example, the previous VM using this
	 * function misbehaved and left the channel with an unsynced state.
	 * Fix this here and give this VM a chance to use a properly
	 * synced channel.
	 */
	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
	priv->cmd.comm_toggle = 0;

	return 0;
}
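
/* For illustration: the sync rule above reduces to comparing the two
 * toggle bits, i.e. the channel is considered synced exactly when
 * (rd_toggle >> 31) == (wr_toggle >> 31); a reading of 0xffffffff in
 * either register is treated as "PCI possibly offline" and retried
 * until the deadline.
 */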
int mlx4_multi_func_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i, j, err, port;

	if (mlx4_is_master(dev))
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev,
						   priv->fw.comm_bar) +
				priv->fw.comm_base, MLX4_COMM_PAGESIZE);
	else
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->persist->pdev, 2) +
				MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
	if (!priv->mfunc.comm) {
		mlx4_err(dev, "Couldn't map communication vector\n");
		goto err_vhcr;
	}

	if (mlx4_is_master(dev)) {
		struct mlx4_vf_oper_state *vf_oper;
		struct mlx4_vf_admin_state *vf_admin;

		priv->mfunc.master.slave_state =
			kcalloc(dev->num_slaves,
				sizeof(struct mlx4_slave_state),
				GFP_KERNEL);
		if (!priv->mfunc.master.slave_state)
			goto err_comm;

		priv->mfunc.master.vf_admin =
			kcalloc(dev->num_slaves,
				sizeof(struct mlx4_vf_admin_state),
				GFP_KERNEL);
		if (!priv->mfunc.master.vf_admin)
			goto err_comm_admin;

		priv->mfunc.master.vf_oper =
			kcalloc(dev->num_slaves,
				sizeof(struct mlx4_vf_oper_state),
				GFP_KERNEL);
		if (!priv->mfunc.master.vf_oper)
			goto err_comm_oper;

		for (i = 0; i < dev->num_slaves; ++i) {
			vf_admin = &priv->mfunc.master.vf_admin[i];
			vf_oper = &priv->mfunc.master.vf_oper[i];
			s_state = &priv->mfunc.master.slave_state[i];
			s_state->last_cmd = MLX4_COMM_CMD_RESET;
			s_state->vst_qinq_supported = false;
			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
				s_state->event_eq[j].eqn = -1;
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_write);
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_read);
			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
				struct mlx4_vport_state *admin_vport;
				struct mlx4_vport_state *oper_vport;

				s_state->vlan_filter[port] =
					kzalloc(sizeof(struct mlx4_vlan_fltr),
						GFP_KERNEL);
				if (!s_state->vlan_filter[port]) {
					if (--port)
						kfree(s_state->vlan_filter[port]);
					goto err_slaves;
				}

				admin_vport = &vf_admin->vport[port];
				oper_vport = &vf_oper->vport[port].state;
				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
				admin_vport->default_vlan = MLX4_VGT;
				oper_vport->default_vlan = MLX4_VGT;
				admin_vport->qos_vport =
					MLX4_VPP_DEFAULT_VPORT;
				oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
				admin_vport->vlan_proto = htons(ETH_P_8021Q);
				oper_vport->vlan_proto = htons(ETH_P_8021Q);
				vf_oper->vport[port].vlan_idx = NO_INDX;
				vf_oper->vport[port].mac_idx = NO_INDX;
				mlx4_set_random_admin_guid(dev, i, port);
			}
			spin_lock_init(&s_state->lock);
		}

		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
			for (port = 1; port <= dev->caps.num_ports; port++) {
				if (mlx4_is_eth(dev, port)) {
					mlx4_set_default_port_qos(dev, port);
					mlx4_allocate_port_vpps(dev, port);
				}
			}
		}

		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
		INIT_WORK(&priv->mfunc.master.comm_work,
			  mlx4_master_comm_channel);
		INIT_WORK(&priv->mfunc.master.slave_event_work,
			  mlx4_gen_slave_eqe);
		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
			  mlx4_master_handle_slave_flr);
		spin_lock_init(&priv->mfunc.master.slave_state_lock);
		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
		priv->mfunc.master.comm_wq =
			create_singlethread_workqueue("mlx4_comm");
		if (!priv->mfunc.master.comm_wq)
			goto err_slaves;

		if (mlx4_init_resource_tracker(dev))
			goto err_thread;
	} else {
		err = sync_toggles(dev);
		if (err) {
			mlx4_err(dev, "Couldn't sync toggles\n");
			goto err_comm;
		}
	}
	return 0;

err_thread:
	flush_workqueue(priv->mfunc.master.comm_wq);
	destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
	while (i--) {
		for (port = 1; port <= MLX4_MAX_PORTS; port++)
			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
	}
	kfree(priv->mfunc.master.vf_oper);
err_comm_oper:
	kfree(priv->mfunc.master.vf_admin);
err_comm_admin:
	kfree(priv->mfunc.master.slave_state);
err_comm:
	iounmap(priv->mfunc.comm);
	priv->mfunc.comm = NULL;
err_vhcr:
	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
			  priv->mfunc.vhcr,
			  priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;
	return -ENOMEM;
}
int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int flags = 0;

	if (!priv->cmd.initialized) {
		init_rwsem(&priv->cmd.switch_sem);
		mutex_init(&priv->cmd.slave_cmd_mutex);
		sema_init(&priv->cmd.poll_sem, 1);
		priv->cmd.use_events = 0;
		priv->cmd.toggle = 1;
		priv->cmd.initialized = 1;
		flags |= MLX4_CMD_CLEANUP_STRUCT;
	}

	if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
		priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
					0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
		if (!priv->cmd.hcr) {
			mlx4_err(dev, "Couldn't map command register\n");
			goto err;
		}
		flags |= MLX4_CMD_CLEANUP_HCR;
	}

	if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
		priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
						      PAGE_SIZE,
						      &priv->mfunc.vhcr_dma,
						      GFP_KERNEL);
		if (!priv->mfunc.vhcr)
			goto err;

		flags |= MLX4_CMD_CLEANUP_VHCR;
	}

	if (!priv->cmd.pool) {
		priv->cmd.pool = dma_pool_create("mlx4_cmd",
						 &dev->persist->pdev->dev,
						 MLX4_MAILBOX_SIZE,
						 MLX4_MAILBOX_SIZE, 0);
		if (!priv->cmd.pool)
			goto err;

		flags |= MLX4_CMD_CLEANUP_POOL;
	}

	return 0;

err:
	mlx4_cmd_cleanup(dev, flags);
	return -ENOMEM;
}
void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int slave;
	u32 slave_read;

	/* If the comm channel has not yet been initialized,
	 * skip reporting the internal error event to all
	 * the communication channels.
	 */
	if (!priv->mfunc.comm)
		return;

	/* Report an internal error event to all
	 * communication channels.
	 */
	for (slave = 0; slave < dev->num_slaves; slave++) {
		slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
		__raw_writel((__force u32)cpu_to_be32(slave_read),
			     &priv->mfunc.comm[slave].slave_read);
	}
}
void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, port;

	if (mlx4_is_master(dev)) {
		flush_workqueue(priv->mfunc.master.comm_wq);
		destroy_workqueue(priv->mfunc.master.comm_wq);
		for (i = 0; i < dev->num_slaves; i++) {
			for (port = 1; port <= MLX4_MAX_PORTS; port++)
				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
		}
		kfree(priv->mfunc.master.slave_state);
		kfree(priv->mfunc.master.vf_admin);
		kfree(priv->mfunc.master.vf_oper);
		dev->num_slaves = 0;
	}

	iounmap(priv->mfunc.comm);
	priv->mfunc.comm = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
		dma_pool_destroy(priv->cmd.pool);
		priv->cmd.pool = NULL;
	}

	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
		iounmap(priv->cmd.hcr);
		priv->cmd.hcr = NULL;
	}
	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
		priv->mfunc.vhcr = NULL;
	}
	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
		priv->cmd.initialized = 0;
}
/*
 * Switch to using events to issue FW commands (can only be called
 * after the event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err = 0;

	priv->cmd.context = kmalloc_array(priv->cmd.max_cmds,
					  sizeof(struct mlx4_cmd_context),
					  GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	if (mlx4_is_mfunc(dev))
		mutex_lock(&priv->cmd.slave_cmd_mutex);
	down_write(&priv->cmd.switch_sem);
	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next = i + 1;
		/* To support fatal error flow, initialize all
		 * cmd contexts to allow simulating completions
		 * with complete() at any time.
		 */
		init_completion(&priv->cmd.context[i].done);
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);

	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	down(&priv->cmd.poll_sem);
	priv->cmd.use_events = 1;
	up_write(&priv->cmd.switch_sem);
	if (mlx4_is_mfunc(dev))
		mutex_unlock(&priv->cmd.slave_cmd_mutex);

	return err;
}
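
/* For illustration: the token_mask loop above rounds max_cmds up to the
 * next power of two and then subtracts one, producing an all-ones mask;
 * e.g. max_cmds == 10 walks 1, 2, 4, 8, 16 and the final decrement
 * leaves token_mask == 0xf.
 */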
/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (mlx4_is_mfunc(dev))
		mutex_lock(&priv->cmd.slave_cmd_mutex);
	down_write(&priv->cmd.switch_sem);
	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);
	priv->cmd.context = NULL;

	up(&priv->cmd.poll_sem);
	up_write(&priv->cmd.switch_sem);
	if (mlx4_is_mfunc(dev))
		mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_zalloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				       &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
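
/* A minimal usage sketch (mirroring callers later in this file, e.g.
 * mlx4_get_counter_stats()):
 *
 *	struct mlx4_cmd_mailbox *mailbox = mlx4_alloc_cmd_mailbox(dev);
 *
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	... fill mailbox->buf, pass mailbox->dma to mlx4_cmd_box() ...
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */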
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
			   struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	dma_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
u32 mlx4_comm_get_version(void)
{
	return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
}
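
/* For illustration: an interface revision of 1 and a channel version of 1
 * pack to 0x0101; a peer can unpack the word with (v >> 8) & 0xff for the
 * interface revision and v & 0xff for the channel version.
 */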
static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
{
	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
			 vf, dev->persist->num_vfs);
		return -EINVAL;
	}

	return vf + 1;
}
int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
{
	if (slave < 1 || slave > dev->persist->num_vfs) {
		mlx4_err(dev,
			 "Bad slave number:%d (number of activated slaves: %lu)\n",
			 slave, dev->num_slaves);
		return -EINVAL;
	}
	return slave - 1;
}
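
/* The two helpers above encode the fixed mapping between VF numbers and
 * slave (function) indices: slave 0 is the PF, so VF n is slave n + 1.
 * For example, mlx4_get_slave_indx(dev, 0) returns 1, and
 * mlx4_get_vf_indx(dev, 1) returns 0.
 */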
void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context;
	int i;

	spin_lock(&priv->cmd.context_lock);
	if (priv->cmd.context) {
		for (i = 0; i < priv->cmd.max_cmds; ++i) {
			context = &priv->cmd.context[i];
			context->fw_status = CMD_STAT_INTERNAL_ERR;
			context->result =
				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
			complete(&context->done);
		}
	}
	spin_unlock(&priv->cmd.context_lock);
}
struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
{
	struct mlx4_active_ports actv_ports;
	int vf;

	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);

	if (slave == 0) {
		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
		return actv_ports;
	}

	vf = mlx4_get_vf_indx(dev, slave);
	if (vf < 0)
		return actv_ports;

	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
		   min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
		   dev->caps.num_ports));

	return actv_ports;
}
EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
{
	unsigned n;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port <= 0 || port > m)
		return -EINVAL;

	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
	if (port <= n)
		port = n + 1;

	return port;
}
EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
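
/* Worked example: for a VF whose only active port is physical port 2,
 * m == 1 and n == 1 (n is zero-based), so slave port 1 is remapped to
 * n + 1 == 2, while any port outside 1..m yields -EINVAL.
 */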
int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	if (test_bit(port - 1, actv_ports.ports))
		return port -
			find_first_bit(actv_ports.ports, dev->caps.num_ports);

	return -1;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
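
/* This is the inverse of mlx4_slave_convert_port(): for the same
 * single-port VF on physical port 2, 2 - find_first_bit() == 2 - 1 gives
 * slave port 1, and a physical port the VF does not own maps to -1.
 */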
struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
						   int port)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	if (port <= 0 || port > dev->caps.num_ports)
		return slaves_pport;

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (test_bit(port - 1, actv_ports.ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
		struct mlx4_dev *dev,
		const struct mlx4_active_ports *crit_ports)
{
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;

	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);

	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
		struct mlx4_active_ports actv_ports =
			mlx4_get_active_ports(dev, i);
		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
				 dev->caps.num_ports))
			set_bit(i, slaves_pport.slaves);
	}

	return slaves_pport;
}
EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
			+ 1;
	int max_port = min_port +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports);

	if (port < min_port)
		port = min_port;
	else if (port >= max_port)
		port = max_port - 1;

	return port;
}
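
/* For illustration: a slave owning ports {1, 2} has min_port == 1 and
 * max_port == 3, so a request for port 3 is clamped to max_port - 1 == 2
 * and a request for port 0 is raised to min_port == 1.
 */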
static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
			      int max_tx_rate)
{
	int i;
	int err;
	struct mlx4_qos_manager *port_qos;
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];

	port_qos = &priv->mfunc.master.qos_ctl[port];
	memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);

	if (slave > port_qos->num_of_qos_vfs) {
		mlx4_info(dev, "No available VPP resources for this VF\n");
		return -EINVAL;
	}

	/* Query for default QoS values from Vport 0 is needed */
	err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
		return err;
	}

	for (i = 0; i < MLX4_NUM_UP; i++) {
		if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
			vpp_qos[i].max_avg_bw = max_tx_rate;
			vpp_qos[i].enable = 1;
		} else {
			/* If the user supplied tx_rate == 0, no rate limit
			 * configuration is required, so we leave the value
			 * of max_avg_bw as queried from Vport 0.
			 */
			vpp_qos[i].enable = 0;
		}
	}

	err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
	if (err) {
		mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
		return err;
	}

	return 0;
}
static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
					struct mlx4_vport_state *vf_admin)
{
	struct mlx4_qos_manager *info;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
		return false;

	info = &priv->mfunc.master.qos_ctl[port];

	if (vf_admin->default_vlan != MLX4_VGT &&
	    test_bit(vf_admin->default_qos, info->priority_bm))
		return true;

	return false;
}
static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
				       struct mlx4_vport_state *vf_admin,
				       int vlan, int qos)
{
	struct mlx4_vport_state dummy_admin = {0};

	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
	    !vf_admin->tx_rate)
		return true;

	dummy_admin.default_qos = qos;
	dummy_admin.default_vlan = vlan;

	/* VF wants to move to another VST state which is valid with the
	 * current rate limit: either a different default vlan in VST or
	 * another supported QoS priority. Otherwise we don't allow this
	 * change while the TX rate is still configured.
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
		return true;

	mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
		  (vlan == MLX4_VGT) ? "VGT" : "VST");

	if (vlan != MLX4_VGT)
		mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);

	mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");

	return false;
}
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];

	if (s_info->spoofchk && is_zero_ether_addr(mac)) {
		mlx4_info(dev, "MAC invalidation is not allowed when spoofchk is on\n");
		return -EPERM;
	}

	s_info->mac = mlx4_mac_to_u64(mac);
	mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
		  vf, port, s_info->mac);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
		     __be16 proto)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *vf_admin;
	struct mlx4_slave_state *slave_state;
	struct mlx4_vport_oper_state *vf_oper;
	int slave;

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
		return -EPROTONOSUPPORT;

	if ((vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (proto == htons(ETH_P_8021AD) &&
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
		return -EPROTONOSUPPORT;

	if (proto != htons(ETH_P_8021Q) &&
	    proto != htons(ETH_P_8021AD))
		return -EINVAL;

	if ((proto == htons(ETH_P_8021AD)) &&
	    ((vlan == 0) || (vlan == MLX4_VGT)))
		return -EINVAL;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	slave_state = &priv->mfunc.master.slave_state[slave];
	if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
	    (!slave_state->vst_qinq_supported)) {
		mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
		return -EPROTONOSUPPORT;
	}
	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
		return -EPERM;

	if ((0 == vlan) && (0 == qos))
		vf_admin->default_vlan = MLX4_VGT;
	else
		vf_admin->default_vlan = vlan;
	vf_admin->default_qos = qos;
	vf_admin->vlan_proto = proto;

	/* If a rate was configured prior to VST, we saved the configured
	 * rate in vf_admin->tx_rate; now, if the priority is supported,
	 * we enforce the QoS.
	 */
	if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
	    vf_admin->tx_rate)
		vf_admin->qos_vport = slave;

	/* Try to activate the new vf state without restart;
	 * this option is not supported while moving to VST QinQ mode.
	 */
	if ((proto == htons(ETH_P_8021AD) &&
	     vf_oper->state.vlan_proto != proto) ||
	    mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_info(dev,
			  "updating vf %d port %d config will take effect on next VF restart\n",
			  vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
		     int max_tx_rate)
{
	int err;
	int slave;
	struct mlx4_vport_state *vf_admin;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
		return -EPROTONOSUPPORT;

	if (min_tx_rate) {
		mlx4_info(dev, "Minimum BW share not supported\n");
		return -EPROTONOSUPPORT;
	}

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];

	err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
	if (err) {
		mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
			  max_tx_rate);
		return err;
	}

	vf_admin->tx_rate = max_tx_rate;
	/* If the VF is not in a supported mode (VST with supported prio),
	 * we do not change the vport configuration for its QPs, but save
	 * the rate, so it will be enforced when the VF moves to a
	 * supported mode.
	 */
	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
		mlx4_info(dev,
			  "rate set for VF %d when not in valid state\n", vf);

		if (vf_admin->default_vlan != MLX4_VGT)
			mlx4_info(dev, "VST priority not supported by QoS\n");
		else
			mlx4_info(dev, "VF in VGT mode (needed VST)\n");

		mlx4_info(dev,
			  "rate %d takes effect when VF moves to valid state\n",
			  max_tx_rate);
		return 0;
	}

	/* If the user sets rate 0, assign the default vport for its QPs */
	vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;

	if (priv->mfunc.master.slave_state[slave].active &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
/* mlx4_get_slave_default_vlan -
 * return true if the slave is in VST mode (i.e. has a default vlan);
 * if so, also return the vlan and qos through the (optional) out pointers
 */
bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
				 u16 *vlan, u8 *qos)
{
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;

	priv = mlx4_priv(dev);
	port = mlx4_slaves_closest_port(dev, slave, port);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		if (vlan)
			*vlan = vp_oper->state.default_vlan;
		if (qos)
			*qos = vp_oper->state.default_qos;
		return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 mac[ETH_ALEN];

	if ((!mlx4_is_master(dev)) ||
	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];

	mlx4_u64_to_mac(mac, s_info->mac);
	if (setting && !is_valid_ether_addr(mac)) {
		mlx4_info(dev, "Illegal MAC with spoofchk\n");
		return -EPERM;
	}

	s_info->spoofchk = setting;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	ivf->vf = vf;

	/* need to convert it to a func */
	ivf->mac[0] = ((s_info->mac >> (5 * 8)) & 0xff);
	ivf->mac[1] = ((s_info->mac >> (4 * 8)) & 0xff);
	ivf->mac[2] = ((s_info->mac >> (3 * 8)) & 0xff);
	ivf->mac[3] = ((s_info->mac >> (2 * 8)) & 0xff);
	ivf->mac[4] = ((s_info->mac >> (1 * 8)) & 0xff);
	ivf->mac[5] = ((s_info->mac) & 0xff);

	ivf->vlan	= s_info->default_vlan;
	ivf->qos	= s_info->default_qos;
	ivf->vlan_proto	= s_info->vlan_proto;

	if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
		ivf->max_tx_rate = s_info->tx_rate;
	else
		ivf->max_tx_rate = 0;

	ivf->min_tx_rate	= 0;
	ivf->spoofchk	= s_info->spoofchk;
	ivf->linkstate	= s_info->link_state;

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
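
/* As the byte extraction above shows, s_info->mac keeps the station
 * address in the low 48 bits of a u64, most significant octet first;
 * e.g. 0x0002c9abcdefULL unpacks to 00:02:c9:ab:cd:ef.
 */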
int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vport_state *s_info;
	int slave;
	u8 link_stat_event;

	slave = mlx4_get_slave_indx(dev, vf);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		/* get current link state */
		if (!priv->sense.do_sense_port[port])
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		else
			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	case IFLA_VF_LINK_STATE_ENABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
		break;

	case IFLA_VF_LINK_STATE_DISABLE:
		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
		break;

	default:
		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
			  link_state, slave, port);
		return -EINVAL;
	}
	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
	s_info->link_state = link_state;

	/* send event */
	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);

	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
		mlx4_dbg(dev,
			 "updating vf %d port %d no link state HW enforcement\n",
			 vf, port);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
			   struct mlx4_counter *counter_stats, int reset)
{
	struct mlx4_cmd_mailbox *mailbox = NULL;
	struct mlx4_counter *tmp_counter;
	int err;
	u32 if_stat_in_mod;

	if (!counter_stats)
		return -EINVAL;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
	if_stat_in_mod = counter_index;
	if (reset)
		if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
	err = mlx4_cmd_box(dev, 0, mailbox->dma,
			   if_stat_in_mod, 0,
			   MLX4_CMD_QUERY_IF_STAT,
			   MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err) {
		mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
			 __func__, counter_index);
		goto if_stat_out;
	}
	tmp_counter = (struct mlx4_counter *)mailbox->buf;
	counter_stats->counter_mode = tmp_counter->counter_mode;
	if (counter_stats->counter_mode == 0) {
		counter_stats->rx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
				    be64_to_cpu(tmp_counter->rx_frames));
		counter_stats->tx_frames =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
				    be64_to_cpu(tmp_counter->tx_frames));
		counter_stats->rx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
				    be64_to_cpu(tmp_counter->rx_bytes));
		counter_stats->tx_bytes =
			cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
				    be64_to_cpu(tmp_counter->tx_bytes));
	}

if_stat_out:
	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
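
/* A minimal usage sketch (local names here are hypothetical): the helper
 * above accumulates into *counter_stats and leaves the fields big-endian,
 * so zero the struct before the first call and byte-swap on read:
 *
 *	struct mlx4_counter stats;
 *	u64 rx_frames;
 *
 *	memset(&stats, 0, sizeof(stats));
 *	if (!mlx4_get_counter_stats(dev, counter_index, &stats, 0))
 *		rx_frames = be64_to_cpu(stats.rx_frames);
 */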
int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
		      struct ifla_vf_stats *vf_stats)
{
	struct mlx4_counter tmp_vf_stats;
	int slave;
	int err = 0;

	if (!vf_stats)
		return -EINVAL;

	if (!mlx4_is_master(dev))
		return -EPROTONOSUPPORT;

	slave = mlx4_get_slave_indx(dev, vf_idx);
	if (slave < 0)
		return -EINVAL;

	port = mlx4_slaves_closest_port(dev, slave, port);
	err = mlx4_calc_vf_counters(dev, slave, port, &tmp_vf_stats);
	if (!err && tmp_vf_stats.counter_mode == 0) {
		vf_stats->rx_packets = be64_to_cpu(tmp_vf_stats.rx_frames);
		vf_stats->tx_packets = be64_to_cpu(tmp_vf_stats.tx_frames);
		vf_stats->rx_bytes = be64_to_cpu(tmp_vf_stats.rx_bytes);
		vf_stats->tx_bytes = be64_to_cpu(tmp_vf_stats.tx_bytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_vf_stats);
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (slave == mlx4_master_func_num(dev))
		return 1;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS)
		return 0;

	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
		MLX4_VF_SMI_ENABLED;
}
EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
				 int enabled)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
			&priv->dev, slave);
	int min_port = find_first_bit(actv_ports.ports,
				      priv->dev.caps.num_ports) + 1;
	int max_port = min_port - 1 +
		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);

	if (slave == mlx4_master_func_num(dev))
		return 0;

	if (slave < 1 || slave >= dev->num_slaves ||
	    port < 1 || port > MLX4_MAX_PORTS ||
	    enabled < 0 || enabled > 1)
		return -EINVAL;

	if (min_port == max_port && dev->caps.num_ports > 1) {
		mlx4_info(dev, "SMI access disallowed for single ported VFs\n");
		return -EPROTONOSUPPORT;
	}

	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);