/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>
#include <linux/semaphore.h>

#include <asm/io.h>

#include "mlx4.h"
#include "fw.h"
#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK 0xffffffffffffff00ULL

#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1
enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE	= 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};
enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,
	HCR_T_BIT		= 21,
	HCR_E_BIT		= 22,
	HCR_GO_BIT		= 23
};

enum {
	GO_BIT_TIMEOUT_MSECS	= 10000
};
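/*
 * For orientation, the layout these offsets imply (a sketch inferred from
 * the HCR accessors in this file, not from a hardware spec): the HCR is
 * seven big-endian 32-bit words. Words 0-1 hold the 64-bit input
 * parameter, word 2 the input modifier, words 3-4 the 64-bit output
 * parameter, word 5 the token in its high 16 bits, and word 6 the status
 * word carrying the go/event/toggle bits, the opcode modifier and the
 * opcode itself.
 */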
struct mlx4_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u8			fw_status;
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr);
static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
		[CMD_STAT_BAD_OP]	  = -EPERM,
		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
		[CMD_STAT_BAD_INDEX]	  = -EBADF,
		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
		[CMD_STAT_BAD_QP_STATE]	  = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
		[CMD_STAT_REG_BOUND]	  = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
		[CMD_STAT_BAD_PKT]	  = -EINVAL,
		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}
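/* For example, a firmware status of CMD_STAT_RESOURCE_BUSY (0x06) reaches
 * callers of this translation as -EBUSY via the table above. */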
static u8 mlx4_errno_to_status(int errno)
{
	switch (errno) {
	case -EPERM:
		return CMD_STAT_BAD_OP;
	case -EINVAL:
		return CMD_STAT_BAD_PARAM;
	case -ENXIO:
		return CMD_STAT_BAD_SYS_STATE;
	case -EBUSY:
		return CMD_STAT_RESOURCE_BUSY;
	case -ENOMEM:
		return CMD_STAT_EXCEED_LIM;
	case -ENFILE:
		return CMD_STAT_ICM_ERROR;
	default:
		return CMD_STAT_INTERNAL_ERR;
	}
}
static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}
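/*
 * A note on the handshake comm_pending() relies on (my reading of this
 * file, not an authoritative protocol description): the slave flips the
 * top bit of slave_write each time it posts a command, and the master
 * acknowledges by echoing that toggle into slave_read. The channel is
 * therefore still "pending" while the toggle read back from slave_read
 * has not yet caught up with the locally cached comm_toggle.
 */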
static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	mmiowb();
}
static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
			      unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long end;
	int err = 0;
	int ret_from_pending = 0;

	/* First, verify that the master reports correct status */
	if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle. "
			  "my toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

	/* Write command */
	down(&priv->cmd.poll_sem);
	mlx4_comm_cmd_post(dev, cmd, param);

	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();
	ret_from_pending = comm_pending(dev);
	if (ret_from_pending) {
		/* check if the slave is trying to boot in the middle of
		 * FLR process. The only non-zero result in the RESET command
		 * is MLX4_DELAY_RESET_SLAVE */
		if (MLX4_COMM_CMD_RESET == cmd) {
			mlx4_warn(dev, "Got slave FLRed from Communication"
				  " channel (ret:0x%x)\n", ret_from_pending);
			err = MLX4_DELAY_RESET_SLAVE;
		} else {
			mlx4_warn(dev, "Communication channel timed out\n");
			err = -ETIMEDOUT;
		}
	}

	up(&priv->cmd.poll_sem);
	return err;
}
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
			      u16 param, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	mlx4_comm_cmd_post(dev, op, param);

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		err = -EBUSY;
		goto out;
	}

	err = context->result;
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, context->fw_status);
		goto out;
	}

out:
	/* wait for comm channel ready
	 * this is necessary to prevent a race
	 * when switching between event and polling mode
	 */
	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();

	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  unsigned long timeout)
{
	if (mlx4_priv(dev)->cmd.use_events)
		return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}
static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EAGAIN;
	unsigned long end;

	mutex_lock(&cmd->hcr_mutex);

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	mutex_unlock(&cmd->hcr_mutex);
	return ret;
}
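/*
 * Restating the doorbell word written to hcr + 6 above: bit 23 is the
 * "go" bit handing ownership of the HCR to firmware, bit 22 requests a
 * completion event, bit 21 carries the toggle that is flipped on every
 * command, the opcode modifier sits at HCR_OPMOD_SHIFT (bit 12) and the
 * opcode occupies the low bits.
 */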
static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			  int out_is_imm, u32 in_modifier, u8 op_modifier,
			  u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
	int ret;

	down(&priv->cmd.slave_sem);
	vhcr->in_param = cpu_to_be64(in_param);
	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
	vhcr->in_modifier = cpu_to_be32(in_modifier);
	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
	vhcr->status = 0;
	vhcr->flags = !!(priv->cmd.use_events) << 6;
	if (mlx4_is_master(dev)) {
		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while "
						 "output mailbox is NULL for "
						 "command 0x%x\n", op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		}
	} else {
		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
				    MLX4_COMM_TIME + timeout);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while "
						 "output mailbox is NULL for "
						 "command 0x%x\n", op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		} else
			mlx4_err(dev, "failed execution of VHCR_POST command "
				 "opcode 0x%x\n", op);
	}
	up(&priv->cmd.slave_sem);
	return ret;
}
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

	down(&priv->cmd.poll_sem);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end))
		cond_resched();

	if (cmd_pending(dev)) {
		err = -ETIMEDOUT;
		goto out;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
	stat = be32_to_cpu((__force __be32)
			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
	err = mlx4_status_to_errno(stat);
	if (err)
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, stat);

out:
	up(&priv->cmd.poll_sem);
	return err;
}
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->fw_status = status;
	context->result    = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
		      in_modifier, op_modifier, op, context->token, 1);

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		err = -EBUSY;
		goto out;
	}

	err = context->result;
	if (err) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, context->fw_status);
		goto out;
	}

	if (out_is_imm)
		*out_param = context->out_param;

out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
	       int out_is_imm, u32 in_modifier, u8 op_modifier,
	       u16 op, unsigned long timeout, int native)
{
	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
		if (mlx4_priv(dev)->cmd.use_events)
			return mlx4_cmd_wait(dev, in_param, out_param,
					     out_is_imm, in_modifier,
					     op_modifier, op, timeout);
		else
			return mlx4_cmd_poll(dev, in_param, out_param,
					     out_is_imm, in_modifier,
					     op_modifier, op, timeout);
	}
	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
			      in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
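/*
 * A hedged usage sketch: callers normally reach __mlx4_cmd() through the
 * mlx4_cmd()/mlx4_cmd_imm() inline wrappers declared in <linux/mlx4/cmd.h>,
 * which fill in the out_param/out_is_imm arguments. Issuing a NOP might
 * look like this (illustrative only, mirroring similar calls elsewhere in
 * the mlx4 driver):
 */
#if 0
	err = mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (err)
		mlx4_err(dev, "NOP command failed\n");
#endif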
static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
			   int slave, u64 slave_addr,
			   int size, int is_read)
{
	u64 in_param;
	u64 out_param;

	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
			 "master_addr:0x%llx slave_id:%d size:%d\n",
			 slave_addr, master_addr, slave, size);
		return -EINVAL;
	}

	if (is_read) {
		in_param = (u64) slave | slave_addr;
		out_param = (u64) dev->caps.function | master_addr;
	} else {
		in_param = (u64) dev->caps.function | master_addr;
		out_param = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	u64 in_param;
	u64 out_param;
	int err;

	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
	if (cmd->encode_slave_id) {
		in_param &= 0xffffffffffffff00ll;
		in_param |= slave;
	}

	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
	if (cmd->out_is_imm)
		vhcr->out_param = out_param;

	return err;
}
static struct mlx4_cmd_info cmd_info[] = {
	{
		.opcode = MLX4_CMD_QUERY_FW,
		.encode_slave_id = false,
	},
	{
		.opcode = MLX4_CMD_QUERY_HCA,
		.encode_slave_id = false,
	},
	{
		.opcode = MLX4_CMD_QUERY_DEV_CAP,
		.encode_slave_id = false,
	},
	{
		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
		.encode_slave_id = false,
		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_ADAPTER,
		.encode_slave_id = false,
	},
	{
		.opcode = MLX4_CMD_INIT_PORT,
		.encode_slave_id = false,
		.wrapper = mlx4_INIT_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_CLOSE_PORT,
		.encode_slave_id = false,
		.wrapper = mlx4_CLOSE_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_PORT,
		.encode_slave_id = false,
		.wrapper = mlx4_QUERY_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_PORT,
		.encode_slave_id = false,
		.wrapper = mlx4_SET_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_MAP_EQ,
		.encode_slave_id = false,
		.wrapper = mlx4_MAP_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_EQ,
		.encode_slave_id = true,
		.wrapper = mlx4_SW2HW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
		.encode_slave_id = false,
	},
	{
		.opcode = MLX4_CMD_NOP,
		.encode_slave_id = false,
	},
	{
		.opcode = MLX4_CMD_ALLOC_RES,
		.encode_slave_id = false,
		.wrapper = mlx4_ALLOC_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_FREE_RES,
		.encode_slave_id = false,
		.wrapper = mlx4_FREE_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_MPT,
		.encode_slave_id = true,
		.wrapper = mlx4_SW2HW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_MPT,
		.encode_slave_id = false,
		.wrapper = mlx4_QUERY_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_MPT,
		.encode_slave_id = false,
		.wrapper = mlx4_HW2SW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_READ_MTT,
		.encode_slave_id = false,
	},
	{
		.opcode = MLX4_CMD_WRITE_MTT,
		.encode_slave_id = false,
		.wrapper = mlx4_WRITE_MTT_wrapper
	},
	{
		.opcode = MLX4_CMD_SYNC_TPT,
		.encode_slave_id = false,
	},
	{
		.opcode = MLX4_CMD_HW2SW_EQ,
		.encode_slave_id = true,
		.wrapper = mlx4_HW2SW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_EQ,
		.encode_slave_id = true,
		.wrapper = mlx4_QUERY_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_CQ,
		.encode_slave_id = true,
		.wrapper = mlx4_SW2HW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_CQ,
		.encode_slave_id = false,
		.wrapper = mlx4_HW2SW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_CQ,
		.encode_slave_id = false,
		.wrapper = mlx4_QUERY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_MODIFY_CQ,
		.encode_slave_id = false,
		.wrapper = mlx4_MODIFY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_SRQ,
		.encode_slave_id = true,
		.wrapper = mlx4_SW2HW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_SRQ,
		.encode_slave_id = false,
		.wrapper = mlx4_HW2SW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_SRQ,
		.encode_slave_id = false,
		.wrapper = mlx4_QUERY_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_ARM_SRQ,
		.encode_slave_id = false,
		.wrapper = mlx4_ARM_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_RST2INIT_QP,
		.encode_slave_id = true,
		.wrapper = mlx4_RST2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2INIT_QP,
		.encode_slave_id = false,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2RTR_QP,
		.encode_slave_id = false,
		.wrapper = mlx4_INIT2RTR_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTR2RTS_QP,
		.encode_slave_id = false,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2RTS_QP,
		.encode_slave_id = false,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQERR2RTS_QP,
		.encode_slave_id = false,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2ERR_QP,
		.encode_slave_id = false,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2SQD_QP,
		.encode_slave_id = false,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2SQD_QP,
		.encode_slave_id = false,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2RTS_QP,
		.encode_slave_id = false,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2RST_QP,
		.encode_slave_id = false,
		.wrapper = mlx4_2RST_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_QP,
		.encode_slave_id = false,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SUSPEND_QP,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UNSUSPEND_QP,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_IF_STAT,
		.out_is_imm = false,
		.encode_slave_id = false,
		.wrapper = mlx4_QUERY_IF_STAT_wrapper
	},
	/* Native multicast commands are not available for guests */
	{
		.opcode = MLX4_CMD_QP_ATTACH,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.wrapper = mlx4_QP_ATTACH_wrapper
	},
	{
		.opcode = MLX4_CMD_PROMISC,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.wrapper = mlx4_PROMISC_wrapper
	},
	/* Ethernet specific commands */
	{
		.opcode = MLX4_CMD_SET_VLAN_FLTR,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_MCAST_FLTR,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_DUMP_ETH_STATS,
		.out_is_imm = false,
		.encode_slave_id = false,
		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
	},
	{
		.opcode = MLX4_CMD_INFORM_FLR_DONE,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
	},
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_info *cmd = NULL;
	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
	struct mlx4_vhcr *vhcr;
	struct mlx4_cmd_mailbox *inbox = NULL;
	struct mlx4_cmd_mailbox *outbox = NULL;
	u64 in_param;
	u64 out_param;
	int ret = 0;
	int i;
	int err = 0;

	/* Create sw representation of Virtual HCR */
	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
	if (!vhcr)
		return -ENOMEM;

	/* DMA in the vHCR */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
					    MLX4_ACCESS_MEM_ALIGN), 1);
		if (ret) {
			mlx4_err(dev, "%s: Failed reading vhcr "
				 "ret: 0x%x\n", __func__, ret);
			kfree(vhcr);
			return ret;
		}
	}

	/* Fill SW VHCR fields */
	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
	vhcr->token = be16_to_cpu(vhcr_cmd->token);
	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

	/* Lookup command */
	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
		if (vhcr->op == cmd_info[i].opcode) {
			cmd = &cmd_info[i];
			break;
		}
	}
	if (!cmd) {
		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
			 vhcr->op, slave);
		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
		goto out_status;
	}

	/* Read inbox */
	if (cmd->has_inbox) {
		vhcr->in_param &= INBOX_MASK;
		inbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(inbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			inbox = NULL;
			goto out_status;
		}

		if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
				    vhcr->in_param,
				    MLX4_MAILBOX_SIZE, 1)) {
			mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
				 __func__, cmd->opcode);
			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
			goto out_status;
		}
	}

	/* Apply permission and bound checks if applicable */
	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
			  "checks for resource_id:%d\n", vhcr->op, slave,
			  vhcr->in_modifier);
		vhcr_cmd->status = CMD_STAT_BAD_OP;
		goto out_status;
	}

	/* Allocate outbox */
	if (cmd->has_outbox) {
		outbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(outbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			outbox = NULL;
			goto out_status;
		}
	}

	/* Execute the command! */
	if (cmd->wrapper) {
		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
				   cmd);
		if (cmd->out_is_imm)
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
	} else {
		in_param = cmd->has_inbox ? (u64) inbox->dma :
			vhcr->in_param;
		out_param = cmd->has_outbox ? (u64) outbox->dma :
			vhcr->out_param;
		err = __mlx4_cmd(dev, in_param, &out_param,
				 cmd->out_is_imm, vhcr->in_modifier,
				 vhcr->op_modifier, vhcr->op,
				 MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_NATIVE);

		if (cmd->out_is_imm) {
			vhcr->out_param = out_param;
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
		}
	}

	if (err) {
		mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
			  " error:%d, status %d\n",
			  vhcr->op, slave, vhcr->errno, err);
		vhcr_cmd->status = mlx4_errno_to_status(err);
		goto out_status;
	}

	/* Write outbox if command completed successfully */
	if (cmd->has_outbox && !vhcr_cmd->status) {
		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
				      vhcr->out_param,
				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
		if (ret) {
			/* If we failed to write back the outbox after the
			 * command was successfully executed, we must fail this
			 * slave, as it is now in an undefined state */
			mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
			goto out;
		}
	}

out_status:
	/* DMA back vhcr result */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr),
					    MLX4_ACCESS_MEM_ALIGN),
				      MLX4_CMD_WRAPPED);
		if (ret)
			mlx4_err(dev, "%s:Failed writing vhcr result\n",
				 __func__);
		else if (vhcr->e_bit &&
			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
			mlx4_warn(dev, "Failed to generate command completion "
				  "eqe for slave %d\n", slave);
	}

out:
	kfree(vhcr);
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
			       u16 param, u8 toggle)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	u32 reply;
	u32 slave_status = 0;
	u8 is_going_down = 0;
	int i;

	slave_state[slave].comm_toggle ^= 1;
	reply = (u32) slave_state[slave].comm_toggle << 31;
	if (toggle != slave_state[slave].comm_toggle) {
		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER "
			  "STATE COMPROMISED ***\n", toggle, slave);
		goto reset_slave;
	}
	if (cmd == MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
		slave_state[slave].active = false;
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
			slave_state[slave].event_eq[i].eqn = -1;
			slave_state[slave].event_eq[i].token = 0;
		}
		/* check if we are in the middle of FLR process,
		 * if so return "retry" status to the slave */
		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
			slave_status = MLX4_DELAY_RESET_SLAVE;
			goto inform_slave_state;
		}

		/* write the version in the event field */
		reply |= mlx4_comm_get_version();

		goto reset_slave;
	}
	/* command from slave in the middle of FLR */
	if (cmd != MLX4_COMM_CMD_RESET &&
	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
		mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) "
			  "in the middle of FLR\n", slave, cmd);
		return;
	}

	switch (cmd) {
	case MLX4_COMM_CMD_VHCR0:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
			goto reset_slave;
		slave_state[slave].vhcr_dma = ((u64) param) << 48;
		priv->mfunc.master.slave_state[slave].cookie = 0;
		mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
		break;
	case MLX4_COMM_CMD_VHCR1:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
		break;
	case MLX4_COMM_CMD_VHCR2:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
		break;
	case MLX4_COMM_CMD_VHCR_EN:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
			goto reset_slave;
		slave_state[slave].vhcr_dma |= param;
		slave_state[slave].active = true;
		break;
	case MLX4_COMM_CMD_VHCR_POST:
		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
			goto reset_slave;
		down(&priv->cmd.slave_sem);
		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
			mlx4_err(dev, "Failed processing vhcr for slave:%d,"
				 " resetting slave.\n", slave);
			up(&priv->cmd.slave_sem);
			goto reset_slave;
		}
		up(&priv->cmd.slave_sem);
		break;
	default:
		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
		goto reset_slave;
	}
	spin_lock(&priv->mfunc.master.slave_state_lock);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = cmd;
	else
		is_going_down = 1;
	spin_unlock(&priv->mfunc.master.slave_state_lock);
	if (is_going_down) {
		mlx4_warn(dev, "Slave is going down aborting command(%d)"
			  " executing from slave:%d\n",
			  cmd, slave);
		return;
	}
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	mmiowb();

	return;

reset_slave:
	/* cleanup any slave resources */
	mlx4_delete_all_resources_for_slave(dev, slave);
	spin_lock(&priv->mfunc.master.slave_state_lock);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
	spin_unlock(&priv->mfunc.master.slave_state_lock);
	/* with slave in the middle of flr, no need to clean resources again. */
inform_slave_state:
	memset(&slave_state[slave].event_eq, 0,
	       sizeof(struct mlx4_slave_event_eq_info));
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	wmb();
}
/* master command processing */
void mlx4_master_comm_channel(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work,
			     struct mlx4_mfunc_master_ctx,
			     comm_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	__be32 *bit_vec;
	u32 comm_cmd;
	u32 vec;
	int i, j, slave;
	int toggle;
	int served = 0;
	int reported = 0;
	u32 slt;

	bit_vec = master->comm_arm_bit_vector;
	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
		vec = be32_to_cpu(bit_vec[i]);
		for (j = 0; j < 32; j++) {
			if (!(vec & (1 << j)))
				continue;
			++reported;
			slave = (i * 32) + j;
			comm_cmd = swab32(readl(
					  &mfunc->comm[slave].slave_write));
			slt = swab32(readl(&mfunc->comm[slave].slave_read))
				     >> 31;
			toggle = comm_cmd >> 31;
			if (toggle != slt) {
				if (master->slave_state[slave].comm_toggle
				    != slt) {
					printk(KERN_INFO "slave %d out of sync."
					       " read toggle %d, state toggle %d. "
					       "Resynching.\n", slave, slt,
					       master->slave_state[slave].comm_toggle);
					master->slave_state[slave].comm_toggle =
						slt;
				}
				mlx4_master_do_cmd(dev, slave,
						   comm_cmd >> 16 & 0xff,
						   comm_cmd & 0xffff, toggle);
				++served;
			}
		}
	}

	if (reported && reported != served)
		mlx4_warn(dev, "Got command event with bitmask from %d slaves"
			  " but %d were served\n",
			  reported, served);

	if (mlx4_ARM_COMM_CHANNEL(dev))
		mlx4_warn(dev, "Failed to arm comm channel events\n");
}
static int sync_toggles(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int wr_toggle;
	int rd_toggle;
	unsigned long end;

	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
	end = jiffies + msecs_to_jiffies(5000);

	while (time_before(jiffies, end)) {
		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
		if (rd_toggle == wr_toggle) {
			priv->cmd.comm_toggle = rd_toggle;
			return 0;
		}

		cond_resched();
	}

	/*
	 * we could reach here if for example the previous VM using this
	 * function misbehaved and left the channel with unsynced state. We
	 * should fix this here and give this VM a chance to use a properly
	 * synced channel
	 */
	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
	priv->cmd.comm_toggle = 0;

	return 0;
}
int mlx4_multi_func_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i, j, err, port;

	priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
					      &priv->mfunc.vhcr_dma,
					      GFP_KERNEL);
	if (!priv->mfunc.vhcr) {
		mlx4_err(dev, "Couldn't allocate vhcr.\n");
		return -ENOMEM;
	}

	if (mlx4_is_master(dev))
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
				priv->fw.comm_base, MLX4_COMM_PAGESIZE);
	else
		priv->mfunc.comm =
			ioremap(pci_resource_start(dev->pdev, 2) +
				MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
	if (!priv->mfunc.comm) {
		mlx4_err(dev, "Couldn't map communication vector.\n");
		goto err_vhcr;
	}

	if (mlx4_is_master(dev)) {
		priv->mfunc.master.slave_state =
			kzalloc(dev->num_slaves *
				sizeof(struct mlx4_slave_state), GFP_KERNEL);
		if (!priv->mfunc.master.slave_state)
			goto err_comm;

		for (i = 0; i < dev->num_slaves; ++i) {
			s_state = &priv->mfunc.master.slave_state[i];
			s_state->last_cmd = MLX4_COMM_CMD_RESET;
			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
				s_state->event_eq[j].eqn = -1;
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_write);
			__raw_writel((__force u32) 0,
				     &priv->mfunc.comm[i].slave_read);
			mmiowb();
			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
				s_state->vlan_filter[port] =
					kzalloc(sizeof(struct mlx4_vlan_fltr),
						GFP_KERNEL);
				if (!s_state->vlan_filter[port]) {
					if (--port)
						kfree(s_state->vlan_filter[port]);
					goto err_slaves;
				}
				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
			}
			spin_lock_init(&s_state->lock);
		}

		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
		INIT_WORK(&priv->mfunc.master.comm_work,
			  mlx4_master_comm_channel);
		INIT_WORK(&priv->mfunc.master.slave_event_work,
			  mlx4_gen_slave_eqe);
		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
			  mlx4_master_handle_slave_flr);
		spin_lock_init(&priv->mfunc.master.slave_state_lock);
		priv->mfunc.master.comm_wq =
			create_singlethread_workqueue("mlx4_comm");
		if (!priv->mfunc.master.comm_wq)
			goto err_slaves;

		if (mlx4_init_resource_tracker(dev))
			goto err_thread;

		sema_init(&priv->cmd.slave_sem, 1);
		err = mlx4_ARM_COMM_CHANNEL(dev);
		if (err) {
			mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
				 err);
			goto err_resource;
		}

	} else {
		err = sync_toggles(dev);
		if (err) {
			mlx4_err(dev, "Couldn't sync toggles\n");
			goto err_comm;
		}

		sema_init(&priv->cmd.slave_sem, 1);
	}
	return 0;

err_resource:
	mlx4_free_resource_tracker(dev);
err_thread:
	flush_workqueue(priv->mfunc.master.comm_wq);
	destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
	while (--i) {
		for (port = 1; port <= MLX4_MAX_PORTS; port++)
			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
	}
	kfree(priv->mfunc.master.slave_state);
err_comm:
	iounmap(priv->mfunc.comm);
err_vhcr:
	dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
			  priv->mfunc.vhcr,
			  priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;
	return -ENOMEM;
}
int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_init(&priv->cmd.hcr_mutex);
	sema_init(&priv->cmd.poll_sem, 1);
	priv->cmd.use_events = 0;
	priv->cmd.toggle     = 1;

	priv->cmd.hcr = NULL;
	priv->mfunc.vhcr = NULL;

	if (!mlx4_is_slave(dev)) {
		priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
					MLX4_HCR_BASE, MLX4_HCR_SIZE);
		if (!priv->cmd.hcr) {
			mlx4_err(dev, "Couldn't map command register.\n");
			return -ENOMEM;
		}
	}

	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
					 MLX4_MAILBOX_SIZE,
					 MLX4_MAILBOX_SIZE, 0);
	if (!priv->cmd.pool)
		goto err_hcr;

	return 0;

err_hcr:
	if (!mlx4_is_slave(dev))
		iounmap(priv->cmd.hcr);
	return -ENOMEM;
}
void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, port;

	if (mlx4_is_master(dev)) {
		flush_workqueue(priv->mfunc.master.comm_wq);
		destroy_workqueue(priv->mfunc.master.comm_wq);
		for (i = 0; i < dev->num_slaves; i++) {
			for (port = 1; port <= MLX4_MAX_PORTS; port++)
				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
		}
		kfree(priv->mfunc.master.slave_state);
	}

	iounmap(priv->mfunc.comm);
	dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
			  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
	priv->mfunc.vhcr = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	pci_pool_destroy(priv->cmd.pool);

	if (!mlx4_is_slave(dev))
		iounmap(priv->cmd.hcr);
}
/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
				    sizeof (struct mlx4_cmd_context),
				    GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next  = i + 1;
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
	spin_lock_init(&priv->cmd.context_lock);

	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	down(&priv->cmd.poll_sem);
	priv->cmd.use_events = 1;

	return 0;
}
/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);

	up(&priv->cmd.poll_sem);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
			   struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
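/*
 * Typical mailbox usage, sketched for reference (illustrative only; this
 * mirrors how other mlx4 files drive mailbox-based commands):
 */
#if 0
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	/* ... fill mailbox->buf, pass mailbox->dma as the command's
	 * in_param, then release the mailbox ... */
	mlx4_free_cmd_mailbox(dev, mailbox);
#endif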
u32 mlx4_comm_get_version(void)
{
	return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
}
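/*
 * The channel version word packs the interface revision in bits 15..8 and
 * the channel version in bits 7..0, so with CMD_CHAN_IF_REV = 1 and
 * CMD_CHAN_VER = 1 the value reported to slaves is 0x0101.
 */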