/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"
enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};
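
/*
 * Illustrative note (added; assumes 4 KB pages, not stated in the original
 * source): each EQ entry is 0x20 = 32 bytes, so one 4096-byte page holds
 * 128 entries.  This is why get_eqe() below can locate an entry with a
 * simple off / PAGE_SIZE page lookup plus an off % PAGE_SIZE byte offset
 * into eq->page_list[].
 */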
#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)
#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD)		    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)	    | \
			       (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
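
/*
 * Illustrative note (added comment, not in the original source): the
 * doorbell written above packs the 24-bit consumer index into the low bits
 * and puts the request-notification flag in bit 31, so eq_set_ci(eq, 1)
 * both acknowledges the processed EQEs and re-arms the EQ for the next
 * interrupt, while eq_set_ci(eq, 0) only updates the consumer index.
 */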
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;

	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
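
/*
 * Illustrative note (added comment, not in the original source): ownership
 * of an EQE is tracked by toggling the owner bit on every wrap of the
 * queue.  An entry is ready for software exactly when its owner bit matches
 * the wrap parity of our consumer index (eq->cons_index & eq->nent);
 * otherwise next_eqe_sw() returns NULL and mlx4_eq_int() stops polling.
 */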
static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
{
	struct mlx4_eqe *eqe =
		&slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
	return (!!(eqe->owner & 0x80) ^
		!!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
		eqe : NULL;
}
void mlx4_gen_slave_eqe(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
	struct mlx4_eqe *eqe;
	u8 slave;
	int i;

	for (eqe = next_slave_event_eqe(slave_eq); eqe;
	      eqe = next_slave_event_eqe(slave_eq)) {
		slave = eqe->slave_id;

		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (i != dev->caps.function &&
				    master->slave_state[i].active)
					if (mlx4_GEN_EQE(dev, i, eqe))
						mlx4_warn(dev, "Failed to generate event "
							  "for slave %d\n", i);
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event "
					  "for slave %d\n", slave);
		}
		++slave_eq->cons;
	}
}
static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
	struct mlx4_eqe *s_eqe =
		&slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];

	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
			  "No free EQE on slave events queue\n", slave);
		return;
	}

	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
	++slave_eq->prod;

	queue_work(priv->mfunc.master.comm_wq,
		   &priv->mfunc.master.slave_event_work);
}
static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
			     struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave =
		&priv->mfunc.master.slave_state[slave];

	if (!s_slave->active) {
		/*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
		return;
	}

	slave_event(dev, slave, eqe);
}
void mlx4_master_handle_slave_flr(struct work_struct *work)
{
	struct mlx4_mfunc_master_ctx *master =
		container_of(work, struct mlx4_mfunc_master_ctx,
			     slave_flr_event_work);
	struct mlx4_mfunc *mfunc =
		container_of(master, struct mlx4_mfunc, master);
	struct mlx4_priv *priv =
		container_of(mfunc, struct mlx4_priv, mfunc);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int i;
	int err;

	mlx4_dbg(dev, "mlx4_handle_slave_flr\n");

	for (i = 0 ; i < dev->num_slaves; i++) {

		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: "
				 "clean slave: %d\n", i);

			mlx4_delete_all_resources_for_slave(dev, i);
			/*return the slave to running mode*/
			spin_lock(&priv->mfunc.master.slave_state_lock);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock(&priv->mfunc.master.slave_state_lock);
			/*notify the FW:*/
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on "
					  "FLR done (slave:%d)\n", i);
		}
	}
}
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	u32 flr_slave;
	u8 update_slave_state;
	int i;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on "
						 "EQ %d at index %u: could "
						 "not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}

			}
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
				      0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
				  __func__);
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) "
						  "on EQ %d at index %u: could"
						  " not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
					  " event: %02x(%02x)\n", __func__,
					  slave,
					  be32_to_cpu(eqe->event.srq.srqn),
					  eqe->type, eqe->subtype);

				if (!ret && slave != dev->caps.function) {
					mlx4_warn(dev, "%s: sending event "
						  "%02x(%02x) to slave:%d\n",
						  __func__, eqe->type,
						  eqe->subtype, slave);
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
				       0xffffff, eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev,
						    MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (mlx4_is_master(dev))
					/*change the state of all slave's port
					 * to down:*/
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending "
							 "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
							 " to slave: %d, port:%d\n",
							 __func__, i, port);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
			} else {
				mlx4_dispatch_event(dev,
						    MLX4_DEV_EVENT_PORT_UP,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (mlx4_is_master(dev)) {
					for (i = 0; i < dev->num_slaves; i++) {
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				}
			}
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
					RES_CQ,
					be32_to_cpu(eqe->event.cq_err.cqn)
					& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on "
						 "EQ %d at index %u: could "
						 "not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}

				if (!ret && slave != dev->caps.function) {
					mlx4_slave_event(dev, slave, eqe);
					break;
				}
			}
			mlx4_cq_event(dev,
				      be32_to_cpu(eqe->event.cq_err.cqn)
				      & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event "
					  "for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof eqe->event.comm_channel_arm.bit_vec);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;

		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received "
					  "FLR event\n");
				break;
			}

			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);

			if (flr_slave > dev->num_slaves) {
				mlx4_warn(dev,
					  "Got FLR for unknown function: %d\n",
					  flr_slave);
				update_slave_state = 0;
			} else
				update_slave_state = 1;

			spin_lock(&priv->mfunc.master.slave_state_lock);
			if (update_slave_state) {
				priv->mfunc.master.slave_state[flr_slave].active = false;
				priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
				priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
			}
			spin_unlock(&priv->mfunc.master.slave_state_lock);
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.slave_flr_event_work);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
				  "index %u. owner=%x, nent=0x%x, slave=%x, "
				  "ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  slave,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		};

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}
static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}
int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
			struct mlx4_vhcr *vhcr,
			struct mlx4_cmd_mailbox *inbox,
			struct mlx4_cmd_mailbox *outbox,
			struct mlx4_cmd_info *cmd)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq =
		priv->mfunc.master.slave_state[slave].event_eq;
	u32 in_modifier = vhcr->in_modifier;
	u32 eqn = in_modifier & 0x1FF;
	u64 in_param = vhcr->in_param;
	int err = 0;
	int i;

	if (slave == dev->caps.function)
		err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
			       0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_NATIVE);
	if (!err)
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
			if (in_param & (1LL << i))
				event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;

	return err;
}
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
			MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
			    0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
			    MLX4_CMD_WRAPPED);
}
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
		dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
}
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev   = dev;
	eq->nent  = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}
static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		/* debug dump of the EQ context returned by HW2SW_EQ */
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				pr_cont("[%02x] ", i * 4);
			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				pr_cont("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}
static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, vec;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	for (i = 0; i < dev->caps.comp_pool; i++) {
		/*
		 * Freeing the assigned irq's
		 * all bits should be 0, but we need to validate
		 */
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			/* NO need protecting*/
			vec = dev->caps.num_comp_vectors + 1 + i;
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
		}
	}

	kfree(eq_table->irq_names);
}
static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}
static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	if (!mlx4_is_slave(dev)) {
		err = mlx4_map_clr_int(dev);
		if (err)
			goto err_out_bitmap;

		priv->eq_table.clr_mask =
			swab32(1 << (priv->eq_table.inta_pin & 31));
		priv->eq_table.clr_int  = priv->clr_base +
			(priv->eq_table.inta_pin < 32 ? 4 : 0);
	}

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
					     dev->caps.comp_pool),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/*if additional completion vectors poolsize is 0 this loop will not run*/
	for (i = dev->caps.num_comp_vectors + 1;
	      i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {

		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(dev->pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(dev->pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}
void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	if (!mlx4_is_slave(dev))
		mlx4_unmap_clr_int(dev);

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}
/* A test that verifies that we can accept interrupts on all
 * the irq vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI_X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
		return err;

	/* A loop over all completion vectors, for each vector we will check
	 * whether it works by mapping command completions to that vector
	 * and performing a NOP command
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Temporary use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map the new eq to handle all asynchronous events */
		err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int vec = 0, err = 0, i;

	mutex_lock(&priv->msix_ctl.pool_lock);
	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
			priv->msix_ctl.pool_bm |= 1ULL << i;
			vec = dev->caps.num_comp_vectors + 1 + i;
			snprintf(priv->eq_table.irq_names +
				 vec * MLX4_IRQNAME_SIZE,
				 MLX4_IRQNAME_SIZE, "%s", name);
			err = request_irq(priv->eq_table.eq[vec].irq,
					  mlx4_msi_x_interrupt, 0,
					  &priv->eq_table.irq_names[vec<<5],
					  priv->eq_table.eq + vec);
			if (err) {
				/*zero out bit by flipping it*/
				priv->msix_ctl.pool_bm ^= 1ULL << i;
				vec = 0;
				continue;
				/*we don't want to break here*/
			}
			eq_set_ci(&priv->eq_table.eq[vec], 1);
		}
	}
	mutex_unlock(&priv->msix_ctl.pool_lock);

	if (vec) {
		*vector = vec;
	} else {
		*vector = 0;
		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
	}
	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/*bm index*/
	int i = vec - dev->caps.num_comp_vectors - 1;

	if (likely(i >= 0)) {
		/*sanity check, making sure we're not trying to free IRQs
		  belonging to a legacy EQ*/
		mutex_lock(&priv->msix_ctl.pool_lock);
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
			priv->msix_ctl.pool_bm &= ~(1ULL << i);
		}
		mutex_unlock(&priv->msix_ctl.pool_lock);
	}

}
EXPORT_SYMBOL(mlx4_release_eq);
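
/*
 * Usage sketch (added for illustration only; the names "dev", "eth-rx-7"
 * and the fallback policy are hypothetical, not taken from this file):
 * a consumer can ask for a dedicated completion vector from the pool and
 * fall back to the shared legacy vectors when the pool is exhausted:
 *
 *	int vec;
 *
 *	if (mlx4_assign_eq(dev, "eth-rx-7", &vec))
 *		vec = 0;	(mlx4_assign_eq() left *vector at 0; use a
 *				 default, shared completion vector instead)
 *	...
 *	if (vec)
 *		mlx4_release_eq(dev, vec);
 */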