/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_IRQNAME_SIZE	= 32
};

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

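/*
 * Sizing notes: each EQE is MLX4_EQ_ENTRY_SIZE (32) bytes, so one page
 * holds PAGE_SIZE / 0x20 entries.  The async EQ is created with
 * MLX4_NUM_ASYNC_EQE entries plus MLX4_NUM_SPARE_EQE spares; the spare
 * count also bounds how many events mlx4_eq_int() may consume before it
 * must push the consumer index back to hardware.
 */
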
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
	__be32			flags;
	u16			reserved1[3];
	__be16			page_offset;
	u8			log_eq_size;
	u8			reserved2[4];
	u8			eq_period;
	u8			reserved3;
	u8			eq_max_count;
	u8			reserved4[3];
	u8			intr;
	u8			log_page_size;
	u8			reserved5[2];
	u8			mtt_base_addr_h;
	__be32			mtt_base_addr_l;
	u32			reserved6[2];
	__be32			consumer_index;
	__be32			producer_index;
	u32			reserved7[4];
} __packed;

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD))

struct mlx4_eqe {
	u8			reserved1;
	u8			type;
	u8			reserved2;
	u8			subtype;
	union {
		u32		raw[6];
		struct {
			__be32	cqn;
		} __packed comp;
		struct {
			u16	reserved1;
			__be16	token;
			u32	reserved2;
			u8	reserved3[3];
			u8	status;
			__be64	out_param;
		} __packed cmd;
		struct {
			__be32	qpn;
		} __packed qp;
		struct {
			__be32	srqn;
		} __packed srq;
		struct {
			__be32	cqn;
			u32	reserved1;
			u8	reserved2[3];
			u8	syndrome;
		} __packed cq_err;
		struct {
			u32	reserved1[2];
			__be32	port;
		} __packed port_change;
	} event;
	u8			reserved3[3];
	u8			owner;
} __packed;

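/*
 * Consumer-index doorbell layout, as written by eq_set_ci() below:
 * bits [23:0] carry the 24-bit consumer index and bit 31 is the
 * request-notification flag.  The value is byte-swapped explicitly with
 * cpu_to_be32() and written via __raw_writel(), so only an ordering
 * barrier is needed afterwards, not a swabbing accessor.
 */
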
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

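/*
 * Phase-bit convention for next_eqe_sw() below: hardware toggles the
 * EQE owner bit on every pass through the queue, and (cons_index & nent)
 * toggles at the same rate on the software side (nent is a power of
 * two).  An entry is ours only when the two parities agree; otherwise
 * hardware has not written it yet and we return NULL.
 */
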
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
				       eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
						    port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 0;
			}
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

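/*
 * Worked example: with MLX4_NUM_SPARE_EQE = 0x80, the loop in
 * mlx4_eq_int() above pushes the consumer index to hardware at least
 * once every 128 consumed EQEs.  That matches the spare entries every
 * EQ is created with, so the HCA never sees the queue as full while
 * software is still draining it.
 */
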
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq  *eq  = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
			int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

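/*
 * The MAP_EQ input modifier packs the unmap flag into bit 31 and the EQ
 * number into the low bits, so the same firmware command both
 * subscribes (unmap == 0) and unsubscribes (unmap == 1) an EQ from the
 * events named in event_mask.
 */
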
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
			    MLX4_CMD_TIME_CLASS_A);
}

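/*
 * SW2HW_EQ hands an initialized EQ context to firmware; HW2SW_EQ is its
 * inverse and returns the context into the caller's mailbox, which is
 * why mlx4_free_eq() can dump the returned context for debugging.
 */
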
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
		dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
}

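/*
 * Example with hypothetical capability values: reserved_eqs = 32,
 * num_comp_vectors = 4, comp_pool = 0.  The EQNs in use are 32..36, so
 * (4 + 1 + 32 + 0)/4 - 32/4 + 1 = 9 - 8 + 1 = 2 UARs cover them at
 * four doorbells per UAR.
 */
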
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

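/*
 * Within each mapped UAR page, EQ doorbells start at offset 0x800 and
 * are 8 bytes apart; hence the 0x800 + 8 * (eq->eqn % 4) arithmetic
 * above for the four EQs that share one UAR.
 */
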
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev   = dev;
	eq->nent  = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags	  = cpu_to_be32(MLX4_EQ_STATUS_OK   |
						MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size	  = ilog2(eq->nent);
	eq_context->intr	  = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);

err_out:
	kfree(dma_list);

	return err;
}

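/*
 * mlx4_create_eq() in summary: allocate one coherent DMA page at a time
 * into page_list, reserve an EQN from the bitmap, resolve the doorbell
 * address, build and write an MTT for the pages, then issue SW2HW_EQ to
 * hand the queue to firmware.  Each error label above unwinds exactly
 * the steps that precede it, in reverse order.
 */
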
static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				pr_cont("[%02x] ", i * 4);
			pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				pr_cont("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, vec;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		if (eq_table->eq[i].have_irq) {
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
			eq_table->eq[i].have_irq = 0;
		}

	for (i = 0; i < dev->caps.comp_pool; i++) {
		/*
		 * Free any IRQs still assigned from the pool.  All bits
		 * should already be clear here, but validate each one.
		 */
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			/* No locking needed at this point */
			vec = dev->caps.num_comp_vectors + 1 + i;
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
		}
	}

	kfree(eq_table->irq_names);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
				    sizeof *priv->eq_table.eq, GFP_KERNEL);
	if (!priv->eq_table.eq)
		return -ENOMEM;

	return 0;
}

void mlx4_free_eq_table(struct mlx4_dev *dev)
{
	kfree(mlx4_priv(dev)->eq_table.eq);
}

int mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
					 sizeof *priv->eq_table.uar_map,
					 GFP_KERNEL);
	if (!priv->eq_table.uar_map) {
		err = -ENOMEM;
		goto err_out_free;
	}

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
	if (err)
		goto err_out_free;

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		priv->eq_table.uar_map[i] = NULL;

	err = mlx4_map_clr_int(dev);
	if (err)
		goto err_out_bitmap;

	priv->eq_table.clr_mask =
		swab32(1 << (priv->eq_table.inta_pin & 31));
	priv->eq_table.clr_int  = priv->clr_base +
		(priv->eq_table.inta_pin < 32 ? 4 : 0);

	priv->eq_table.irq_names =
		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
					     dev->caps.comp_pool),
			GFP_KERNEL);
	if (!priv->eq_table.irq_names) {
		err = -ENOMEM;
		goto err_out_bitmap;
	}

	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
	if (err)
		goto err_out_comp;

	/* If the pool of additional completion vectors is empty, this loop won't run */
	for (i = dev->caps.num_comp_vectors + 1;
	     i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
		err = mlx4_create_eq(dev, dev->caps.num_cqs -
					  dev->caps.reserved_cqs +
					  MLX4_NUM_SPARE_EQE,
				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
				     &priv->eq_table.eq[i]);
		if (err) {
			--i;
			goto err_out_unmap;
		}
	}

	if (dev->flags & MLX4_FLAG_MSI_X) {
		const char *eq_name;

		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
			if (i < dev->caps.num_comp_vectors) {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-comp-%d@pci:%s", i,
					 pci_name(dev->pdev));
			} else {
				snprintf(priv->eq_table.irq_names +
					 i * MLX4_IRQNAME_SIZE,
					 MLX4_IRQNAME_SIZE,
					 "mlx4-async@pci:%s",
					 pci_name(dev->pdev));
			}

			eq_name = priv->eq_table.irq_names +
				  i * MLX4_IRQNAME_SIZE;
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt, 0, eq_name,
					  priv->eq_table.eq + i);
			if (err)
				goto err_out_async;

			priv->eq_table.eq[i].have_irq = 1;
		}
	} else {
		snprintf(priv->eq_table.irq_names,
			 MLX4_IRQNAME_SIZE,
			 DRV_NAME "@pci:%s",
			 pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, priv->eq_table.irq_names, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);

	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	return 0;

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);

err_out_comp:
	i = dev->caps.num_comp_vectors - 1;

err_out_unmap:
	while (i >= 0) {
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
		--i;
	}
	mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_bitmap:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

err_out_free:
	kfree(priv->eq_table.uar_map);

	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);

	mlx4_unmap_clr_int(dev);

	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);

	kfree(priv->eq_table.uar_map);
}

/* A test that verifies that we can accept interrupts on all
 * the IRQ vectors of the device.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupts(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;
	int err;

	err = mlx4_NOP(dev);
	/* When not in MSI_X, there is only one irq to check */
	if (!(dev->flags & MLX4_FLAG_MSI_X))
		return err;

	/* Loop over all completion vectors: check whether each one works
	 * by mapping command completions to that vector and performing a
	 * NOP command.
	 */
	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
		/* Temporarily use polling for command completions */
		mlx4_cmd_use_polling(dev);

		/* Map the new EQ to handle all asynchronous events */
		err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
				  priv->eq_table.eq[i].eqn);
		if (err) {
			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
			mlx4_cmd_use_events(dev);
			break;
		}

		/* Go back to using events */
		mlx4_cmd_use_events(dev);
		err = mlx4_NOP(dev);
	}

	/* Return to default */
	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
	return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);

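/*
 * Note: a nonzero return from mlx4_test_interrupts() means at least one
 * vector failed to deliver a NOP completion; the mlx4_en ethtool
 * self-test, for example, reports this as an interrupt-test failure.
 */
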
int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int vec = 0, err = 0, i;

	spin_lock(&priv->msix_ctl.pool_lock);
	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
			priv->msix_ctl.pool_bm |= 1ULL << i;
			vec = dev->caps.num_comp_vectors + 1 + i;
			snprintf(priv->eq_table.irq_names +
				 vec * MLX4_IRQNAME_SIZE,
				 MLX4_IRQNAME_SIZE, "%s", name);
			err = request_irq(priv->eq_table.eq[vec].irq,
					  mlx4_msi_x_interrupt, 0,
					  &priv->eq_table.irq_names[vec << 5],
					  priv->eq_table.eq + vec);
			if (err) {
				/* Zero out the bit by flipping it */
				priv->msix_ctl.pool_bm ^= 1ULL << i;
				vec = 0;
				/* Don't break; keep scanning the pool */
				continue;
			}
			eq_set_ci(&priv->eq_table.eq[vec], 1);
		}
	}
	spin_unlock(&priv->msix_ctl.pool_lock);

	if (vec) {
		*vector = vec;
	} else {
		*vector = 0;
		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
	}
	return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);

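/*
 * Usage sketch (illustrative only; "mdev" and "ring" are hypothetical
 * caller-side names, not part of this driver's API):
 *
 *	int vec;
 *
 *	if (!mlx4_assign_eq(mdev, "my-rx-ring", &vec))
 *		ring->vector = vec;		(dedicated pool vector)
 *	else
 *		ring->vector = 0;		(fall back to shared vectors)
 *	...
 *	mlx4_release_eq(mdev, ring->vector);
 *
 * mlx4_release_eq() below ignores vectors outside the pool range
 * (i < 0), so the fallback value of 0 can be passed back unconditionally.
 */
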
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	/* Compute the pool bitmap index for this vector */
	int i = vec - dev->caps.num_comp_vectors - 1;

	if (likely(i >= 0)) {
		/*
		 * Sanity check: make sure we're not trying to free an
		 * IRQ that belongs to a legacy (non-pool) EQ.
		 */
		spin_lock(&priv->msix_ctl.pool_lock);
		if (priv->msix_ctl.pool_bm & 1ULL << i) {
			free_irq(priv->eq_table.eq[vec].irq,
				 &priv->eq_table.eq[vec]);
			priv->msix_ctl.pool_bm &= ~(1ULL << i);
		}
		spin_unlock(&priv->msix_ctl.pool_lock);
	}
}
EXPORT_SYMBOL(mlx4_release_eq);