/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

enum {
        MLX5_EQE_SIZE           = sizeof(struct mlx5_eqe),
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_EQ_STATE_ARMED             = 0x9,
        MLX5_EQ_STATE_FIRED             = 0xa,
        MLX5_EQ_STATE_ALWAYS_ARMED      = 0xb,
};

enum {
        MLX5_NUM_SPARE_EQE      = 0x80,
        MLX5_NUM_ASYNC_EQE      = 0x100,
        MLX5_NUM_CMD_EQE        = 32,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        struct mlx5_destroy_eq_mbox_in in;
        struct mlx5_destroy_eq_mbox_out out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_EQ);
        in.eqn = eqn;
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                err = mlx5_cmd_status_to_err(&out.hdr);

        return err;
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
        return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

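/*
 * Note on the ring ownership convention used by next_eqe_sw() below: the EQ
 * is a power-of-two ring, so eq->cons_index & (eq->nent - 1) selects the
 * current entry while eq->cons_index & eq->nent toggles on every wrap of the
 * ring. An EQE is handed to software only when its owner bit matches the
 * current wrap phase; otherwise NULL is returned and polling stops there.
 */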
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

        return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
        switch (type) {
        case MLX5_EVENT_TYPE_COMP:
                return "MLX5_EVENT_TYPE_COMP";
        case MLX5_EVENT_TYPE_PATH_MIG:
                return "MLX5_EVENT_TYPE_PATH_MIG";
        case MLX5_EVENT_TYPE_COMM_EST:
                return "MLX5_EVENT_TYPE_COMM_EST";
        case MLX5_EVENT_TYPE_SQ_DRAINED:
                return "MLX5_EVENT_TYPE_SQ_DRAINED";
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
        case MLX5_EVENT_TYPE_CQ_ERROR:
                return "MLX5_EVENT_TYPE_CQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_INTERNAL_ERROR:
                return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
        case MLX5_EVENT_TYPE_PORT_CHANGE:
                return "MLX5_EVENT_TYPE_PORT_CHANGE";
        case MLX5_EVENT_TYPE_GPIO_EVENT:
                return "MLX5_EVENT_TYPE_GPIO_EVENT";
        case MLX5_EVENT_TYPE_REMOTE_CONFIG:
                return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
        case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
                return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
        case MLX5_EVENT_TYPE_STALL_EVENT:
                return "MLX5_EVENT_TYPE_STALL_EVENT";
        case MLX5_EVENT_TYPE_CMD:
                return "MLX5_EVENT_TYPE_CMD";
        case MLX5_EVENT_TYPE_PAGE_REQUEST:
                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
        case MLX5_EVENT_TYPE_PAGE_FAULT:
                return "MLX5_EVENT_TYPE_PAGE_FAULT";
        default:
                return "Unrecognized event";
        }
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
        switch (subtype) {
        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                return MLX5_DEV_EVENT_PORT_DOWN;
        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                return MLX5_DEV_EVENT_PORT_UP;
        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                return MLX5_DEV_EVENT_PORT_INITIALIZED;
        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                return MLX5_DEV_EVENT_LID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                return MLX5_DEV_EVENT_PKEY_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                return MLX5_DEV_EVENT_GUID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                return MLX5_DEV_EVENT_CLIENT_REREG;
        }
        return -1;
}

static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

        __raw_writel((__force u32)cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

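/*
 * mlx5_eq_int() below is the common EQ polling loop: it drains
 * software-owned EQEs, dispatches each one by type (completion, QP/SRQ,
 * command, port, CQ error, page request, page fault) and finally rearms the
 * EQ through eq_update_ci(eq, 1). Note that eq_update_ci() above writes the
 * consumer index with the EQ number in the top byte: to offset 0 of the
 * doorbell when arming, and 8 bytes further when only updating the index.
 */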
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int eqes_found = 0;
        int set_ci = 0;
        u32 cqn;
        u32 rsn;
        u8 port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                dma_rmb();

                mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
                              eq->eqn, eqe_type_str(eqe->type));
                switch (eqe->type) {
                case MLX5_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
                        mlx5_cq_completion(dev, cqn);
                        break;

                case MLX5_EVENT_TYPE_PATH_MIG:
                case MLX5_EVENT_TYPE_COMM_EST:
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
                                      eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_rsc_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
                                      eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_srq_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_CMD:
                        mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
                        break;

                case MLX5_EVENT_TYPE_PORT_CHANGE:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                                if (dev->event)
                                        dev->event(dev, port_subtype_event(eqe->sub_type),
                                                   (unsigned long)port);
                                break;
                        default:
                                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
                                               port, eqe->sub_type);
                        }
                        break;

                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
                        mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                                       cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

                                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
                                              func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
                        }
                        break;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
                case MLX5_EVENT_TYPE_PAGE_FAULT:
                        mlx5_eq_pagefault(dev, eqe);
                        break;
#endif

                default:
                        mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
                                       eqe->type, eq->eqn);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /* The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);

        return eqes_found;
}

static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_core_dev *dev = eq->dev;

        mlx5_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

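/*
 * init_eq_buf() below sets the owner bit of every EQE to
 * MLX5_EQE_OWNER_INIT_VAL (0x1). With the phase check in next_eqe_sw(), this
 * marks the whole ring as hardware-owned for the first pass, so the driver
 * does not consume stale entries before the HCA has written them.
 */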
static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}

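/*
 * mlx5_create_map_eq() ties the pieces together: it sizes the ring with
 * MLX5_NUM_SPARE_EQE slack (see the consumer-index comment in mlx5_eq_int()),
 * allocates and initializes the EQE buffer, issues MLX5_CMD_OP_CREATE_EQ with
 * the page list and event mask, hooks the EQ to its MSI-X vector via
 * request_irq(), registers it with debugfs, and arms it with an initial
 * consumer-index update.
 */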
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
        struct mlx5_priv *priv = &dev->priv;
        struct mlx5_create_eq_mbox_in *in;
        struct mlx5_create_eq_mbox_out out;
        int err;
        int inlen;

        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }
        memset(&out, 0, sizeof(out));

        mlx5_fill_page_array(&eq->buf, in->pas);

        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
        in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
        in->ctx.intr = vecidx;
        in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        in->events_mask = cpu_to_be64(mask);

        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err)
                goto err_in;

        if (out.hdr.status) {
                err = mlx5_cmd_status_to_err(&out.hdr);
                goto err_in;
        }

        snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));

        eq->eqn = out.eq_number;
        eq->irqn = priv->msix_arr[vecidx].vector;
        eq->dev = dev;
        eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
        err = request_irq(eq->irqn, mlx5_msix_handler, 0,
                          priv->irq_info[vecidx].name, eq);
        if (err)
                goto err_eq;

        err = mlx5_debug_eq_add(dev, eq);
        if (err)
                goto err_irq;

        /* EQs are created in ARMED state
         */
        eq_update_ci(eq, 1);

        kvfree(in);
        return 0;

err_irq:
        free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        kvfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        mlx5_debug_eq_remove(dev, eq);
        free_irq(eq->irqn, eq);
        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
        synchronize_irq(eq->irqn);
        mlx5_buf_free(dev, &eq->buf);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
        int err;

        spin_lock_init(&dev->priv.eq_table.lock);

        err = mlx5_eq_debugfs_init(dev);

        return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
        mlx5_eq_debugfs_cleanup(dev);
}

int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
        int err;

        if (MLX5_CAP_GEN(dev, pg))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);

        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
                                 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
                                 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                return err;
        }

        mlx5_cmd_use_events(dev);

        err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
                                 MLX5_NUM_ASYNC_EQE, async_event_mask,
                                 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err1;
        }

        err = mlx5_create_map_eq(dev, &table->pages_eq,
                                 MLX5_EQ_VEC_PAGES,
                                 /* TODO: sriov max_vf + */ 1,
                                 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
                                 &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err2;
        }

        return err;

err2:
        mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
        mlx5_cmd_use_polling(dev);
        mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        return err;
}

int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;

        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
                return err;

        mlx5_destroy_unmap_eq(dev, &table->async_eq);
        mlx5_cmd_use_polling(dev);

        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
                mlx5_cmd_use_events(dev);

        return err;
}

int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       struct mlx5_query_eq_mbox_out *out, int outlen)
{
        struct mlx5_query_eq_mbox_in in;
        int err;

        memset(&in, 0, sizeof(in));
        memset(out, 0, outlen);
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
        in.eqn = eq->eqn;
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;

        if (out->hdr.status)
                err = mlx5_cmd_status_to_err(&out->hdr);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);