/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */

#ifndef MLX5_CORE_EQ_H
#define MLX5_CORE_EQ_H

#define MLX5_NUM_CMD_EQE   (32)
#define MLX5_NUM_ASYNC_EQE (0x1000)
#define MLX5_NUM_SPARE_EQE (0x80)

struct mlx5_eq;
struct mlx5_irq;
struct mlx5_core_dev;

struct mlx5_eq_param {
        int irq_index;  /* device IRQ vector the EQ is attached to */
        int nent;       /* number of EQ entries */
        u64 mask[4];    /* bitmask of event types routed to this EQ */
};

struct mlx5_eq *
mlx5_eq_create_generic(struct mlx5_core_dev *dev, struct mlx5_eq_param *param);
int
mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
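
/*
 * Example (illustrative sketch, not part of the header proper): creating a
 * generic EQ.  my_create_eq(), the EQ depth and the PAGE_FAULT event choice
 * are made up for the example; depending on kernel version mlx5_eq_param
 * also selects which device IRQ the EQ is attached to, which is omitted
 * here.  mlx5_eq_create_generic() returns an ERR_PTR() on failure, and
 * teardown is mlx5_eq_destroy_generic().
 *
 *      static struct mlx5_eq *my_create_eq(struct mlx5_core_dev *dev)
 *      {
 *              struct mlx5_eq_param param = {};
 *
 *              param.nent = 256;       // EQ depth, example value
 *              param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
 *
 *              return mlx5_eq_create_generic(dev, &param);
 *      }
 */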

int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                   struct notifier_block *nb);
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                     struct notifier_block *nb);
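
/*
 * Example (illustrative sketch): wiring an interrupt notifier to a generic
 * EQ with mlx5_eq_enable()/mlx5_eq_disable().  struct my_eq, my_eq_int()
 * and my_eq_drain() are made-up names; the callback runs when the EQ's
 * interrupt fires and would typically just kick the actual EQE processing.
 *
 *      struct my_eq {
 *              struct mlx5_eq *core;
 *              struct notifier_block irq_nb;
 *      };
 *
 *      static int my_eq_int(struct notifier_block *nb, unsigned long type,
 *                           void *data)
 *      {
 *              struct my_eq *eq = container_of(nb, struct my_eq, irq_nb);
 *
 *              my_eq_drain(eq->core);  // consume EQEs, see sketch below
 *              return NOTIFY_OK;
 *      }
 *
 *      // After mlx5_eq_create_generic():
 *      //      eq->irq_nb.notifier_call = my_eq_int;
 *      //      err = mlx5_eq_enable(dev, eq->core, &eq->irq_nb);
 *      // and on teardown, before mlx5_eq_destroy_generic():
 *      //      mlx5_eq_disable(dev, eq->core, &eq->irq_nb);
 */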

struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);

/* The HCA will think the queue has overflowed if we
 * don't tell it we've been processing events.  We
 * create EQs with MLX5_NUM_SPARE_EQE extra entries,
 * so we must update our consumer index at
 * least that often.
 *
 * mlx5_eq_update_cc() must be called for every EQE processed in the
 * EQ's irq handler.
 */
static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
{
        if (unlikely(cc >= MLX5_NUM_SPARE_EQE)) {
                mlx5_eq_update_ci(eq, cc, 0);
                cc = 0;
        }

        return cc;
}
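
/*
 * Example (illustrative sketch): draining an EQ with the helpers above.
 * my_eq_drain() and my_handle_eqe() are made-up names; the consumer-counter
 * bookkeeping follows the pattern used by the mlx5 ODP page-fault EQ.
 *
 *      static void my_eq_drain(struct mlx5_eq *eq)
 *      {
 *              struct mlx5_eqe *eqe;
 *              int cc = 0;
 *
 *              while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
 *                      my_handle_eqe(eqe);     // consume one event
 *
 *                      // Report progress at least every MLX5_NUM_SPARE_EQE
 *                      // events so the HCA never sees the EQ as overflowed.
 *                      cc++;
 *                      cc = mlx5_eq_update_cc(eq, cc);
 *              }
 *
 *              // Publish the final consumer index and re-arm the EQ.
 *              mlx5_eq_update_ci(eq, cc, true);
 *      }
 */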

struct mlx5_nb {
        struct notifier_block nb;
        u8 event_type;
};

#define mlx5_nb_cof(ptr, type, member) \
        (container_of(container_of(ptr, struct mlx5_nb, nb), type, member))

#define MLX5_NB_INIT(name, handler, event) do {              \
        (name)->nb.notifier_call = handler;                  \
        (name)->event_type = MLX5_EVENT_TYPE_##event;        \
} while (0)
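
/*
 * Example (illustrative sketch): subscribing to one async event type with
 * the helpers above.  struct my_ctx and my_cq_err() are made-up names;
 * CQ_ERROR stands for MLX5_EVENT_TYPE_CQ_ERROR, and the registration
 * helpers mlx5_eq_notifier_register()/unregister() (declared in the mlx5
 * driver headers) take the embedded mlx5_nb and use its event_type to pick
 * the matching notifier chain.  Async notifiers receive the raw EQE as
 * @data.
 *
 *      struct my_ctx {
 *              struct mlx5_nb cq_err_nb;
 *      };
 *
 *      static int my_cq_err(struct notifier_block *nb, unsigned long type,
 *                           void *data)
 *      {
 *              // Recover the embedding context from the notifier pointer.
 *              struct my_ctx *ctx = mlx5_nb_cof(nb, struct my_ctx, cq_err_nb);
 *              struct mlx5_eqe *eqe = data;
 *
 *              // ctx/eqe handling would go here.
 *              return NOTIFY_OK;
 *      }
 *
 *      // MLX5_NB_INIT(&ctx->cq_err_nb, my_cq_err, CQ_ERROR);
 *      // mlx5_eq_notifier_register(dev, &ctx->cq_err_nb);
 */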

#endif /* MLX5_CORE_EQ_H */