/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/hardirq.h>
36 #include <linux/mlx5/driver.h>
37 #include <linux/mlx5/cmd.h>
38 #include <rdma/ib_verbs.h>
39 #include <linux/mlx5/cq.h>
40 #include "mlx5_core.h"
42 #define TASKLET_MAX_TIME 2
43 #define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
45 void mlx5_cq_tasklet_cb(unsigned long data
)
48 unsigned long end
= jiffies
+ TASKLET_MAX_TIME_JIFFIES
;
49 struct mlx5_eq_tasklet
*ctx
= (struct mlx5_eq_tasklet
*)data
;
50 struct mlx5_core_cq
*mcq
;
51 struct mlx5_core_cq
*temp
;
53 spin_lock_irqsave(&ctx
->lock
, flags
);
54 list_splice_tail_init(&ctx
->list
, &ctx
->process_list
);
55 spin_unlock_irqrestore(&ctx
->lock
, flags
);
57 list_for_each_entry_safe(mcq
, temp
, &ctx
->process_list
,
59 list_del_init(&mcq
->tasklet_ctx
.list
);
60 mcq
->tasklet_ctx
.comp(mcq
);
61 if (refcount_dec_and_test(&mcq
->refcount
))
63 if (time_after(jiffies
, end
))
67 if (!list_empty(&ctx
->process_list
))
68 tasklet_schedule(&ctx
->task
);
71 static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq
*cq
)
74 struct mlx5_eq_tasklet
*tasklet_ctx
= cq
->tasklet_ctx
.priv
;
76 spin_lock_irqsave(&tasklet_ctx
->lock
, flags
);
77 /* When migrating CQs between EQs will be implemented, please note
78 * that you need to sync this point. It is possible that
79 * while migrating a CQ, completions on the old EQs could
82 if (list_empty_careful(&cq
->tasklet_ctx
.list
)) {
83 refcount_inc(&cq
->refcount
);
84 list_add_tail(&cq
->tasklet_ctx
.list
, &tasklet_ctx
->list
);
86 spin_unlock_irqrestore(&tasklet_ctx
->lock
, flags
);
89 void mlx5_cq_completion(struct mlx5_core_dev
*dev
, u32 cqn
)
91 struct mlx5_core_cq
*cq
;
92 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
94 spin_lock(&table
->lock
);
95 cq
= radix_tree_lookup(&table
->tree
, cqn
);
97 refcount_inc(&cq
->refcount
);
98 spin_unlock(&table
->lock
);
101 mlx5_core_warn(dev
, "Completion event for bogus CQ 0x%x\n", cqn
);
109 if (refcount_dec_and_test(&cq
->refcount
))
113 void mlx5_cq_event(struct mlx5_core_dev
*dev
, u32 cqn
, int event_type
)
115 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
116 struct mlx5_core_cq
*cq
;
118 spin_lock(&table
->lock
);
120 cq
= radix_tree_lookup(&table
->tree
, cqn
);
122 refcount_inc(&cq
->refcount
);
124 spin_unlock(&table
->lock
);
127 mlx5_core_warn(dev
, "Async event for bogus CQ 0x%x\n", cqn
);
131 cq
->event(cq
, event_type
);
133 if (refcount_dec_and_test(&cq
->refcount
))
137 int mlx5_core_create_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
,
140 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
141 u32 out
[MLX5_ST_SZ_DW(create_cq_out
)];
142 u32 din
[MLX5_ST_SZ_DW(destroy_cq_in
)];
143 u32 dout
[MLX5_ST_SZ_DW(destroy_cq_out
)];
144 int eqn
= MLX5_GET(cqc
, MLX5_ADDR_OF(create_cq_in
, in
, cq_context
),
149 eq
= mlx5_eqn2eq(dev
, eqn
);
153 memset(out
, 0, sizeof(out
));
154 MLX5_SET(create_cq_in
, in
, opcode
, MLX5_CMD_OP_CREATE_CQ
);
155 err
= mlx5_cmd_exec(dev
, in
, inlen
, out
, sizeof(out
));
159 cq
->cqn
= MLX5_GET(create_cq_out
, out
, cqn
);
162 refcount_set(&cq
->refcount
, 1);
163 init_completion(&cq
->free
);
165 cq
->comp
= mlx5_add_cq_to_tasklet
;
166 /* assuming CQ will be deleted before the EQ */
167 cq
->tasklet_ctx
.priv
= &eq
->tasklet_ctx
;
168 INIT_LIST_HEAD(&cq
->tasklet_ctx
.list
);
170 spin_lock_irq(&table
->lock
);
171 err
= radix_tree_insert(&table
->tree
, cq
->cqn
, cq
);
172 spin_unlock_irq(&table
->lock
);
176 cq
->pid
= current
->pid
;
177 err
= mlx5_debug_cq_add(dev
, cq
);
179 mlx5_core_dbg(dev
, "failed adding CP 0x%x to debug file system\n",
182 cq
->uar
= dev
->priv
.uar
;
187 memset(din
, 0, sizeof(din
));
188 memset(dout
, 0, sizeof(dout
));
189 MLX5_SET(destroy_cq_in
, din
, opcode
, MLX5_CMD_OP_DESTROY_CQ
);
190 MLX5_SET(destroy_cq_in
, din
, cqn
, cq
->cqn
);
191 mlx5_cmd_exec(dev
, din
, sizeof(din
), dout
, sizeof(dout
));
194 EXPORT_SYMBOL(mlx5_core_create_cq
);
196 int mlx5_core_destroy_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
)
198 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
199 u32 out
[MLX5_ST_SZ_DW(destroy_cq_out
)] = {0};
200 u32 in
[MLX5_ST_SZ_DW(destroy_cq_in
)] = {0};
201 struct mlx5_core_cq
*tmp
;
204 spin_lock_irq(&table
->lock
);
205 tmp
= radix_tree_delete(&table
->tree
, cq
->cqn
);
206 spin_unlock_irq(&table
->lock
);
208 mlx5_core_warn(dev
, "cq 0x%x not found in tree\n", cq
->cqn
);
212 mlx5_core_warn(dev
, "corruption on srqn 0x%x\n", cq
->cqn
);
216 MLX5_SET(destroy_cq_in
, in
, opcode
, MLX5_CMD_OP_DESTROY_CQ
);
217 MLX5_SET(destroy_cq_in
, in
, cqn
, cq
->cqn
);
218 err
= mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, sizeof(out
));
222 synchronize_irq(cq
->irqn
);
224 mlx5_debug_cq_remove(dev
, cq
);
225 if (refcount_dec_and_test(&cq
->refcount
))
227 wait_for_completion(&cq
->free
);
231 EXPORT_SYMBOL(mlx5_core_destroy_cq
);
233 int mlx5_core_query_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
,
234 u32
*out
, int outlen
)
236 u32 in
[MLX5_ST_SZ_DW(query_cq_in
)] = {0};
238 MLX5_SET(query_cq_in
, in
, opcode
, MLX5_CMD_OP_QUERY_CQ
);
239 MLX5_SET(query_cq_in
, in
, cqn
, cq
->cqn
);
240 return mlx5_cmd_exec(dev
, in
, sizeof(in
), out
, outlen
);
242 EXPORT_SYMBOL(mlx5_core_query_cq
);
244 int mlx5_core_modify_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
,
247 u32 out
[MLX5_ST_SZ_DW(modify_cq_out
)] = {0};
249 MLX5_SET(modify_cq_in
, in
, opcode
, MLX5_CMD_OP_MODIFY_CQ
);
250 return mlx5_cmd_exec(dev
, in
, inlen
, out
, sizeof(out
));
252 EXPORT_SYMBOL(mlx5_core_modify_cq
);
254 int mlx5_core_modify_cq_moderation(struct mlx5_core_dev
*dev
,
255 struct mlx5_core_cq
*cq
,
259 u32 in
[MLX5_ST_SZ_DW(modify_cq_in
)] = {0};
262 MLX5_SET(modify_cq_in
, in
, cqn
, cq
->cqn
);
263 cqc
= MLX5_ADDR_OF(modify_cq_in
, in
, cq_context
);
264 MLX5_SET(cqc
, cqc
, cq_period
, cq_period
);
265 MLX5_SET(cqc
, cqc
, cq_max_count
, cq_max_count
);
266 MLX5_SET(modify_cq_in
, in
,
267 modify_field_select_resize_field_select
.modify_field_select
.modify_field_select
,
268 MLX5_CQ_MODIFY_PERIOD
| MLX5_CQ_MODIFY_COUNT
);
270 return mlx5_core_modify_cq(dev
, cq
, in
, sizeof(in
));
272 EXPORT_SYMBOL(mlx5_core_modify_cq_moderation
);
274 int mlx5_init_cq_table(struct mlx5_core_dev
*dev
)
276 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
279 memset(table
, 0, sizeof(*table
));
280 spin_lock_init(&table
->lock
);
281 INIT_RADIX_TREE(&table
->tree
, GFP_ATOMIC
);
282 err
= mlx5_cq_debugfs_init(dev
);
/* Tear down the CQ table's debugfs entries; the radix tree itself
 * needs no explicit cleanup once all CQs have been destroyed.
 */
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
	mlx5_cq_debugfs_cleanup(dev);
}