/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
42 void mlx5_cq_completion(struct mlx5_core_dev
*dev
, u32 cqn
)
44 struct mlx5_core_cq
*cq
;
45 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
47 spin_lock(&table
->lock
);
48 cq
= radix_tree_lookup(&table
->tree
, cqn
);
50 atomic_inc(&cq
->refcount
);
51 spin_unlock(&table
->lock
);
54 mlx5_core_warn(dev
, "Completion event for bogus CQ 0x%x\n", cqn
);
62 if (atomic_dec_and_test(&cq
->refcount
))
66 void mlx5_cq_event(struct mlx5_core_dev
*dev
, u32 cqn
, int event_type
)
68 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
69 struct mlx5_core_cq
*cq
;
71 spin_lock(&table
->lock
);
73 cq
= radix_tree_lookup(&table
->tree
, cqn
);
75 atomic_inc(&cq
->refcount
);
77 spin_unlock(&table
->lock
);
80 mlx5_core_warn(dev
, "Async event for bogus CQ 0x%x\n", cqn
);
84 cq
->event(cq
, event_type
);
86 if (atomic_dec_and_test(&cq
->refcount
))
91 int mlx5_core_create_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
,
92 struct mlx5_create_cq_mbox_in
*in
, int inlen
)
95 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
96 struct mlx5_create_cq_mbox_out out
;
97 struct mlx5_destroy_cq_mbox_in din
;
98 struct mlx5_destroy_cq_mbox_out dout
;
100 in
->hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_CREATE_CQ
);
101 memset(&out
, 0, sizeof(out
));
102 err
= mlx5_cmd_exec(dev
, in
, inlen
, &out
, sizeof(out
));
107 return mlx5_cmd_status_to_err(&out
.hdr
);
109 cq
->cqn
= be32_to_cpu(out
.cqn
) & 0xffffff;
112 atomic_set(&cq
->refcount
, 1);
113 init_completion(&cq
->free
);
115 spin_lock_irq(&table
->lock
);
116 err
= radix_tree_insert(&table
->tree
, cq
->cqn
, cq
);
117 spin_unlock_irq(&table
->lock
);
121 cq
->pid
= current
->pid
;
122 err
= mlx5_debug_cq_add(dev
, cq
);
124 mlx5_core_dbg(dev
, "failed adding CP 0x%x to debug file system\n",
130 memset(&din
, 0, sizeof(din
));
131 memset(&dout
, 0, sizeof(dout
));
132 din
.hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ
);
133 mlx5_cmd_exec(dev
, &din
, sizeof(din
), &dout
, sizeof(dout
));
136 EXPORT_SYMBOL(mlx5_core_create_cq
);
138 int mlx5_core_destroy_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
)
140 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
141 struct mlx5_destroy_cq_mbox_in in
;
142 struct mlx5_destroy_cq_mbox_out out
;
143 struct mlx5_core_cq
*tmp
;
146 spin_lock_irq(&table
->lock
);
147 tmp
= radix_tree_delete(&table
->tree
, cq
->cqn
);
148 spin_unlock_irq(&table
->lock
);
150 mlx5_core_warn(dev
, "cq 0x%x not found in tree\n", cq
->cqn
);
154 mlx5_core_warn(dev
, "corruption on srqn 0x%x\n", cq
->cqn
);
158 memset(&in
, 0, sizeof(in
));
159 memset(&out
, 0, sizeof(out
));
160 in
.hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ
);
161 in
.cqn
= cpu_to_be32(cq
->cqn
);
162 err
= mlx5_cmd_exec(dev
, &in
, sizeof(in
), &out
, sizeof(out
));
167 return mlx5_cmd_status_to_err(&out
.hdr
);
169 synchronize_irq(cq
->irqn
);
171 mlx5_debug_cq_remove(dev
, cq
);
172 if (atomic_dec_and_test(&cq
->refcount
))
174 wait_for_completion(&cq
->free
);
178 EXPORT_SYMBOL(mlx5_core_destroy_cq
);
180 int mlx5_core_query_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
,
181 struct mlx5_query_cq_mbox_out
*out
)
183 struct mlx5_query_cq_mbox_in in
;
186 memset(&in
, 0, sizeof(in
));
187 memset(out
, 0, sizeof(*out
));
189 in
.hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_QUERY_CQ
);
190 in
.cqn
= cpu_to_be32(cq
->cqn
);
191 err
= mlx5_cmd_exec(dev
, &in
, sizeof(in
), out
, sizeof(*out
));
196 return mlx5_cmd_status_to_err(&out
->hdr
);
200 EXPORT_SYMBOL(mlx5_core_query_cq
);
203 int mlx5_core_modify_cq(struct mlx5_core_dev
*dev
, struct mlx5_core_cq
*cq
,
204 struct mlx5_modify_cq_mbox_in
*in
, int in_sz
)
206 struct mlx5_modify_cq_mbox_out out
;
209 memset(&out
, 0, sizeof(out
));
210 in
->hdr
.opcode
= cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ
);
211 err
= mlx5_cmd_exec(dev
, in
, in_sz
, &out
, sizeof(out
));
216 return mlx5_cmd_status_to_err(&out
.hdr
);
220 EXPORT_SYMBOL(mlx5_core_modify_cq
);
222 int mlx5_core_modify_cq_moderation(struct mlx5_core_dev
*dev
,
223 struct mlx5_core_cq
*cq
,
227 struct mlx5_modify_cq_mbox_in in
;
229 memset(&in
, 0, sizeof(in
));
231 in
.cqn
= cpu_to_be32(cq
->cqn
);
232 in
.ctx
.cq_period
= cpu_to_be16(cq_period
);
233 in
.ctx
.cq_max_count
= cpu_to_be16(cq_max_count
);
234 in
.field_select
= cpu_to_be32(MLX5_CQ_MODIFY_PERIOD
|
235 MLX5_CQ_MODIFY_COUNT
);
237 return mlx5_core_modify_cq(dev
, cq
, &in
, sizeof(in
));
240 int mlx5_init_cq_table(struct mlx5_core_dev
*dev
)
242 struct mlx5_cq_table
*table
= &dev
->priv
.cq_table
;
245 memset(table
, 0, sizeof(*table
));
246 spin_lock_init(&table
->lock
);
247 INIT_RADIX_TREE(&table
->tree
, GFP_ATOMIC
);
248 err
= mlx5_cq_debugfs_init(dev
);
/* Tear down the CQ table's debugfs entries; counterpart of
 * mlx5_init_cq_table().  The radix tree itself needs no explicit cleanup
 * once all CQs have been destroyed.
 */
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
	mlx5_cq_debugfs_cleanup(dev);
}