drivers/net/ethernet/mellanox/mlx4/cq.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/hardirq.h>
#include <linux/export.h>

#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4.h"
#include "icm.h"
#define MLX4_CQ_STATUS_OK		( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW		( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL	(10 << 28)
#define MLX4_CQ_FLAG_CC			( 1 << 18)
#define MLX4_CQ_FLAG_OI			( 1 << 17)
#define MLX4_CQ_STATE_ARMED		( 9 <<  8)
#define MLX4_CQ_STATE_ARMED_SOL		( 6 <<  8)
#define MLX4_EQ_STATE_FIRED		(10 <<  8)
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
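
/*
 * Per-EQ tasklet that invokes the deferred completion handlers of all
 * CQs queued on this EQ.  To bound latency it stops after
 * TASKLET_MAX_TIME_JIFFIES and reschedules itself if work remains.
 */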
void mlx4_cq_tasklet_cb(unsigned long data)
{
	unsigned long flags;
	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
	struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
	struct mlx4_cq *mcq, *temp;

	spin_lock_irqsave(&ctx->lock, flags);
	list_splice_tail_init(&ctx->list, &ctx->process_list);
	spin_unlock_irqrestore(&ctx->lock, flags);

	list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
		list_del_init(&mcq->tasklet_ctx.list);
		mcq->tasklet_ctx.comp(mcq);
		if (atomic_dec_and_test(&mcq->refcount))
			complete(&mcq->free);
		if (time_after(jiffies, end))
			break;
	}

	if (!list_empty(&ctx->process_list))
		tasklet_schedule(&ctx->task);
}
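
/*
 * Default cq->comp callback (installed by mlx4_cq_alloc): defer the
 * actual completion work (tasklet_ctx.comp) to the EQ's tasklet.  A
 * reference is taken so the CQ cannot be freed while it is queued.
 */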
static void mlx4_add_cq_to_tasklet(struct mlx4_cq *cq)
{
	unsigned long flags;
	struct mlx4_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;

	spin_lock_irqsave(&tasklet_ctx->lock, flags);
	/* When migration of CQs between EQs is implemented, note that
	 * this point will need synchronization: while a CQ is being
	 * migrated, completions from the old EQ could still arrive.
	 */
	if (list_empty_careful(&cq->tasklet_ctx.list)) {
		atomic_inc(&cq->refcount);
		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
	}
	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}
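
/*
 * Dispatch a completion event from the EQ handler: look the CQ up by
 * number (masked to the CQN range) and invoke its comp callback.
 */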
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
			       cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	++cq->arm_sn;

	cq->comp(cq);
}
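
/*
 * Dispatch an asynchronous event (e.g. a CQ error) from the EQ handler
 * to the CQ owner's event callback.
 */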
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	struct mlx4_cq *cq;

	rcu_read_lock();
	cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
	rcu_read_unlock();

	if (!cq) {
		mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	/* Accessing the CQ outside of rcu_read_lock is safe, because
	 * the CQ is freed only after interrupt handling is completed.
	 */
	cq->event(cq, event_type);
}
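
/*
 * Thin wrappers around the firmware commands that hand a CQ context to
 * the HCA (SW2HW_CQ), modify it (MODIFY_CQ) and reclaim it (HW2SW_CQ).
 */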
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
			MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}
static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			  int cq_num, u32 opmod)
{
	return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int cq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
			    cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
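
/*
 * Adjust a CQ's event-moderation parameters: an event is generated
 * after "count" completions or when the moderation timer "period"
 * expires, whichever happens first (MODIFY_CQ, opcode modifier 1).
 */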
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   u16 count, u16 period)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->cq_max_count = cpu_to_be16(count);
	cq_context->cq_period = cpu_to_be16(period);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_modify);
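
/*
 * Point the CQ at a new, caller-allocated buffer of "entries" entries
 * described by "mtt" (MODIFY_CQ, opcode modifier 0).  Used to resize a
 * CQ without destroying it.
 */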
int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
		   int entries, struct mlx4_mtt *mtt)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cq_context = mailbox->buf;
	cq_context->logsize_usrpage = cpu_to_be32(ilog2(entries) << 24);
	cq_context->log_page_size = mtt->page_shift - 12;
	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
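
/*
 * Native (PF-owned) CQN allocation: reserve a CQ number from the
 * bitmap and map the ICM pages backing its context and companion cMPT
 * entry.
 */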
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	*cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
	if (*cqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
	if (err)
		goto err_put;
	return 0;

err_put:
	mlx4_table_put(dev, &cq_table->table, *cqn);

err_out:
	mlx4_bitmap_free(&cq_table->bitmap, *cqn, MLX4_NO_RR);
	return err;
}
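
/*
 * On a multi-function device, CQ numbers must be allocated through the
 * wrapped ALLOC_RES command so the PF's resource tracker accounts for
 * them; otherwise allocate natively.
 */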
static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
{
	u64 out_param;
	int err;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;
		*cqn = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_cq_alloc_icm(dev, cqn);
}
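
/*
 * Native CQN release: undo __mlx4_cq_alloc_icm in reverse order.
 */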
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;

	mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
	mlx4_table_put(dev, &cq_table->table, cqn);
	mlx4_bitmap_free(&cq_table->bitmap, cqn, MLX4_NO_RR);
}
static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, cqn);
		err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
	} else {
		__mlx4_cq_free_icm(dev, cqn);
	}
}
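
/*
 * Create a CQ and hand it to the hardware.  The caller supplies the
 * completion vector, the MTT describing the CQ buffer, a UAR for
 * doorbells and the DMA address of the doorbell record; "collapsed"
 * and "timestamp_en" map to CQ context flag bits 18 and 19.  Consumers
 * such as mlx4_en and mlx4_ib build their CQs on top of this.
 */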
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
		  struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec,
		  struct mlx4_cq *cq, unsigned vector, int collapsed,
		  int timestamp_en)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_cq_context *cq_context;
	u64 mtt_addr;
	int err;

	if (vector >= dev->caps.num_comp_vectors)
		return -EINVAL;

	cq->vector = vector;

	err = mlx4_cq_alloc_icm(dev, &cq->cqn);
	if (err)
		return err;

	spin_lock(&cq_table->lock);
	err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
	spin_unlock(&cq_table->lock);
	if (err)
		goto err_icm;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	cq_context = mailbox->buf;
	cq_context->flags = cpu_to_be32(!!collapsed << 18);
	if (timestamp_en)
		cq_context->flags |= cpu_to_be32(1 << 19);

	cq_context->logsize_usrpage =
		cpu_to_be32((ilog2(nent) << 24) |
			    mlx4_to_hw_uar_index(dev, uar->index));
	cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
	cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	cq_context->mtt_base_addr_h = mtt_addr >> 32;
	cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	cq_context->db_rec_addr = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	cq->cons_index = 0;
	cq->arm_sn = 1;
	cq->uar = uar;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);
	cq->comp = mlx4_add_cq_to_tasklet;
	cq->tasklet_ctx.priv =
		&priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
	INIT_LIST_HEAD(&cq->tasklet_ctx.list);

	cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
	return 0;

err_radix:
	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

err_icm:
	mlx4_cq_free_icm(dev, cq->cqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_cq_alloc);
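
/*
 * Tear down a CQ: reclaim it from hardware (HW2SW_CQ), unpublish it
 * from the radix tree, synchronize against any interrupt handler that
 * may still reference it, then wait for the last reference to drop
 * before releasing the CQN and ICM pages.
 */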
void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cq_table *cq_table = &priv->cq_table;
	int err;

	err = mlx4_HW2SW_CQ(dev, NULL, cq->cqn);
	if (err)
		mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);

	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);
	spin_unlock(&cq_table->lock);

	synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
	if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
	    priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
		synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);
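
/*
 * One-time setup of the per-device CQ table.  Slave (VF) functions
 * need only the lock and radix tree; the CQN bitmap is managed by the
 * PF on their behalf.
 */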
int mlx4_init_cq_table(struct mlx4_dev *dev)
{
	struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
	int err;

	spin_lock_init(&cq_table->lock);
	INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
			       dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
	if (err)
		return err;

	return 0;
}
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;
	/* Nothing to do to clean up radix_tree */
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}