2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/mlx4/cq.h>
35 #include <linux/mlx4/qp.h>
36 #include <linux/mlx4/cmd.h>
/* Completion-queue event callback; it is installed on cq->mcq.event in
 * mlx4_en_activate_cq() below.
 * NOTE(review): only the signature is visible in this chunk — the body (and
 * the opening brace) fall on source lines that are missing here, so the
 * fragment is left byte-identical.
 */
40 static void mlx4_en_cq_event(struct mlx4_cq
*cq
, enum mlx4_event event
)
/*
 * mlx4_en_create_cq() - allocate the per-ring software CQ structure and its
 * hardware queue resources.
 *
 * NOTE(review): this chunk is garbled — the embedded original line numbers
 * jump (48 -> 51, 55 -> 57, 59 -> 65, ...), so the `if (!cq)` guards, the
 * `cq->size = entries` style assignments, the mlx4_alloc_hwq_res() argument
 * tail, the return statements and the closing braces are NOT visible here.
 * Only the statements actually present are documented; nothing has been
 * reconstructed.
 */
46 int mlx4_en_create_cq(struct mlx4_en_priv
*priv
,
47 struct mlx4_en_cq
**pcq
,
48 int entries
, int ring
, enum cq_type mode
,
51 struct mlx4_en_dev
*mdev
= priv
->mdev
;
52 struct mlx4_en_cq
*cq
;
/* First attempt: allocate the CQ structure on the caller-provided NUMA
 * node (kzalloc_node).
 */
55 cq
= kzalloc_node(sizeof(*cq
), GFP_KERNEL
, node
);
/* Fallback allocation without a node preference.  The guarding `if (!cq)`
 * from the original source sits on a missing line.
 */
57 cq
= kzalloc(sizeof(*cq
), GFP_KERNEL
);
/* Both allocations failed: log the error (the error-return itself is on a
 * line not visible in this chunk).
 */
59 en_err(priv
, "Failed to allocate CQ structure\n");
/* Buffer size in bytes = number of CQ entries times the device's CQE size. */
65 cq
->buf_size
= cq
->size
* mdev
->dev
->caps
.cqe_size
;
/* num_comp_vectors is used as a "no EQ vector assigned yet" sentinel — the
 * error path of mlx4_en_activate_cq() restores this same value.
 */
69 cq
->vector
= mdev
->dev
->caps
.num_comp_vectors
;
71 /* Allocate HW buffers on provided NUMA node.
72 * dev->numa_node is used in mtt range allocation flow.
 */
/* Temporarily point the PCI device at the requested node so the HW queue
 * allocation below is node-local.
 */
74 set_dev_node(&mdev
->dev
->persist
->pdev
->dev
, node
);
/* Allocate the hardware queue resources (call truncated — the remaining
 * arguments are on a missing line).
 */
75 err
= mlx4_alloc_hwq_res(mdev
->dev
, &cq
->wqres
,
/* Restore the PCI device's own NUMA node after the allocation. */
77 set_dev_node(&mdev
->dev
->persist
->pdev
->dev
, mdev
->dev
->numa_node
);
/* The CQE array is accessed through the wqres "direct" buffer mapping. */
81 cq
->buf
= (struct mlx4_cqe
*)cq
->wqres
.buf
.direct
.buf
;
/*
 * mlx4_en_activate_cq() - wire a previously created CQ into the hardware:
 * set up its doorbell record, pick/assign an EQ completion vector, allocate
 * the HW CQ, attach the NAPI context and enable it.
 *
 * NOTE(review): this chunk is garbled — many original lines are missing
 * (e.g. 95 -> 98, 115 -> 124, 153 -> 157), including `if` guards, argument
 * tails, returns and braces.  Only the visible statements are documented;
 * nothing has been reconstructed.
 */
92 int mlx4_en_activate_cq(struct mlx4_en_priv
*priv
, struct mlx4_en_cq
*cq
,
95 struct mlx4_en_dev
*mdev
= priv
->mdev
;
/* Tracks whether mlx4_assign_eq() succeeded, for the error-unwind path. */
98 bool assigned_eq
= false;
/* Bind the CQ to this port's net_device. */
100 cq
->dev
= mdev
->pndev
[priv
->port
];
/* Consumer-index doorbell is the first entry of the doorbell record; the
 * arm doorbell sits immediately after it.
 */
101 cq
->mcq
.set_ci_db
= cq
->wqres
.db
.db
;
102 cq
->mcq
.arm_db
= cq
->wqres
.db
.db
+ 1;
/* Reset the consumer index before use. */
103 *cq
->mcq
.set_ci_db
= 0;
/* Zero the CQE buffer so stale entries are never interpreted as valid. */
105 memset(cq
->buf
, 0, cq
->buf_size
);
/* RX CQs get a real EQ vector; TX CQs (the `else` branch further below)
 * reuse the vector of their paired RX CQ.
 */
107 if (cq
->is_tx
== RX
) {
/* Check whether a valid vector is already set up for this port (condition
 * truncated — the remaining argument is on a missing line).
 */
108 if (!mlx4_is_eq_vector_valid(mdev
->dev
, priv
->port
,
/* Prefer a CPU from the ring's affinity mask as the vector hint. */
110 cq
->vector
= cpumask_first(priv
->rx_ring
[cq
->ring
]->affinity_mask
);
/* Ask the core driver for an EQ vector (call truncated). */
112 err
= mlx4_assign_eq(mdev
->dev
, priv
->port
,
115 mlx4_err(mdev
, "Failed assigning an EQ to CQ vector %d\n",
/* Look up the irq descriptor for the assigned vector (call truncated). */
124 irq_to_desc(mlx4_eq_get_irq(mdev
->dev
,
127 /* For TX we use the same irq per
128 ring we assigned for the RX */
129 struct mlx4_en_cq
*rx_cq
;
132 /* The xdp tx irq must align with the rx ring that forwards to
133 * it, so reindex these from 0. This should only happen when
134 * tx_ring_num is not a multiple of rx_ring_num.
 */
136 xdp_index
= (priv
->xdp_ring_num
- priv
->tx_ring_num
) + cq_idx
;
139 cq_idx
= cq_idx
% priv
->rx_ring_num
;
140 rx_cq
= priv
->rx_cq
[cq_idx
];
/* Share the paired RX CQ's vector instead of allocating a new one. */
141 cq
->vector
= rx_cq
->vector
;
/* RX CQ size tracks the ring's actual (post-negotiation) size. */
145 cq
->size
= priv
->rx_ring
[cq
->ring
]->actual_size
;
/* Enable HW timestamping when configured for this direction (the assignment
 * guarded by this condition is on a missing line).
 */
147 if ((cq
->is_tx
&& priv
->hwtstamp_config
.tx_type
) ||
148 (!cq
->is_tx
&& priv
->hwtstamp_config
.rx_filter
))
/* Allocate the hardware CQ itself, bound to the doorbell DMA address and
 * the chosen completion vector.
 */
151 err
= mlx4_cq_alloc(mdev
->dev
, cq
->size
, &cq
->wqres
.mtt
,
152 &mdev
->priv_uar
, cq
->wqres
.db
.dma
, &cq
->mcq
,
153 cq
->vector
, 0, timestamp_en
);
/* Select the completion handler by direction and install the event hook. */
157 cq
->mcq
.comp
= cq
->is_tx
? mlx4_en_tx_irq
: mlx4_en_rx_irq
;
158 cq
->mcq
.event
= mlx4_en_cq_event
;
/* Register NAPI: TX poll (call truncated — weight argument missing) ... */
161 netif_tx_napi_add(cq
->dev
, &cq
->napi
, mlx4_en_poll_tx_cq
,
/* ... or RX poll with a weight of 64. */
164 netif_napi_add(cq
->dev
, &cq
->napi
, mlx4_en_poll_rx_cq
, 64);
166 napi_enable(&cq
->napi
);
/* Error unwind: give back the EQ vector and restore the "unassigned"
 * sentinel value (num_comp_vectors), matching mlx4_en_create_cq().
 */
172 mlx4_release_eq(mdev
->dev
, cq
->vector
);
173 cq
->vector
= mdev
->dev
->caps
.num_comp_vectors
;
/*
 * mlx4_en_destroy_cq() - free the HW queue resources of a CQ, release its
 * EQ vector when one was assigned, and (on missing lines of this chunk)
 * free the structure and clear *pcq.
 *
 * NOTE(review): lines are missing here too (183's condition continues on an
 * absent line); only visible statements are documented.
 */
177 void mlx4_en_destroy_cq(struct mlx4_en_priv
*priv
, struct mlx4_en_cq
**pcq
)
179 struct mlx4_en_dev
*mdev
= priv
->mdev
;
180 struct mlx4_en_cq
*cq
= *pcq
;
/* Free the hardware queue buffers allocated in mlx4_en_create_cq(). */
182 mlx4_free_hwq_res(mdev
->dev
, &cq
->wqres
, cq
->buf_size
);
/* Release the EQ vector only if it is a valid assigned vector (second half
 * of the condition is on a missing line).
 */
183 if (mlx4_is_eq_vector_valid(mdev
->dev
, priv
->port
, cq
->vector
) &&
185 mlx4_release_eq(priv
->mdev
->dev
, cq
->vector
);
/*
 * mlx4_en_deactivate_cq() - quiesce and tear down the CQ's NAPI context,
 * then free the hardware CQ.  Inverse of mlx4_en_activate_cq().
 *
 * NOTE(review): intervening lines (196, 198-199, 201) are missing from this
 * chunk — presumably conditions/braces around napi_hash_del — so only the
 * visible calls are documented.
 */
193 void mlx4_en_deactivate_cq(struct mlx4_en_priv
*priv
, struct mlx4_en_cq
*cq
)
/* Stop NAPI polling before tearing anything down. */
195 napi_disable(&cq
->napi
);
197 napi_hash_del(&cq
->napi
);
200 netif_napi_del(&cq
->napi
);
/* Return the hardware CQ to the core driver. */
202 mlx4_cq_free(priv
->mdev
->dev
, &cq
->mcq
);
/* Push this CQ's interrupt-moderation settings (packet count and time)
 * down to the hardware via MODIFY_CQ; returns the core driver's status.
 */
205 /* Set rx cq moderation parameters */
206 int mlx4_en_set_cq_moder(struct mlx4_en_priv
*priv
, struct mlx4_en_cq
*cq
)
208 return mlx4_cq_modify(priv
->mdev
->dev
, &cq
->mcq
,
209 cq
->moder_cnt
, cq
->moder_time
);
/*
 * mlx4_en_arm_cq() - request a completion-event interrupt on this CQ by
 * ringing the arm doorbell (MLX4_CQ_DB_REQ_NOT) through the device UAR.
 * NOTE(review): the return statement is on a line missing from this chunk.
 */
212 int mlx4_en_arm_cq(struct mlx4_en_priv
*priv
, struct mlx4_en_cq
*cq
)
214 mlx4_cq_arm(&cq
->mcq
, MLX4_CQ_DB_REQ_NOT
, priv
->mdev
->uar_map
,
215 &priv
->mdev
->uar_lock
);