/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"
struct mlx5_db_pgdir {
        struct list_head        list;
        unsigned long          *bitmap;
        __be32                 *db_page;
        dma_addr_t              db_dma;
};
/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0.
 */
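/* Two flavors follow: mlx5_buf is one physically contiguous coherent
 * allocation, while mlx5_frag_buf is assembled from individual pages,
 * presumably so large queues can still be allocated when contiguous
 * memory is scarce.
 */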
static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
                                           size_t size, dma_addr_t *dma_handle,
                                           int node)
{
        struct mlx5_priv *priv = &dev->priv;
        int original_node;
        void *cpu_handle;

        mutex_lock(&priv->alloc_mutex);
        original_node = dev_to_node(&dev->pdev->dev);
        set_dev_node(&dev->pdev->dev, node);
        cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
                                         dma_handle, GFP_KERNEL);
        set_dev_node(&dev->pdev->dev, original_node);
        mutex_unlock(&priv->alloc_mutex);

        return cpu_handle;
}
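/* Note on the helper above: set_dev_node() retargets the NUMA node of
 * the underlying struct device so that dma_zalloc_coherent() places the
 * allocation near the requested node; priv->alloc_mutex serializes
 * callers because that override is a device-global side effect that
 * concurrent allocations would otherwise race on.
 */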
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                        struct mlx5_buf *buf, int node)
{
        dma_addr_t t;

        buf->size = size;
        buf->npages = 1;
        buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
        buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
                                                        &t, node);
        if (!buf->direct.buf)
                return -ENOMEM;

        buf->direct.map = t;

        while (t & ((1 << buf->page_shift) - 1)) {
                --buf->page_shift;
                buf->npages *= 2;
        }

        return 0;
}
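/* The loop above compensates for a DMA address that is not aligned to
 * the initially chosen page size: each halving of the page size doubles
 * the page count, until the start address is aligned to 1 << page_shift
 * and the buffer can be described to the HCA as an array of equally
 * sized, naturally aligned pages.
 */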
int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
{
        return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
        dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
                          buf->direct.map);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
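/* Illustrative usage (not part of this file): a queue setup path might
 * allocate a contiguous buffer, describe it to the HCA, and free it on
 * teardown. The size below is made up and error handling is elided.
 *
 *      struct mlx5_buf buf;
 *      int err;
 *
 *      err = mlx5_buf_alloc(dev, 8 * PAGE_SIZE, &buf);
 *      if (err)
 *              return err;
 *
 *      mlx5_fill_page_array(&buf, pas);
 *      ...
 *      mlx5_buf_free(dev, &buf);
 */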
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                             struct mlx5_frag_buf *buf, int node)
{
        int i;

        buf->size = size;
        buf->npages = 1 << get_order(size);
        buf->page_shift = PAGE_SHIFT;
        buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
                             GFP_KERNEL);
        if (!buf->frags)
                goto err_out;

        for (i = 0; i < buf->npages; i++) {
                struct mlx5_buf_list *frag = &buf->frags[i];
                int frag_sz = min_t(int, size, PAGE_SIZE);

                frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
                                                          &frag->map, node);
                if (!frag->buf)
                        goto err_free_buf;
                if (frag->map & ((1 << buf->page_shift) - 1)) {
                        dma_free_coherent(&dev->pdev->dev, frag_sz,
                                          buf->frags[i].buf, buf->frags[i].map);
                        mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
                                       &frag->map, buf->page_shift);
                        goto err_free_buf;
                }
                size -= frag_sz;
        }

        return 0;

err_free_buf:
        while (i--)
                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
                                  buf->frags[i].map);
        kfree(buf->frags);
err_out:
        return -ENOMEM;
}
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
        int size = buf->size;
        int i;

        for (i = 0; i < buf->npages; i++) {
                int frag_sz = min_t(int, size, PAGE_SIZE);

                dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
                                  buf->frags[i].map);
                size -= frag_sz;
        }
        kfree(buf->frags);
}
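/* Illustrative usage (not part of this file): the fragmented variant
 * trades contiguity for allocation reliability, so callers populate the
 * HCA's page list from the per-fragment DMA addresses instead. Error
 * handling is elided and the size is made up.
 *
 *      struct mlx5_frag_buf fbuf;
 *
 *      if (mlx5_frag_buf_alloc_node(dev, 64 * PAGE_SIZE, &fbuf,
 *                                   dev->priv.numa_node))
 *              return -ENOMEM;
 *
 *      mlx5_fill_page_frag_array(&fbuf, pas);
 *      ...
 *      mlx5_frag_buf_free(dev, &fbuf);
 */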
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
                                                 int node)
{
        u32 db_per_page = PAGE_SIZE / cache_line_size();
        struct mlx5_db_pgdir *pgdir;

        pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
        if (!pgdir)
                return NULL;

        pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
                                sizeof(unsigned long),
                                GFP_KERNEL);
        if (!pgdir->bitmap) {
                kfree(pgdir);
                return NULL;
        }

        /* All doorbell slots in a fresh page start out free. */
        bitmap_fill(pgdir->bitmap, db_per_page);

        pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
                                                       &pgdir->db_dma, node);
        if (!pgdir->db_page) {
                kfree(pgdir->bitmap);
                kfree(pgdir);
                return NULL;
        }

        return pgdir;
}
static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
                                    struct mlx5_db *db)
{
        u32 db_per_page = PAGE_SIZE / cache_line_size();
        int offset;
        int i;

        i = find_first_bit(pgdir->bitmap, db_per_page);
        if (i >= db_per_page)
                return -ENOMEM;

        __clear_bit(i, pgdir->bitmap);

        db->u.pgdir = pgdir;
        db->index   = i;
        offset = db->index * cache_line_size();
        db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
        db->dma     = pgdir->db_dma  + offset;

        db->db[0] = 0;
        db->db[1] = 0;

        return 0;
}
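/* Worked example of the slot math above, assuming 4 KB pages and
 * 64-byte cache lines (illustrative values, not guaranteed):
 * db_per_page = 4096 / 64 = 64 doorbell records per page. Record i
 * lives at byte offset i * 64, so for i = 3 the CPU pointer is
 * db_page + 192 / sizeof(__be32) = db_page + 48 and the DMA address is
 * db_dma + 192. Giving each record a full cache line avoids false
 * sharing between doorbells of different queues.
 */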
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
{
        struct mlx5_db_pgdir *pgdir;
        int ret = 0;

        mutex_lock(&dev->priv.pgdir_mutex);

        /* Reuse a free slot in an existing page directory if possible. */
        list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
                if (!mlx5_alloc_db_from_pgdir(pgdir, db))
                        goto out;

        pgdir = mlx5_alloc_db_pgdir(dev, node);
        if (!pgdir) {
                ret = -ENOMEM;
                goto out;
        }

        list_add(&pgdir->list, &dev->priv.pgdir_list);

        /* This should never fail -- we just allocated an empty page: */
        WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
        mutex_unlock(&dev->priv.pgdir_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
        return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
        u32 db_per_page = PAGE_SIZE / cache_line_size();

        mutex_lock(&dev->priv.pgdir_mutex);

        __set_bit(db->index, db->u.pgdir->bitmap);

        /* Release the whole page directory once its last slot is freed. */
        if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
                dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
                                  db->u.pgdir->db_page, db->u.pgdir->db_dma);
                list_del(&db->u.pgdir->list);
                kfree(db->u.pgdir->bitmap);
                kfree(db->u.pgdir);
        }

        mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
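/* Illustrative usage (not part of this file): a queue typically owns one
 * doorbell record; db.db is the CPU-visible pointer and db.dma is what
 * gets programmed into the queue context. Error handling is elided.
 *
 *      struct mlx5_db db;
 *
 *      if (mlx5_db_alloc(dev, &db))
 *              return -ENOMEM;
 *      ...
 *      mlx5_db_free(dev, &db);
 */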
void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
        u64 addr;
        int i;

        for (i = 0; i < buf->npages; i++) {
                addr = buf->direct.map + (i << buf->page_shift);

                pas[i] = cpu_to_be64(addr);
        }
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
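/* Illustrative usage (not part of this file): callers typically point
 * pas at the physical-address-array field of a firmware command, e.g.
 * (hypothetical command layout, shown only in the style of the driver's
 * MLX5_ADDR_OF() accessors):
 *
 *      pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
 *      mlx5_fill_page_array(&cq->buf, pas);
 */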
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
        int i;

        for (i = 0; i < buf->npages; i++)
                pas[i] = cpu_to_be64(buf->frags[i].map);
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);