/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"

/* Handling for queue buffers -- we allocate a bunch of memory and
 * register it in a memory region at HCA virtual address 0. If the
 * requested size is > max_direct, we split the allocation into
 * multiple pages, so we don't require too much contiguous memory.
 */

int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
		   struct mlx5_buf *buf)
{
	dma_addr_t t;

	buf->size = size;
	if (size <= max_direct) {
		/* Small enough: grab one physically contiguous chunk. */
		buf->nbufs      = 1;
		buf->npages     = 1;
		buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
		buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
						      size, &t, GFP_KERNEL);
		if (!buf->direct.buf)
			return -ENOMEM;

		buf->direct.map = t;

		/* Shrink the reported page size until the DMA address is
		 * aligned to it, doubling the page count each time.
		 */
		while (t & ((1 << buf->page_shift) - 1)) {
			--buf->page_shift;
			buf->npages *= 2;
		}
	} else {
		int i;

		/* Too big for one chunk: allocate page by page. */
		buf->direct.buf = NULL;
		buf->nbufs      = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		buf->npages     = buf->nbufs;
		buf->page_shift = PAGE_SHIFT;
		buf->page_list  = kcalloc(buf->nbufs, sizeof(*buf->page_list),
					  GFP_KERNEL);
		if (!buf->page_list)
			return -ENOMEM;

		for (i = 0; i < buf->nbufs; i++) {
			buf->page_list[i].buf =
				dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
						    &t, GFP_KERNEL);
			if (!buf->page_list[i].buf)
				goto err_free;

			buf->page_list[i].map = t;
		}

		if (BITS_PER_LONG == 64) {
			/* On 64-bit, stitch the pages into one virtually
			 * contiguous CPU mapping as well.
			 */
			struct page **pages;

			pages = kmalloc(sizeof(*pages) * buf->nbufs,
					GFP_KERNEL);
			if (!pages)
				goto err_free;
			for (i = 0; i < buf->nbufs; i++)
				pages[i] = virt_to_page(buf->page_list[i].buf);
			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
					       PAGE_KERNEL);
			kfree(pages);
			if (!buf->direct.buf)
				goto err_free;
		}
	}

	return 0;

err_free:
	mlx5_buf_free(dev, buf);

	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
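
/* A minimal usage sketch of the pair above, assuming a caller that already
 * holds the device's mlx5_core_dev. The queue size and the 2 * PAGE_SIZE
 * direct-allocation threshold are illustrative values, not taken from this
 * file:
 *
 *	struct mlx5_buf buf;
 *	int err;
 *
 *	err = mlx5_buf_alloc(dev, 16 * 1024, 2 * PAGE_SIZE, &buf);
 *	if (err)
 *		return err;
 *	... hand the buffer's pages to the HCA, use the queue ...
 *	mlx5_buf_free(dev, &buf);
 */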

void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
	int i;

	if (buf->nbufs == 1)
		dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
				  buf->direct.map);
	else {
		if (BITS_PER_LONG == 64 && buf->direct.buf)
			vunmap(buf->direct.buf);

		for (i = 0; i < buf->nbufs; i++)
			if (buf->page_list[i].buf)
				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
						  buf->page_list[i].buf,
						  buf->page_list[i].map);
		kfree(buf->page_list);
	}
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);

static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct device *dma_device)
{
	struct mlx5_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
	if (!pgdir)
		return NULL;

	/* All doorbell slots in the new page start out free. */
	bitmap_fill(pgdir->bitmap, MLX5_DB_PER_PAGE);
	pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
					    &pgdir->db_dma, GFP_KERNEL);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}

static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
				    struct mlx5_db *db)
{
	int offset;
	int i;

	i = find_first_bit(pgdir->bitmap, MLX5_DB_PER_PAGE);
	if (i >= MLX5_DB_PER_PAGE)
		return -ENOMEM;

	__clear_bit(i, pgdir->bitmap);

	db->u.pgdir = pgdir;
	db->index   = i;
	/* Doorbell records are spaced one cache line apart so that
	 * records on the same page don't share a cache line.
	 */
	offset  = db->index * L1_CACHE_BYTES;
	db->db  = pgdir->db_page + offset / sizeof(*pgdir->db_page);
	db->dma = pgdir->db_dma  + offset;

	return 0;
}
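
/* A worked example of the slot math above, assuming PAGE_SIZE = 4096,
 * L1_CACHE_BYTES = 64 (so MLX5_DB_PER_PAGE = 4096 / 64 = 64 slots), and
 * db_page pointing at __be32 entries: the record at index 3 lands at byte
 * offset 3 * 64 = 192, so db->dma = db_dma + 192 and db->db = db_page +
 * 192 / 4 = db_page + 48. The constants are architecture-dependent; only
 * the arithmetic is fixed.
 */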

int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	struct mlx5_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->priv.pgdir_mutex);

	/* First try to grab a free slot from an existing page. */
	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
			goto out;

	pgdir = mlx5_alloc_db_pgdir(&(dev->pdev->dev));
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->priv.pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));

out:
	mutex_unlock(&dev->priv.pgdir_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_db_alloc);

void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
{
	mutex_lock(&dev->priv.pgdir_mutex);

	__set_bit(db->index, db->u.pgdir->bitmap);

	/* Once every slot on the page is free again, give the whole
	 * page back.
	 */
	if (bitmap_full(db->u.pgdir->bitmap, MLX5_DB_PER_PAGE)) {
		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->priv.pgdir_mutex);
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
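
/* A minimal usage sketch for the doorbell pair above. The two-entry
 * consumer/producer layout is typical of how queue code uses a record,
 * but the indices here are illustrative rather than quoted from this file:
 *
 *	struct mlx5_db db;
 *	int err;
 *
 *	err = mlx5_db_alloc(dev, &db);
 *	if (err)
 *		return err;
 *	db.db[0] = 0;	(e.g. consumer counter)
 *	db.db[1] = 0;	(e.g. producer counter)
 *	... point the hardware context at db.dma ...
 *	mlx5_db_free(dev, &db);
 */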

void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
{
	u64 addr;
	int i;

	for (i = 0; i < buf->npages; i++) {
		if (buf->nbufs == 1)
			addr = buf->direct.map + (i << buf->page_shift);
		else
			addr = buf->page_list[i].map;

		pas[i] = cpu_to_be64(addr);
	}
}
EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
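
/* A sketch of how a caller might feed the resulting array to a firmware
 * command. The command layout (a struct "in" ending in a flexible pas[]
 * array of buf.npages __be64 entries) is an assumption for illustration,
 * not quoted from the driver's command definitions:
 *
 *	in = kzalloc(sizeof(*in) + buf.npages * sizeof(__be64), GFP_KERNEL);
 *	if (!in)
 *		return -ENOMEM;
 *	mlx5_fill_page_array(&buf, in->pas);
 *	... issue the create command with "in" ...
 */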