/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>

#include "mlx4_ib.h"
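/*
 * Kernel doorbell records are carved out of DMA-coherent pages.  Each
 * page holds MLX4_IB_DB_PER_PAGE records and is tracked by a pair of
 * buddy bitmaps: order0 marks free single records, order1 marks free
 * aligned pairs.  An order-0 request is satisfied by splitting an
 * order-1 pair when no single record is free.
 */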
struct mlx4_ib_db_pgdir {
	struct list_head	list;
	DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
	DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
	unsigned long	       *bits[2];
	__be32		       *db_page;
	dma_addr_t		db_dma;
};
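/*
 * Allocate a fresh page directory: the backing doorbell page is
 * DMA-coherent, so the HCA sees record updates without explicit
 * syncs.  Only the order-1 bitmap starts out full; kzalloc() leaves
 * order0 empty, and order-0 records appear as pairs are split.
 */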
static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
{
	struct mlx4_ib_db_pgdir *pgdir;

	pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
	if (!pgdir)
		return NULL;

	bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
	pgdir->bits[0] = pgdir->order0;
	pgdir->bits[1] = pgdir->order1;
	pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
					    PAGE_SIZE, &pgdir->db_dma,
					    GFP_KERNEL);
	if (!pgdir->db_page) {
		kfree(pgdir);
		return NULL;
	}

	return pgdir;
}
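/*
 * Take a record of the given order from one page directory: scan the
 * requested order's bitmap first, then the next order up.  If an
 * order-1 pair is split for an order-0 request, the buddy record
 * (index i ^ 1) is returned to the order-0 bitmap.  Each record is 4
 * bytes, hence the "* 4" when computing the DMA address.
 */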
static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
				       struct mlx4_ib_db *db, int order)
{
	int o;
	int i;

	for (o = order; o <= 1; ++o) {
		i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
		if (i < MLX4_IB_DB_PER_PAGE >> o)
			goto found;
	}

	return -ENOMEM;

found:
	clear_bit(i, pgdir->bits[o]);

	i <<= o;

	if (o > order)
		set_bit(i ^ 1, pgdir->bits[order]);

	db->u.pgdir = pgdir;
	db->index   = i;
	db->db      = pgdir->db_page + db->index;
	db->dma     = pgdir->db_dma  + db->index * 4;
	db->order   = order;

	return 0;
}
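/*
 * Allocate a kernel doorbell record, reusing a free slot in an
 * existing page directory when possible and falling back to a newly
 * allocated page.  dev->pgdir_mutex serializes all pgdir list and
 * bitmap updates.
 */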
int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
{
	struct mlx4_ib_db_pgdir *pgdir;
	int ret = 0;

	mutex_lock(&dev->pgdir_mutex);

	list_for_each_entry(pgdir, &dev->pgdir_list, list)
		if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
			goto out;

	pgdir = mlx4_ib_alloc_db_pgdir(dev);
	if (!pgdir) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&pgdir->list, &dev->pgdir_list);

	/* This should never fail -- we just allocated an empty page: */
	WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));

out:
	mutex_unlock(&dev->pgdir_mutex);

	return ret;
}
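/*
 * Free a doorbell record: if its order-0 buddy is also free, coalesce
 * the two back into an order-1 pair.  When every pair in the page is
 * free again (the order-1 bitmap is full), release the page itself.
 */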
void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
{
	int o;
	int i;

	mutex_lock(&dev->pgdir_mutex);

	o = db->order;
	i = db->index;

	if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
		clear_bit(i ^ 1, db->u.pgdir->order0);
		++o;
	}

	i >>= o;
	set_bit(i, db->u.pgdir->bits[o]);

	if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
		dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
		list_del(&db->u.pgdir->list);
		kfree(db->u.pgdir);
	}

	mutex_unlock(&dev->pgdir_mutex);
}
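/*
 * A minimal usage sketch (hypothetical consumer, error handling
 * trimmed): a kernel ULP embeds a struct mlx4_ib_db, allocates a
 * record, writes doorbell values through db->db, and frees the
 * record on teardown:
 *
 *	struct mlx4_ib_db db;
 *
 *	if (mlx4_ib_db_alloc(dev, &db, 0))
 *		return -ENOMEM;
 *	*db.db = cpu_to_be32(cons_index & 0xffffff);
 *	...
 *	mlx4_ib_db_free(dev, &db);
 *
 * The rest of this file handles doorbells that live in userspace
 * memory rather than in kernel-allocated pages.
 */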
struct mlx4_ib_user_db_page {
	struct list_head	list;
	struct ib_umem	       *umem;
	unsigned long		user_virt;
	int			refcnt;
};
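/*
 * Map a userspace doorbell: each distinct user page is pinned once
 * with ib_umem_get() and refcounted, so doorbells that share a page
 * share one pinned mapping.  The doorbell's DMA address is the page's
 * DMA address plus the offset of @virt within the page.
 */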
int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
			struct mlx4_ib_db *db)
{
	struct mlx4_ib_user_db_page *page;
	struct ib_umem_chunk *chunk;
	int err = 0;

	mutex_lock(&context->db_page_mutex);

	list_for_each_entry(page, &context->db_page_list, list)
		if (page->user_virt == (virt & PAGE_MASK))
			goto found;

	page = kmalloc(sizeof *page, GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto out;
	}

	page->user_virt = (virt & PAGE_MASK);
	page->refcnt    = 0;
	page->umem      = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
				      PAGE_SIZE, 0);
	if (IS_ERR(page->umem)) {
		err = PTR_ERR(page->umem);
		kfree(page);
		goto out;
	}

	list_add(&page->list, &context->db_page_list);

found:
	chunk = list_entry(page->umem->chunk_list.next, struct ib_umem_chunk, list);
	db->dma		= sg_dma_address(chunk->page_list) + (virt & ~PAGE_MASK);
	db->u.user_page = page;
	++page->refcnt;

out:
	mutex_unlock(&context->db_page_mutex);

	return err;
}
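/*
 * Drop a reference on a mapped user doorbell page; when the last
 * doorbell on the page goes away, unpin the memory and free the
 * tracking structure.
 */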
void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
{
	mutex_lock(&context->db_page_mutex);

	if (!--db->u.user_page->refcnt) {
		list_del(&db->u.user_page->list);
		ib_umem_release(db->u.user_page->umem);
		kfree(db->u.user_page);
	}

	mutex_unlock(&context->db_page_mutex);
}