/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
 * Copyright (c) 2017 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 */
7 #include <linux/platform_device.h>
8 #include <rdma/ib_umem.h>
9 #include "hns_roce_device.h"
11 int hns_roce_db_map_user(struct hns_roce_ucontext
*context
,
12 struct ib_udata
*udata
, unsigned long virt
,
13 struct hns_roce_db
*db
)
15 unsigned long page_addr
= virt
& PAGE_MASK
;
16 struct hns_roce_user_db_page
*page
;
20 mutex_lock(&context
->page_mutex
);
22 list_for_each_entry(page
, &context
->page_list
, list
)
23 if (page
->user_virt
== page_addr
)
26 page
= kmalloc(sizeof(*page
), GFP_KERNEL
);
32 refcount_set(&page
->refcount
, 1);
33 page
->user_virt
= page_addr
;
34 page
->umem
= ib_umem_get(context
->ibucontext
.device
, page_addr
,
36 if (IS_ERR(page
->umem
)) {
37 ret
= PTR_ERR(page
->umem
);
42 list_add(&page
->list
, &context
->page_list
);
45 offset
= virt
- page_addr
;
46 db
->dma
= sg_dma_address(page
->umem
->sg_head
.sgl
) + offset
;
47 db
->virt_addr
= sg_virt(page
->umem
->sg_head
.sgl
) + offset
;
48 db
->u
.user_page
= page
;
49 refcount_inc(&page
->refcount
);
52 mutex_unlock(&context
->page_mutex
);
57 void hns_roce_db_unmap_user(struct hns_roce_ucontext
*context
,
58 struct hns_roce_db
*db
)
60 mutex_lock(&context
->page_mutex
);
62 refcount_dec(&db
->u
.user_page
->refcount
);
63 if (refcount_dec_if_one(&db
->u
.user_page
->refcount
)) {
64 list_del(&db
->u
.user_page
->list
);
65 ib_umem_release(db
->u
.user_page
->umem
);
66 kfree(db
->u
.user_page
);
69 mutex_unlock(&context
->page_mutex
);
72 static struct hns_roce_db_pgdir
*hns_roce_alloc_db_pgdir(
73 struct device
*dma_device
)
75 struct hns_roce_db_pgdir
*pgdir
;
77 pgdir
= kzalloc(sizeof(*pgdir
), GFP_KERNEL
);
81 bitmap_fill(pgdir
->order1
,
82 HNS_ROCE_DB_PER_PAGE
/ HNS_ROCE_DB_TYPE_COUNT
);
83 pgdir
->bits
[0] = pgdir
->order0
;
84 pgdir
->bits
[1] = pgdir
->order1
;
85 pgdir
->page
= dma_alloc_coherent(dma_device
, PAGE_SIZE
,
86 &pgdir
->db_dma
, GFP_KERNEL
);
95 static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir
*pgdir
,
96 struct hns_roce_db
*db
, int order
)
101 for (o
= order
; o
<= 1; ++o
) {
102 i
= find_first_bit(pgdir
->bits
[o
], HNS_ROCE_DB_PER_PAGE
>> o
);
103 if (i
< HNS_ROCE_DB_PER_PAGE
>> o
)
110 clear_bit(i
, pgdir
->bits
[o
]);
115 set_bit(i
^ 1, pgdir
->bits
[order
]);
119 db
->db_record
= pgdir
->page
+ db
->index
;
120 db
->dma
= pgdir
->db_dma
+ db
->index
* HNS_ROCE_DB_UNIT_SIZE
;
126 int hns_roce_alloc_db(struct hns_roce_dev
*hr_dev
, struct hns_roce_db
*db
,
129 struct hns_roce_db_pgdir
*pgdir
;
132 mutex_lock(&hr_dev
->pgdir_mutex
);
134 list_for_each_entry(pgdir
, &hr_dev
->pgdir_list
, list
)
135 if (!hns_roce_alloc_db_from_pgdir(pgdir
, db
, order
))
138 pgdir
= hns_roce_alloc_db_pgdir(hr_dev
->dev
);
144 list_add(&pgdir
->list
, &hr_dev
->pgdir_list
);
146 /* This should never fail -- we just allocated an empty page: */
147 WARN_ON(hns_roce_alloc_db_from_pgdir(pgdir
, db
, order
));
150 mutex_unlock(&hr_dev
->pgdir_mutex
);
155 void hns_roce_free_db(struct hns_roce_dev
*hr_dev
, struct hns_roce_db
*db
)
160 mutex_lock(&hr_dev
->pgdir_mutex
);
165 if (db
->order
== 0 && test_bit(i
^ 1, db
->u
.pgdir
->order0
)) {
166 clear_bit(i
^ 1, db
->u
.pgdir
->order0
);
171 set_bit(i
, db
->u
.pgdir
->bits
[o
]);
173 if (bitmap_full(db
->u
.pgdir
->order1
,
174 HNS_ROCE_DB_PER_PAGE
/ HNS_ROCE_DB_TYPE_COUNT
)) {
175 dma_free_coherent(hr_dev
->dev
, PAGE_SIZE
, db
->u
.pgdir
->page
,
176 db
->u
.pgdir
->db_dma
);
177 list_del(&db
->u
.pgdir
->list
);
181 mutex_unlock(&hr_dev
->pgdir_mutex
);