/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
/*
 * Copyright (c) 2017 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 */
7 #include <linux/platform_device.h>
8 #include <rdma/ib_umem.h>
9 #include "hns_roce_device.h"
11 int hns_roce_db_map_user(struct hns_roce_ucontext
*context
,
12 struct ib_udata
*udata
, unsigned long virt
,
13 struct hns_roce_db
*db
)
15 unsigned long page_addr
= virt
& PAGE_MASK
;
16 struct hns_roce_user_db_page
*page
;
20 mutex_lock(&context
->page_mutex
);
22 list_for_each_entry(page
, &context
->page_list
, list
)
23 if (page
->user_virt
== page_addr
)
26 page
= kmalloc(sizeof(*page
), GFP_KERNEL
);
32 refcount_set(&page
->refcount
, 1);
33 page
->user_virt
= page_addr
;
34 page
->umem
= ib_umem_get(udata
, page_addr
, PAGE_SIZE
, 0);
35 if (IS_ERR(page
->umem
)) {
36 ret
= PTR_ERR(page
->umem
);
41 list_add(&page
->list
, &context
->page_list
);
44 offset
= virt
- page_addr
;
45 db
->dma
= sg_dma_address(page
->umem
->sg_head
.sgl
) + offset
;
46 db
->virt_addr
= sg_virt(page
->umem
->sg_head
.sgl
) + offset
;
47 db
->u
.user_page
= page
;
48 refcount_inc(&page
->refcount
);
51 mutex_unlock(&context
->page_mutex
);
56 void hns_roce_db_unmap_user(struct hns_roce_ucontext
*context
,
57 struct hns_roce_db
*db
)
59 mutex_lock(&context
->page_mutex
);
61 refcount_dec(&db
->u
.user_page
->refcount
);
62 if (refcount_dec_if_one(&db
->u
.user_page
->refcount
)) {
63 list_del(&db
->u
.user_page
->list
);
64 ib_umem_release(db
->u
.user_page
->umem
);
65 kfree(db
->u
.user_page
);
68 mutex_unlock(&context
->page_mutex
);
71 static struct hns_roce_db_pgdir
*hns_roce_alloc_db_pgdir(
72 struct device
*dma_device
)
74 struct hns_roce_db_pgdir
*pgdir
;
76 pgdir
= kzalloc(sizeof(*pgdir
), GFP_KERNEL
);
80 bitmap_fill(pgdir
->order1
,
81 HNS_ROCE_DB_PER_PAGE
/ HNS_ROCE_DB_TYPE_COUNT
);
82 pgdir
->bits
[0] = pgdir
->order0
;
83 pgdir
->bits
[1] = pgdir
->order1
;
84 pgdir
->page
= dma_alloc_coherent(dma_device
, PAGE_SIZE
,
85 &pgdir
->db_dma
, GFP_KERNEL
);
94 static int hns_roce_alloc_db_from_pgdir(struct hns_roce_db_pgdir
*pgdir
,
95 struct hns_roce_db
*db
, int order
)
100 for (o
= order
; o
<= 1; ++o
) {
101 i
= find_first_bit(pgdir
->bits
[o
], HNS_ROCE_DB_PER_PAGE
>> o
);
102 if (i
< HNS_ROCE_DB_PER_PAGE
>> o
)
109 clear_bit(i
, pgdir
->bits
[o
]);
114 set_bit(i
^ 1, pgdir
->bits
[order
]);
118 db
->db_record
= pgdir
->page
+ db
->index
;
119 db
->dma
= pgdir
->db_dma
+ db
->index
* HNS_ROCE_DB_UNIT_SIZE
;
125 int hns_roce_alloc_db(struct hns_roce_dev
*hr_dev
, struct hns_roce_db
*db
,
128 struct hns_roce_db_pgdir
*pgdir
;
131 mutex_lock(&hr_dev
->pgdir_mutex
);
133 list_for_each_entry(pgdir
, &hr_dev
->pgdir_list
, list
)
134 if (!hns_roce_alloc_db_from_pgdir(pgdir
, db
, order
))
137 pgdir
= hns_roce_alloc_db_pgdir(hr_dev
->dev
);
143 list_add(&pgdir
->list
, &hr_dev
->pgdir_list
);
145 /* This should never fail -- we just allocated an empty page: */
146 WARN_ON(hns_roce_alloc_db_from_pgdir(pgdir
, db
, order
));
149 mutex_unlock(&hr_dev
->pgdir_mutex
);
154 void hns_roce_free_db(struct hns_roce_dev
*hr_dev
, struct hns_roce_db
*db
)
159 mutex_lock(&hr_dev
->pgdir_mutex
);
164 if (db
->order
== 0 && test_bit(i
^ 1, db
->u
.pgdir
->order0
)) {
165 clear_bit(i
^ 1, db
->u
.pgdir
->order0
);
170 set_bit(i
, db
->u
.pgdir
->bits
[o
]);
172 if (bitmap_full(db
->u
.pgdir
->order1
,
173 HNS_ROCE_DB_PER_PAGE
/ HNS_ROCE_DB_TYPE_COUNT
)) {
174 dma_free_coherent(hr_dev
->dev
, PAGE_SIZE
, db
->u
.pgdir
->page
,
175 db
->u
.pgdir
->db_dma
);
176 list_del(&db
->u
.pgdir
->list
);
180 mutex_unlock(&hr_dev
->pgdir_mutex
);