/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  internal queue handling
 *
 *  Authors: Waleri Fomin <fomin@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Christoph Raisch <raisch@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  This source code is distributed under a dual license of GPL v2.0 and
 *  OpenIB BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "ehca_tools.h"
#include "ipz_pt_fn.h"
#include "ehca_classes.h"

/* number of eHCA queue pages (EHCA_PAGESIZE) that fit into one kernel page */
#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)

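/* slab cache for the structs that manage shared small queue pages */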
struct kmem_cache *small_qp_cache;

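/*
 * Return the current queue page and advance the iterator by one queue page.
 * Returns NULL once the end of the queue is reached, or (after logging an
 * error) if the returned address is not aligned to the queue page size.
 */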
void *ipz_qpageit_get_inc(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		ret = NULL;
	}
	if (((u64)ret) % queue->pagesize) {
		ehca_gen_err("ERROR!! not at PAGE-Boundary");
		return NULL;
	}
	return ret;
}

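/*
 * Return the current event queue entry and advance to the next one; wraps
 * to the start of the queue and flips the toggle bit after the last entry.
 */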
void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
{
	void *ret = ipz_qeit_get(queue);
	u64 last_entry_in_q = queue->queue_length - queue->qe_size;

	queue->current_q_offset += queue->qe_size;
	if (queue->current_q_offset > last_entry_in_q) {
		queue->current_q_offset = 0;
		queue->toggle_state = (~queue->toggle_state) & 1;
	}

	return ret;
}

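/*
 * Translate an absolute address into an offset within the queue by scanning
 * the queue page list for the page that contains it.
 * Returns 0 on success, -EINVAL if the address does not belong to the queue.
 */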
int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
{
	int i;

	for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
		u64 page = (u64)virt_to_abs(queue->queue_pages[i]);
		if (addr >= page && addr < page + queue->pagesize) {
			*q_offset = addr - page + i * queue->pagesize;
			return 0;
		}
	}
	return -EINVAL;
}

#if PAGE_SHIFT < EHCA_PAGESHIFT
#error Kernel pages must be at least as large as eHCA pages (4K)!
#endif

/*
 * allocate pages for queue:
 * outer loop allocates whole kernel pages (page aligned) and
 * inner loop divides a kernel page into smaller hca queue pages
 */
static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
{
	int k, f = 0;
	u8 *kpage;

	while (f < nr_of_pages) {
		kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out;

		for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
			queue->queue_pages[f] = (struct ipz_page *)kpage;
			kpage += EHCA_PAGESIZE;
			f++;
		}
	}
	return 1;

out:
	for (f = 0; f < nr_of_pages && queue->queue_pages[f];
	     f += PAGES_PER_KPAGE)
		free_page((unsigned long)(queue->queue_pages)[f]);
	return 0;
}

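/*
 * Queues with a page size smaller than a kernel page share kernel pages:
 * the protection domain keeps per-order lists of partially used (free) and
 * completely used (full) kernel pages, and a per-page bitmap tracks which
 * chunks are in use. Allocate one such chunk for the queue.
 */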
static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
{
	int order = ilog2(queue->pagesize) - 9;
	struct ipz_small_queue_page *page;
	unsigned long bit;

	mutex_lock(&pd->lock);

	if (!list_empty(&pd->free[order]))
		page = list_entry(pd->free[order].next,
				  struct ipz_small_queue_page, list);
	else {
		page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
		if (!page)
			goto out;

		page->page = get_zeroed_page(GFP_KERNEL);
		if (!page->page) {
			kmem_cache_free(small_qp_cache, page);
			goto out;
		}

		list_add(&page->list, &pd->free[order]);
	}

	bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
	__set_bit(bit, page->bitmap);
	page->fill++;

	if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
		list_move(&page->list, &pd->full[order]);

	mutex_unlock(&pd->lock);

	queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
	queue->small_page = page;
	queue->offset = bit << (order + 9);
	return 1;

out:
	ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
	mutex_unlock(&pd->lock);
	return 0;
}

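/*
 * Release the chunk used by a small queue: clear its bit in the owning
 * kernel page's bitmap, move the page back to the free list if it was full,
 * and free the page entirely once no chunk is in use any more.
 */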
static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
{
	int order = ilog2(queue->pagesize) - 9;
	struct ipz_small_queue_page *page = queue->small_page;
	unsigned long bit;
	int free_page = 0;

	bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
		>> (order + 9);

	mutex_lock(&pd->lock);

	__clear_bit(bit, page->bitmap);
	page->fill--;

	if (page->fill == 0) {
		list_del(&page->list);
		free_page = 1;
	}

	if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
		/* the page was full until we freed the chunk */
		list_move_tail(&page->list, &pd->free[order]);

	mutex_unlock(&pd->lock);

	if (free_page) {
		free_page(page->page);
		kmem_cache_free(small_qp_cache, page);
	}
}

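/*
 * Queue constructor: initialize the queue bookkeeping fields, allocate the
 * queue page pointer array (falling back to vmalloc if kmalloc fails) and
 * the queue pages themselves. Returns 1 on success and 0 on failure.
 */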
int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
		   const u32 nr_of_pages, const u32 pagesize,
		   const u32 qe_size, const u32 nr_of_sg,
		   int is_small)
{
	if (pagesize > PAGE_SIZE) {
		ehca_gen_err("FATAL ERROR: pagesize=%x "
			     "is greater than kernel page size", pagesize);
		return 0;
	}

	/* init queue fields */
	queue->queue_length = nr_of_pages * pagesize;
	queue->pagesize = pagesize;
	queue->qe_size = qe_size;
	queue->act_nr_of_sg = nr_of_sg;
	queue->current_q_offset = 0;
	queue->toggle_state = 1;
	queue->small_page = NULL;

	/* allocate queue page pointers */
	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
	if (!queue->queue_pages) {
		queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
		if (!queue->queue_pages) {
			ehca_gen_err("Couldn't allocate queue page list");
			return 0;
		}
	}
	memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));

	/* allocate actual queue pages */
	if (is_small) {
		if (!alloc_small_queue_page(queue, pd))
			goto ipz_queue_ctor_exit0;
	} else
		if (!alloc_queue_pages(queue, nr_of_pages))
			goto ipz_queue_ctor_exit0;

	return 1;

ipz_queue_ctor_exit0:
	ehca_gen_err("Couldn't alloc pages queue=%p "
		     "nr_of_pages=%x", queue, nr_of_pages);
	if (is_vmalloc_addr(queue->queue_pages))
		vfree(queue->queue_pages);
	else
		kfree(queue->queue_pages);

	return 0;
}

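/*
 * Queue destructor: release the queue pages (or the shared small queue page)
 * and the queue page pointer array. Returns 1 on success and 0 if the queue
 * was never initialized.
 */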
int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
{
	int i, nr_pages;

	if (!queue || !queue->queue_pages) {
		ehca_gen_dbg("queue or queue_pages is NULL");
		return 0;
	}

	if (queue->small_page)
		free_small_queue_page(queue, pd);
	else {
		nr_pages = queue->queue_length / queue->pagesize;
		for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
			free_page((unsigned long)queue->queue_pages[i]);
	}

	if (is_vmalloc_addr(queue->queue_pages))
		vfree(queue->queue_pages);
	else
		kfree(queue->queue_pages);

	return 1;
}

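/* create and destroy the slab cache used for small queue page bookkeeping */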
int ehca_init_small_qp_cache(void)
{
	small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
					   sizeof(struct ipz_small_queue_page),
					   0, SLAB_HWCACHE_ALIGN, NULL);
	if (!small_qp_cache)
		return -ENOMEM;

	return 0;
}

void ehca_cleanup_small_qp_cache(void)
{
	kmem_cache_destroy(small_qp_cache);
}