/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/prime_numbers.h>
#include <linux/random.h>

#include "../i915_selftest.h"

#define PFN_BIAS (1 << 10)

struct pfn_table {
	struct sg_table st;
	unsigned long start, end;
};

typedef unsigned int (*npages_fn_t)(unsigned long n,
				    unsigned long count,
				    struct rnd_state *rnd);

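/*
 * Check the table with for_each_sg(): each entry must start at the expected
 * pfn and have the length chosen by the generator.
 */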
static noinline int expect_pfn_sg(struct pfn_table *pt,
				  npages_fn_t npages_fn,
				  struct rnd_state *rnd,
				  const char *who,
				  unsigned long timeout)
{
	struct scatterlist *sg;
	unsigned long pfn, n;

	pfn = pt->start;
	for_each_sg(pt->st.sgl, sg, pt->st.nents, n) {
		struct page *page = sg_page(sg);
		unsigned int npages = npages_fn(n, pt->st.nents, rnd);

		if (page_to_pfn(page) != pfn) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (sg->length != npages * PAGE_SIZE) {
			pr_err("%s: %s copied wrong sg length, expected size %lu, found %u (using for_each_sg)\n",
			       __func__, who, npages * PAGE_SIZE, sg->length);
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn += npages;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

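/*
 * Repeat the check page by page with for_each_sg_page(), which splits each
 * entry back into its individual pages.
 */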
static noinline int expect_pfn_sg_page_iter(struct pfn_table *pt,
					    const char *who,
					    unsigned long timeout)
{
	struct sg_page_iter sgiter;
	unsigned long pfn;

	pfn = pt->start;
	for_each_sg_page(pt->st.sgl, &sgiter, pt->st.nents, 0) {
		struct page *page = sg_page_iter_page(&sgiter);

		if (page != pfn_to_page(pfn)) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg_page)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn++;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

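/*
 * Repeat the check with i915's own for_each_sgt_page() iterator.
 */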
static noinline int expect_pfn_sgtiter(struct pfn_table *pt,
				       const char *who,
				       unsigned long timeout)
{
	struct sgt_iter sgt;
	struct page *page;
	unsigned long pfn;

	pfn = pt->start;
	for_each_sgt_page(page, sgt, &pt->st) {
		if (page != pfn_to_page(pfn)) {
			pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sgt_page)\n",
			       __func__, who, pfn, page_to_pfn(page));
			return -EINVAL;
		}

		if (igt_timeout(timeout, "%s timed out\n", who))
			return -EINTR;

		pfn++;
	}
	if (pfn != pt->end) {
		pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
		       __func__, who, pt->end, pfn);
		return -EINVAL;
	}

	return 0;
}

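/* Run all three iterator checks against the same table. */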
static int expect_pfn_sgtable(struct pfn_table *pt,
			      npages_fn_t npages_fn,
			      struct rnd_state *rnd,
			      const char *who,
			      unsigned long timeout)
{
	int err;

	err = expect_pfn_sg(pt, npages_fn, rnd, who, timeout);
	if (err)
		return err;

	err = expect_pfn_sg_page_iter(pt, who, timeout);
	if (err)
		return err;

	err = expect_pfn_sgtiter(pt, who, timeout);
	if (err)
		return err;

	return 0;
}

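/*
 * Generators matching npages_fn_t: each returns how many pages the n'th
 * scatterlist entry should cover, giving constant, growing, shrinking and
 * randomised layouts.
 */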
static unsigned int one(unsigned long n,
			unsigned long count,
			struct rnd_state *rnd)
{
	return 1;
}

static unsigned int grow(unsigned long n,
			 unsigned long count,
			 struct rnd_state *rnd)
{
	return n + 1;
}

static unsigned int shrink(unsigned long n,
			   unsigned long count,
			   struct rnd_state *rnd)
{
	return count - n;
}

static unsigned int random(unsigned long n,
			   unsigned long count,
			   struct rnd_state *rnd)
{
	return 1 + (prandom_u32_state(rnd) % 1024);
}

static unsigned int random_page_size_pages(unsigned long n,
					   unsigned long count,
					   struct rnd_state *rnd)
{
	/* 4K, 64K, 2M */
	static unsigned int page_count[] = {
		BIT(12) >> PAGE_SHIFT,
		BIT(16) >> PAGE_SHIFT,
		BIT(21) >> PAGE_SHIFT,
	};

	return page_count[(prandom_u32_state(rnd) % 3)];
}

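/*
 * With a sparse memmap, struct pages for adjacent pfns are not guaranteed to
 * be adjacent in memory; only use ranges where the pointer arithmetic holds.
 */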
static inline bool page_contiguous(struct page *first,
				   struct page *last,
				   unsigned long npages)
{
	return first + npages == last;
}

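/*
 * Populate a pfn_table with @count entries (allocated to hold up to @max),
 * letting @npages_fn decide how many pages each entry spans. Pages are taken
 * from a synthetic pfn range starting at PFN_BIAS.
 */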
static int alloc_table(struct pfn_table *pt,
		       unsigned long count, unsigned long max,
		       npages_fn_t npages_fn,
		       struct rnd_state *rnd,
		       int alloc_error)
{
	struct scatterlist *sg;
	unsigned long n, pfn;

	if (sg_alloc_table(&pt->st, max,
			   GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN))
		return alloc_error;

	/* count must stay below 1 << 20 so count * PAGE_SIZE fits in sg->length */
	GEM_BUG_ON(overflows_type(count * PAGE_SIZE, sg->length));

	/* Construct a table where each scatterlist contains different number
	 * of entries. The idea is to check that we can iterate the individual
	 * pages from inside the coalesced lists.
	 */
	pt->start = PFN_BIAS;
	pfn = pt->start;
	sg = pt->st.sgl;
	for (n = 0; n < count; n++) {
		unsigned long npages = npages_fn(n, count, rnd);

		/* Nobody expects the Sparse Memmap! */
		if (!page_contiguous(pfn_to_page(pfn),
				     pfn_to_page(pfn + npages),
				     npages)) {
			sg_free_table(&pt->st);
			return -ENOSPC;
		}

		if (n)
			sg = sg_next(sg);
		sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);

		GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
		GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
		GEM_BUG_ON(sg->offset != 0);

		pfn += npages;
	}
	sg_mark_end(sg);
	pt->st.nents = n;
	pt->end = pfn;

	return 0;
}

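/* NULL-terminated list of generators that each test loops over. */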
static const npages_fn_t npages_funcs[] = {
	one,
	grow,
	shrink,
	random,
	random_page_size_pages,
	NULL,
};

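/*
 * Build tables of BIT(prime) - 1, BIT(prime) and BIT(prime) + 1 entries for
 * each prime exponent, with every generator, then verify the result with all
 * three iterators.
 */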
static int igt_sg_alloc(void *ignored)
{
	IGT_TIMEOUT(end_time);
	const unsigned long max_order = 20; /* approximating a 4GiB object */
	struct rnd_state prng;
	unsigned long prime;
	int alloc_error = -ENOMEM;

	for_each_prime_number(prime, max_order) {
		unsigned long size = BIT(prime);
		int offset;

		for (offset = -1; offset <= 1; offset++) {
			unsigned long sz = size + offset;
			const npages_fn_t *npages;
			struct pfn_table pt;
			int err;

			for (npages = npages_funcs; *npages; npages++) {
				prandom_seed_state(&prng,
						   i915_selftest.random_seed);
				err = alloc_table(&pt, sz, sz, *npages, &prng,
						  alloc_error);
				if (err == -ENOSPC)
					continue;
				if (err)
					return err;

				prandom_seed_state(&prng,
						   i915_selftest.random_seed);
				err = expect_pfn_sgtable(&pt, *npages, &prng,
							 "sg_alloc_table",
							 end_time);
				sg_free_table(&pt.st);
				if (err)
					return err;
			}
		}

		/* Test at least one continuation before accepting oom */
		if (size > SG_MAX_SINGLE_ALLOC)
			alloc_error = -ENOSPC;
	}

	return 0;
}

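/*
 * Allocate oversized tables (prime entries used out of PAGE_SIZE allocated)
 * and check that i915_sg_trim() compacts them without reordering the pages.
 */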
static int igt_sg_trim(void *ignored)
{
	IGT_TIMEOUT(end_time);
	const unsigned long max = PAGE_SIZE; /* not prime! */
	struct pfn_table pt;
	unsigned long prime;
	int alloc_error = -ENOMEM;

	for_each_prime_number(prime, max) {
		const npages_fn_t *npages;
		int err;

		for (npages = npages_funcs; *npages; npages++) {
			struct rnd_state prng;

			prandom_seed_state(&prng, i915_selftest.random_seed);
			err = alloc_table(&pt, prime, max, *npages, &prng,
					  alloc_error);
			if (err == -ENOSPC)
				continue;
			if (err)
				return err;

			if (i915_sg_trim(&pt.st)) {
				if (pt.st.orig_nents != prime ||
				    pt.st.nents != prime) {
					pr_err("i915_sg_trim failed (nents %u, orig_nents %u), expected %lu\n",
					       pt.st.nents, pt.st.orig_nents,
					       prime);
					err = -EINVAL;
				} else {
					prandom_seed_state(&prng,
							   i915_selftest.random_seed);
					err = expect_pfn_sgtable(&pt,
								 *npages, &prng,
								 "i915_sg_trim",
								 end_time);
				}
			}
			sg_free_table(&pt.st);
			if (err)
				return err;
		}

		/* Test at least one continuation before accepting oom */
		if (prime > SG_MAX_SINGLE_ALLOC)
			alloc_error = -ENOSPC;
	}

	return 0;
}

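/* Entry point registering the mock (no hardware required) scatterlist selftests. */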
int scatterlist_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_sg_alloc),
		SUBTEST(igt_sg_trim),
	};

	return i915_subtests(tests, NULL);
}