drivers/gpu/drm/i915/selftests/scatterlist.c

/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/prime_numbers.h>
#include <linux/random.h>

#include "../i915_selftest.h"

#define PFN_BIAS (1 << 10)

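/*
 * The tests below build sg_tables out of fabricated, contiguous pfns
 * starting at PFN_BIAS; no real pages are allocated, so the expected
 * struct page for any position can simply be recomputed while iterating.
 */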
struct pfn_table {
        struct sg_table st;
        unsigned long start, end;
};

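/*
 * An npages_fn_t returns the number of pages to place in the nth of
 * count scatterlist entries; the implementations below provide constant,
 * growing, shrinking and randomised chunk sizes.
 */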
typedef unsigned int (*npages_fn_t)(unsigned long n,
                                    unsigned long count,
                                    struct rnd_state *rnd);

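/*
 * Walk the table with for_each_sg() and check that every coalesced chunk
 * starts at the expected pfn and has the expected length.
 */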
static noinline int expect_pfn_sg(struct pfn_table *pt,
                                  npages_fn_t npages_fn,
                                  struct rnd_state *rnd,
                                  const char *who,
                                  unsigned long timeout)
{
        struct scatterlist *sg;
        unsigned long pfn, n;

        pfn = pt->start;
        for_each_sg(pt->st.sgl, sg, pt->st.nents, n) {
                struct page *page = sg_page(sg);
                unsigned int npages = npages_fn(n, pt->st.nents, rnd);

                if (page_to_pfn(page) != pfn) {
                        pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg)\n",
                               __func__, who, pfn, page_to_pfn(page));
                        return -EINVAL;
                }

                if (sg->length != npages * PAGE_SIZE) {
                        pr_err("%s: %s copied wrong sg length, expected size %lu, found %u (using for_each_sg)\n",
                               __func__, who, npages * PAGE_SIZE, sg->length);
                        return -EINVAL;
                }

                if (igt_timeout(timeout, "%s timed out\n", who))
                        return -EINTR;

                pfn += npages;
        }
        if (pfn != pt->end) {
                pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
                       __func__, who, pt->end, pfn);
                return -EINVAL;
        }

        return 0;
}

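/*
 * Repeat the check page-by-page using the generic for_each_sg_page()
 * iterator.
 */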
static noinline int expect_pfn_sg_page_iter(struct pfn_table *pt,
                                            const char *who,
                                            unsigned long timeout)
{
        struct sg_page_iter sgiter;
        unsigned long pfn;

        pfn = pt->start;
        for_each_sg_page(pt->st.sgl, &sgiter, pt->st.nents, 0) {
                struct page *page = sg_page_iter_page(&sgiter);

                if (page != pfn_to_page(pfn)) {
                        pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg_page)\n",
                               __func__, who, pfn, page_to_pfn(page));
                        return -EINVAL;
                }

                if (igt_timeout(timeout, "%s timed out\n", who))
                        return -EINTR;

                pfn++;
        }
        if (pfn != pt->end) {
                pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
                       __func__, who, pt->end, pfn);
                return -EINVAL;
        }

        return 0;
}

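/* And once more using i915's own for_each_sgt_page() iterator. */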
static noinline int expect_pfn_sgtiter(struct pfn_table *pt,
                                       const char *who,
                                       unsigned long timeout)
{
        struct sgt_iter sgt;
        struct page *page;
        unsigned long pfn;

        pfn = pt->start;
        for_each_sgt_page(page, sgt, &pt->st) {
                if (page != pfn_to_page(pfn)) {
                        pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sgt_page)\n",
                               __func__, who, pfn, page_to_pfn(page));
                        return -EINVAL;
                }

                if (igt_timeout(timeout, "%s timed out\n", who))
                        return -EINTR;

                pfn++;
        }
        if (pfn != pt->end) {
                pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
                       __func__, who, pt->end, pfn);
                return -EINVAL;
        }

        return 0;
}

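/* Run all three iteration checks against the same table. */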
static int expect_pfn_sgtable(struct pfn_table *pt,
                              npages_fn_t npages_fn,
                              struct rnd_state *rnd,
                              const char *who,
                              unsigned long timeout)
{
        int err;

        err = expect_pfn_sg(pt, npages_fn, rnd, who, timeout);
        if (err)
                return err;

        err = expect_pfn_sg_page_iter(pt, who, timeout);
        if (err)
                return err;

        err = expect_pfn_sgtiter(pt, who, timeout);
        if (err)
                return err;

        return 0;
}

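/*
 * Chunk-size generators: each returns the number of pages for the nth of
 * count entries, giving constant, growing, shrinking and random layouts.
 */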
static unsigned int one(unsigned long n,
                        unsigned long count,
                        struct rnd_state *rnd)
{
        return 1;
}

static unsigned int grow(unsigned long n,
                         unsigned long count,
                         struct rnd_state *rnd)
{
        return n + 1;
}

static unsigned int shrink(unsigned long n,
                           unsigned long count,
                           struct rnd_state *rnd)
{
        return count - n;
}

static unsigned int random(unsigned long n,
                           unsigned long count,
                           struct rnd_state *rnd)
{
        return 1 + (prandom_u32_state(rnd) % 1024);
}

static unsigned int random_page_size_pages(unsigned long n,
                                           unsigned long count,
                                           struct rnd_state *rnd)
{
        /* 4K, 64K, 2M */
        static unsigned int page_count[] = {
                BIT(12) >> PAGE_SHIFT,
                BIT(16) >> PAGE_SHIFT,
                BIT(21) >> PAGE_SHIFT,
        };

        return page_count[(prandom_u32_state(rnd) % 3)];
}

static inline bool page_contiguous(struct page *first,
                                   struct page *last,
                                   unsigned long npages)
{
        return first + npages == last;
}

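/*
 * Build an sg_table of count chunks of fake, contiguous pages using the
 * given chunk-size generator. max bounds the entries actually allocated,
 * so the table may intentionally be left with trailing unused entries
 * (exploited by igt_sg_trim below).
 */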
static int alloc_table(struct pfn_table *pt,
                       unsigned long count, unsigned long max,
                       npages_fn_t npages_fn,
                       struct rnd_state *rnd,
                       int alloc_error)
{
        struct scatterlist *sg;
        unsigned long n, pfn;

        if (sg_alloc_table(&pt->st, max,
                           GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN))
                return alloc_error;

        /* count should be less than 20 to prevent overflowing sg->length */
        GEM_BUG_ON(overflows_type(count * PAGE_SIZE, sg->length));

        /* Construct a table where each scatterlist contains a different
         * number of entries. The idea is to check that we can iterate the
         * individual pages from inside the coalesced lists.
         */
        pt->start = PFN_BIAS;
        pfn = pt->start;
        sg = pt->st.sgl;
        for (n = 0; n < count; n++) {
                unsigned long npages = npages_fn(n, count, rnd);

                /* Nobody expects the Sparse Memmap! */
                if (!page_contiguous(pfn_to_page(pfn),
                                     pfn_to_page(pfn + npages),
                                     npages)) {
                        sg_free_table(&pt->st);
                        return -ENOSPC;
                }

                if (n)
                        sg = sg_next(sg);
                sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);

                GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
                GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
                GEM_BUG_ON(sg->offset != 0);

                pfn += npages;
        }
        sg_mark_end(sg);
        pt->st.nents = n;
        pt->end = pfn;

        return 0;
}

static const npages_fn_t npages_funcs[] = {
        one,
        grow,
        shrink,
        random,
        random_page_size_pages,
        NULL,
};

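/*
 * Exercise sg_alloc_table() for chunk counts just below, at and just above
 * a power of two, for each prime exponent up to max_order, and verify the
 * resulting table with every iterator.
 */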
static int igt_sg_alloc(void *ignored)
{
        IGT_TIMEOUT(end_time);
        const unsigned long max_order = 20; /* approximating a 4GiB object */
        struct rnd_state prng;
        unsigned long prime;
        int alloc_error = -ENOMEM;

        for_each_prime_number(prime, max_order) {
                unsigned long size = BIT(prime);
                int offset;

                for (offset = -1; offset <= 1; offset++) {
                        unsigned long sz = size + offset;
                        const npages_fn_t *npages;
                        struct pfn_table pt;
                        int err;

                        for (npages = npages_funcs; *npages; npages++) {
                                prandom_seed_state(&prng,
                                                   i915_selftest.random_seed);
                                err = alloc_table(&pt, sz, sz, *npages, &prng,
                                                  alloc_error);
                                if (err == -ENOSPC)
                                        break;
                                if (err)
                                        return err;

                                prandom_seed_state(&prng,
                                                   i915_selftest.random_seed);
                                err = expect_pfn_sgtable(&pt, *npages, &prng,
                                                         "sg_alloc_table",
                                                         end_time);
                                sg_free_table(&pt.st);
                                if (err)
                                        return err;
                        }
                }

                /* Test at least one continuation before accepting oom */
                if (size > SG_MAX_SINGLE_ALLOC)
                        alloc_error = -ENOSPC;
        }

        return 0;
}

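/*
 * Allocate oversized tables (max entries, only prime of them used) and
 * check that a successful i915_sg_trim() leaves exactly prime entries
 * while preserving the page layout.
 */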
static int igt_sg_trim(void *ignored)
{
        IGT_TIMEOUT(end_time);
        const unsigned long max = PAGE_SIZE; /* not prime! */
        struct pfn_table pt;
        unsigned long prime;
        int alloc_error = -ENOMEM;

        for_each_prime_number(prime, max) {
                const npages_fn_t *npages;
                int err;

                for (npages = npages_funcs; *npages; npages++) {
                        struct rnd_state prng;

                        prandom_seed_state(&prng, i915_selftest.random_seed);
                        err = alloc_table(&pt, prime, max, *npages, &prng,
                                          alloc_error);
                        if (err == -ENOSPC)
                                break;
                        if (err)
                                return err;

                        if (i915_sg_trim(&pt.st)) {
                                if (pt.st.orig_nents != prime ||
                                    pt.st.nents != prime) {
                                        pr_err("i915_sg_trim failed (nents %u, orig_nents %u), expected %lu\n",
                                               pt.st.nents, pt.st.orig_nents, prime);
                                        err = -EINVAL;
                                } else {
                                        prandom_seed_state(&prng,
                                                           i915_selftest.random_seed);
                                        err = expect_pfn_sgtable(&pt,
                                                                 *npages, &prng,
                                                                 "i915_sg_trim",
                                                                 end_time);
                                }
                        }
                        sg_free_table(&pt.st);
                        if (err)
                                return err;
                }

                /* Test at least one continuation before accepting oom */
                if (prime > SG_MAX_SINGLE_ALLOC)
                        alloc_error = -ENOSPC;
        }

        return 0;
}

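/*
 * Entry point for the mock (hardware-less) selftest runner; these subtests
 * are typically run via the i915.mock_selftests module parameter.
 */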
int scatterlist_mock_selftests(void)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_sg_alloc),
                SUBTEST(igt_sg_trim),
        };

        return i915_subtests(tests, NULL);
}