/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
#include "i915_vgpu.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node,
						    size, alignment,
						    I915_GEM_STOLEN_BIAS,
						    U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}
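
/*
 * Illustrative sketch (not from the original file): internal users are
 * expected to pair the two helpers above around the lifetime of a
 * reservation, along the lines of:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node(i915, &node, SZ_4K, 4096);
 *	if (err)
 *		return err;
 *	// ... use [node.start, node.start + node.size) ...
 *	i915_gem_stolen_remove_node(i915, &node);
 */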

static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (IS_GEN(i915, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			drm_dbg(&i915->drm,
				"GTT within stolen memory at %pR\n",
				&ggtt_res);
			drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
				dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && !IS_GEN(i915, 3)) {
			drm_err(&i915->drm,
				"conflict detected with stolen region: %pR\n",
				dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
		IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	drm_WARN(&i915->drm, IS_GEN(i915, 5),
		 "ILK stolen reserved found? 0x%08x\n",
		 reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	drm_WARN_ON(&i915->drm,
		    (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		fallthrough;
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}
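
/*
 * Note on the gen11+ variant above: GEN6_STOLEN_RESERVED is read as a
 * 64-bit register here because the reserved base can sit above 4GiB, and
 * unlike the older decoders there is no early return on an enable bit;
 * base and size are taken directly from the register fields.
 */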

static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
		drm_notice(&i915->drm,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	i915->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		fallthrough;
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(INTEL_GEN(i915));
		fallthrough;
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		drm_err(&i915->drm,
			"inconsistent reservation %pa + %pa; ignoring\n",
			&reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		drm_err(&i915->drm,
			"Stolen reserved area %pR outside stolen memory %pR\n",
			&i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/*
	 * It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start.
	 */
	reserved_total = stolen_top - reserved_base;

	drm_dbg(&i915->drm,
		"Memory reserved for graphics device: %lluK, usable: %lluK\n",
		(u64)resource_size(&i915->dsm) >> 10,
		((u64)resource_size(&i915->dsm) - reserved_total) >> 10);

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}
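
/*
 * Resulting layout (sketch): the drm_mm allocator spans only the usable
 * range below the reserved area, which is expected at the top of stolen:
 *
 *	dsm.start                          reserved_base        stolen_top
 *	|-------- i915->mm.stolen ---------|---- dsm_reserved ----|
 *	            (usable)                    (untouched)
 */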

static void dbg_poison(struct i915_ggtt *ggtt,
		       dma_addr_t addr, resource_size_t size,
		       u8 x)
{
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	if (!drm_mm_node_allocated(&ggtt->error_capture))
		return;

	if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
		return; /* beware stop_machine() inversion */

	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	mutex_lock(&ggtt->error_mutex);
	while (size) {
		void __iomem *s;

		ggtt->vm.insert_page(&ggtt->vm, addr,
				     ggtt->error_capture.start,
				     I915_CACHE_NONE, 0);
		mb();

		s = io_mapping_map_wc(&ggtt->iomap,
				      ggtt->error_capture.start,
				      PAGE_SIZE);
		memset_io(s, x, PAGE_SIZE);
		io_mapping_unmap(s);

		addr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	mb();
	ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
	mutex_unlock(&ggtt->error_mutex);
#endif
}
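
/*
 * Note: stolen memory is not backed by struct pages, so it cannot simply
 * be kmap'd. dbg_poison() therefore goes through the GGTT: it binds each
 * page into the preallocated error_capture PTE and fills it through a WC
 * iomapping of that slot.
 */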

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/*
	 * We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}
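
/*
 * Design note: a stolen object is physically contiguous, so one
 * scatterlist entry suffices and no dma_map_sg() is performed; the "dma
 * address" stored above is simply the physical address inside the stolen
 * region (dsm.start + offset), used directly when binding into the GTT.
 */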

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	dbg_poison(&to_i915(obj->base.dev)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_INUSE);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */

	dbg_poison(&to_i915(obj->base.dev)->ggtt,
		   sg_dma_address(pages->sgl),
		   sg_dma_len(pages->sgl),
		   POISON_FREE);

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_object_release_memory_region(obj);

	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.name = "i915_gem_object_stolen",
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
__i915_gem_object_create_stolen(struct intel_memory_region *mem,
				struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;
	int err = -ENOMEM;

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto cleanup;

	i915_gem_object_init_memory_region(obj, mem, 0);

	return obj;

cleanup:
	i915_gem_object_free(obj);
err:
	return ERR_PTR(err);
}

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct intel_memory_region *mem,
			       resource_size_t size,
			       unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_remove;

	return obj;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
					     size, I915_BO_ALLOC_CONTIGUOUS);
}
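
/*
 * Illustrative use (not from the original file): allocations that must
 * live in stolen, e.g. the fbdev framebuffer when it fits, are created
 * through the wrapper above:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_stolen(i915, SZ_1M);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */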

static int init_stolen(struct intel_memory_region *mem)
{
	intel_memory_region_set_name(mem, "stolen");

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem->i915);
}

static void release_stolen(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
}

static const struct intel_memory_region_ops i915_region_stolen_ops = {
	.init = init_stolen,
	.release = release_stolen,
	.create_object = _i915_gem_object_create_stolen,
};

struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
{
	return intel_memory_region_create(i915,
					  intel_graphics_stolen_res.start,
					  resource_size(&intel_graphics_stolen_res),
					  PAGE_SIZE, 0,
					  &i915_region_stolen_ops);
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
					       resource_size_t stolen_offset,
					       resource_size_t size)
{
	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	drm_dbg(&i915->drm,
		"creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
		&stolen_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (GEM_WARN_ON(size == 0) ||
	    GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
	mutex_unlock(&i915->mm.stolen_lock);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_stolen;

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
	return obj;

err_stolen:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}
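
/*
 * Note: the preallocated path above is what lets the driver wrap a range
 * of stolen memory that firmware has already populated, e.g. the boot
 * framebuffer discovered during initial plane readout, so that the
 * BIOS-to-KMS handover can reuse it in place.
 */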