/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2012 Intel Corporation
 */

#include <linux/errno.h>
#include <linux/mutex.h>

#include <drm/drm_mm.h>
#include <drm/i915_drm.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than was physically installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

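/*
 * Minimal usage sketch, assuming a caller that already holds a valid
 * struct drm_i915_private *i915; stolen-backed objects come back with the
 * usual ERR_PTR convention from the GEM API defined later in this file:
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_stolen(i915, SZ_1M);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	i915_gem_object_put(obj);
 */
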
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *i915,
					 struct drm_mm_node *node, u64 size,
					 unsigned int alignment, u64 start,
					 u64 end)
{
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(i915) >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&i915->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *i915,
				struct drm_mm_node *node, u64 size,
				unsigned int alignment)
{
	return i915_gem_stolen_insert_node_in_range(i915, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *i915,
				 struct drm_mm_node *node)
{
	mutex_lock(&i915->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&i915->mm.stolen_lock);
}

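/*
 * The three helpers above form the raw node allocator API. A sketch of how
 * a hypothetical user would pair them, assuming a zero-initialised node:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_stolen_insert_node(i915, &node, SZ_64K, 4096);
 *	if (err)
 *		return err;
 *	... the stolen range [node.start, node.start + node.size) is ours ...
 *	i915_gem_stolen_remove_node(i915, &node);
 */
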
static int i915_adjust_stolen(struct drm_i915_private *i915,
			      struct resource *dsm)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct resource *r;

	if (dsm->start == 0 || dsm->end <= dsm->start)
		return -EINVAL;

	/*
	 * TODO: We have yet to encounter the case where the GTT wasn't at the
	 * end of stolen. With that assumption we could simplify this.
	 */

	/* Make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(i915) <= 4 &&
	    !IS_G33(i915) && !IS_PINEVIEW(i915) && !IS_G4X(i915)) {
		struct resource stolen[2] = {*dsm, *dsm};
		struct resource ggtt_res;
		resource_size_t ggtt_start;

		ggtt_start = intel_uncore_read(uncore, PGTBL_CTL);
		if (IS_GEN(i915, 4))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;

		ggtt_res =
			(struct resource) DEFINE_RES_MEM(ggtt_start,
							 ggtt_total_entries(ggtt) * 4);

		if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
			stolen[0].end = ggtt_res.start;
		if (ggtt_res.end > stolen[1].start && ggtt_res.end <= stolen[1].end)
			stolen[1].start = ggtt_res.end;

		/* Pick the larger of the two chunks */
		if (resource_size(&stolen[0]) > resource_size(&stolen[1]))
			*dsm = stolen[0];
		else
			*dsm = stolen[1];

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
			DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
		}
	}

	/*
	 * Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(i915->drm.dev, dsm->start,
				    resource_size(dsm),
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * start + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(i915->drm.dev, dsm->start + 1,
					    resource_size(dsm) - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (!r && !IS_GEN(i915, 3)) {
			DRM_ERROR("conflict detected with stolen region: %pR\n",
				  dsm);

			return -EBUSY;
		}
	}

	return 0;
}

static void i915_gem_cleanup_stolen(struct drm_i915_private *i915)
{
	if (!drm_mm_initialized(&i915->mm.stolen))
		return;

	drm_mm_takedown(&i915->mm.stolen);
}

static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore,
					IS_GM45(i915) ?
					CTG_STOLEN_RESERVED :
					ELK_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
			 IS_GM45(i915) ? "CTG" : "ELK", reg_val);

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
		return;

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
	     reg_val);

	if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
		return;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	default:
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
		/* fall through */
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	}

	/*
	 * On vlv, the ADDR_MASK portion is left as 0 and HW deduces the
	 * reserved location as (top - size).
	 */
	*base = stolen_top - *size;
}

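/*
 * E.g. (made-up numbers): with stolen_top = 0x7c000000 and a decoded *size
 * of 1MiB, the reserved region is deduced as
 * *base = 0x7c000000 - 0x100000 = 0x7bf00000, i.e. flush against the top.
 */
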
static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
				     struct intel_uncore *uncore,
				     resource_size_t *base,
				     resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
	resource_size_t stolen_top = i915->dsm.end + 1;

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);

	if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
		return;

	if (!(reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK))
		return;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
	*size = stolen_top - *base;
}

static void icl_get_stolen_reserved(struct drm_i915_private *i915,
				    struct intel_uncore *uncore,
				    resource_size_t *base,
				    resource_size_t *size)
{
	u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);

	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);

	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static int i915_gem_init_stolen(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	resource_size_t reserved_base, stolen_top;
	resource_size_t reserved_total, reserved_size;

	mutex_init(&i915->mm.stolen_lock);

	if (intel_vgpu_active(i915)) {
		dev_notice(i915->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "iGVT-g active");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(i915) < 8) {
		dev_notice(i915->drm.dev,
			   "%s, disabling use of stolen memory\n",
			   "DMAR active");
		return 0;
	}

	if (resource_size(&intel_graphics_stolen_res) == 0)
		return 0;

	i915->dsm = intel_graphics_stolen_res;

	if (i915_adjust_stolen(i915, &i915->dsm))
		return 0;

	GEM_BUG_ON(i915->dsm.start == 0);
	GEM_BUG_ON(i915->dsm.end <= i915->dsm.start);

	stolen_top = i915->dsm.end + 1;
	reserved_base = stolen_top;
	reserved_size = 0;

	switch (INTEL_GEN(i915)) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(i915))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(i915, uncore,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(i915, uncore,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		if (IS_VALLEYVIEW(i915))
			vlv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			gen7_get_stolen_reserved(i915, uncore,
						 &reserved_base, &reserved_size);
		break;
	case 8:
	case 9:
	case 10:
		if (IS_LP(i915))
			chv_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(i915, uncore,
						&reserved_base, &reserved_size);
		break;
	default:
		MISSING_CASE(INTEL_GEN(i915));
		/* fall through */
	case 11:
	case 12:
		icl_get_stolen_reserved(i915, uncore,
					&reserved_base,
					&reserved_size);
		break;
	}

	/*
	 * Our expectation is that the reserved space is at the top of the
	 * stolen region and *never* at the bottom. If we see !reserved_base,
	 * it likely means we failed to read the registers correctly.
	 */
	if (!reserved_base) {
		DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
			  &reserved_base, &reserved_size);
		reserved_base = stolen_top;
		reserved_size = 0;
	}

	i915->dsm_reserved =
		(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);

	if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
		DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
			  &i915->dsm_reserved, &i915->dsm);
		return 0;
	}

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
			 (u64)resource_size(&i915->dsm) >> 10,
			 ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);

	i915->stolen_usable_size =
		resource_size(&i915->dsm) - reserved_total;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);

	return 0;
}

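/*
 * Example outcome with made-up numbers: a 128MiB dsm at
 * [0x78000000, 0x7fffffff] with an 8MiB reserved region at the top gives
 * reserved_total = 8MiB and stolen_usable_size = 120MiB; drm_mm then hands
 * out offsets in [0, 120MiB), relative to dsm.start, for stolen objects.
 */
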
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     resource_size_t offset, resource_size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
	sg_dma_len(sg) = size;

	return st;
}

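/*
 * E.g. (made-up numbers): with dsm.start = 0x78000000, offset = 0x10000 and
 * size = 0x4000, the single entry carries sg_dma_address = 0x78010000 and
 * sg_dma_len = 0x4000, even though no struct pages back the range.
 */
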
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	i915_gem_object_release_memory_region(obj);

	i915_gem_stolen_remove_node(i915, stolen);
	kfree(stolen);
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

static struct drm_i915_gem_object *
__i915_gem_object_create_stolen(struct intel_memory_region *mem,
				struct drm_mm_node *stolen)
{
	static struct lock_class_key lock_class;
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;
	int err = -ENOMEM;

	obj = i915_gem_object_alloc();
	if (!obj)
		goto err;

	drm_gem_private_object_init(&mem->i915->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(mem->i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto cleanup;

	i915_gem_object_init_memory_region(obj, mem, 0);

	return obj;

cleanup:
	i915_gem_object_free(obj);
err:
	return ERR_PTR(err);
}

static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct intel_memory_region *mem,
			       resource_size_t size,
			       unsigned int flags)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	if (size == 0)
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_stolen_insert_node(i915, stolen, size, 4096);
	if (ret) {
		obj = ERR_PTR(ret);
		goto err_free;
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj))
		goto err_remove;

	return obj;

err_remove:
	i915_gem_stolen_remove_node(i915, stolen);
err_free:
	kfree(stolen);
	return obj;
}

struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *i915,
			      resource_size_t size)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_STOLEN],
					     size, I915_BO_ALLOC_CONTIGUOUS);
}

static int init_stolen(struct intel_memory_region *mem)
{
	intel_memory_region_set_name(mem, "stolen");

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	return i915_gem_init_stolen(mem->i915);
}

static void release_stolen(struct intel_memory_region *mem)
{
	i915_gem_cleanup_stolen(mem->i915);
}

static const struct intel_memory_region_ops i915_region_stolen_ops = {
	.init = init_stolen,
	.release = release_stolen,
	.create_object = _i915_gem_object_create_stolen,
};

struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
{
	return intel_memory_region_create(i915,
					  intel_graphics_stolen_res.start,
					  resource_size(&intel_graphics_stolen_res),
					  PAGE_SIZE, 0,
					  &i915_region_stolen_ops);
}

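/*
 * Sketch of the expected call site during memory-region probing (the exact
 * location varies between kernel versions):
 *
 *	mem = i915_gem_stolen_setup(i915);
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 *	i915->mm.regions[INTEL_REGION_STOLEN] = mem;
 */
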
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size)
{
	struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&i915->mm.stolen))
		return ERR_PTR(-ENODEV);

	DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
			 &stolen_offset, &gtt_offset, &size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return ERR_PTR(-EINVAL);

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return ERR_PTR(-ENOMEM);

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&i915->mm.stolen_lock);
	ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
	mutex_unlock(&i915->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
		kfree(stolen);
		return ERR_PTR(ret);
	}

	obj = __i915_gem_object_create_stolen(mem, stolen);
	if (IS_ERR(obj)) {
		DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(i915, stolen);
		kfree(stolen);
		return obj;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	mutex_lock(&ggtt->vm.mutex);
	ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
		mutex_unlock(&ggtt->vm.mutex);
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	GEM_BUG_ON(vma->pages);
	vma->pages = obj->mm.pages;
	atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);

	set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
	__i915_vma_set_map_and_fenceable(vma);

	list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
	mutex_unlock(&ggtt->vm.mutex);

	GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
	atomic_inc(&obj->bind_count);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}
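
/*
 * Usage sketch with hypothetical values: inheriting a BIOS-programmed
 * framebuffer that starts at the base of stolen and is already bound at
 * GGTT offset 0:
 *
 *	obj = i915_gem_object_create_stolen_for_preallocated(i915, 0, 0,
 *							     SZ_8M);
 *	if (IS_ERR(obj))
 *		... fall back to a fresh allocation ...
 */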