// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */
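
/*
 * For example, to load the GuC (and HuC) and authenticate the HuC without
 * enabling GuC submission, one would typically load the driver with (a usage
 * sketch; see the i915.enable_guc modparam documentation for the
 * authoritative bit meanings):
 *
 *	modprobe i915 enable_guc=2
 */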

void intel_guc_notify(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	/*
	 * On Gen11+, the value written to the register is passed as a payload
	 * to the FW. However, the FW currently treats all values the same way
	 * (H2G interrupt), so we can just write the value that the HW expects
	 * on older gens.
	 */
	intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}
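
/*
 * The send registers form a contiguous bank of 32-bit MMIO registers, so
 * message dword i lives at byte offset 4 * i from send_regs.base; e.g.
 * guc_send_reg(guc, 2) resolves to _MMIO(guc->send_regs.base + 8).
 */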

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	if (INTEL_GEN(gt->i915) >= 11) {
		guc->send_regs.base =
				i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
			     gt->pm_guc_events);
		guc->interrupts.enabled = true;
		gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	assert_rpm_wakelock_held(&gt->i915->runtime_pm);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	if (!guc->interrupts.enabled) {
		u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_ENABLE, events);
		intel_uncore_write(gt->uncore,
				   GEN11_GUC_SG_INTR_MASK, ~events);
		guc->interrupts.enabled = true;
	}
	spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	spin_lock_irq(&gt->irq_lock);
	guc->interrupts.enabled = false;

	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
	intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

	spin_unlock_irq(&gt->irq_lock);
	intel_synchronize_irq(gt->i915);

	gen11_reset_guc_interrupts(guc);
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	if (INTEL_GEN(i915) >= 11) {
		guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
	} else {
		guc->notify_reg = GUC_SEND_INTERRUPT;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
	}
}
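
/*
 * Callers can then use the per-gen hooks without caring about the platform,
 * e.g. (a sketch):
 *
 *	guc->interrupts.enable(guc);
 *	...
 *	guc->interrupts.disable(guc);
 */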

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_guc_is_submission_supported(guc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (intel_guc_is_submission_supported(guc)) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			 (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}
	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}
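
/*
 * The buffer sizes are encoded as a count of allocation units minus one, e.g.
 * (hypothetical size, for illustration only): with a CRASH_BUFFER_SIZE of
 * SZ_2M, the %-SZ_1M test above selects UNIT == SZ_1M, so the value programmed
 * into the crash-buffer field is 2M / 1M - 1 == 1.
 */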

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block out to the SOFT_SCRATCH registers before
 * starting the firmware transfer. These parameters are read by the firmware
 * on startup and cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}
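
/*
 * Note the register layout implied above: SOFT_SCRATCH(0) is cleared and
 * parameter dword i lands in SOFT_SCRATCH(1 + i), so the whole block occupies
 * SOFT_SCRATCH(1)..SOFT_SCRATCH(GUC_CTL_MAX_DWORDS).
 */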

int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto err_fetch;

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_fw;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_guc_is_submission_supported(guc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_fw:
	intel_uc_fw_fini(&guc->fw);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	if (!intel_uc_fw_is_available(&guc->fw))
		return;

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_guc_is_submission_supported(guc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	intel_uc_fw_fini(&guc->fw);
	intel_uc_fw_cleanup_fetch(&guc->fw);

	intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
						guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
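
/*
 * A minimal usage sketch (for illustration only; the real callers are the CT
 * buffer registration paths, per the GEM_BUG_ON above). Dword 0 carries the
 * action code, further dwords carry the action payload (omitted here), and
 * the GuC reply is read back from the same scratch registers:
 *
 *	u32 request[] = { INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER };
 *	int err;
 *
 *	err = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
 */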

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int ret;
	u32 status;
	u32 action[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to suspend the GuC.
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	/*
	 * The ENTER_S_STATE action queues the save/restore operation in GuC FW
	 * and then returns, so waiting on the H2G is not enough to guarantee
	 * GuC is done. When all the processing is done, GuC writes
	 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
	 * on that. Note that GuC does not ensure that the value in the register
	 * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
	 * in progress so we need to take care of that ourselves as well.
	 */

	intel_uncore_write(uncore, SOFT_SCRATCH(14),
			   INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:	intel_guc structure
 * @engine:	engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	/* XXX: to be implemented with submission interface rework */

	return -ENODEV;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
	};

	/*
	 * If GuC communication is enabled but submission is not supported,
	 * we do not need to resume the GuC, but we do still need GuC
	 * communication enabled on resume (handled elsewhere in the resume
	 * path).
	 */
	if (!intel_guc_submission_is_enabled(guc))
		return 0;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to memory location of the FW itself (WOPCM)
 * or other parts of the HW. The driver must take care not to place objects that
 * the GuC is going to access in these reserved ranges. The layout of the GuC
 * address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address    +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^     |                    |
 *     |       |     |        GuC         |
 *     |      GuC    |       WOPCM        |
 *     |     WOPCM   |                    |
 *     v      Size   |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
 * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */
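
/*
 * In practice this means any object the GuC must see has to be pinned in the
 * GGTT at an offset of at least i915_ggtt_pin_bias(vma) and below
 * GUC_GGTT_TOP; intel_guc_allocate_vma() below takes care of exactly that.
 */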

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its whole lifetime,
 * so we allocate both some backing storage and a range inside the Global GTT.
 * We must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias)
 * because that range is reserved inside GuC.
 *
 * Return:	A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return i915_vma_make_unshrinkable(vma);

err:
	i915_gem_object_put(obj);
	return vma;
}
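
/*
 * Typical use, a sketch with a hypothetical size (the perma-pinned GuC
 * objects such as the log buffer are created through this path):
 *
 *	struct i915_vma *vma = intel_guc_allocate_vma(guc, SZ_16K);
 *
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */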

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 * @out_vma:	return variable for the allocated vma pointer
 * @out_vaddr:	return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return:	0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, size);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	*out_vma = vma;
	*out_vaddr = vaddr;

	return 0;
}