/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/ascii85.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include "adreno_gpu.h"
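/*
 * The GEM/address-space helpers used below (msm_gem_kernel_new_locked(),
 * msm_gem_get_iova(), msm_gem_put_vaddr()) live in the driver's local
 * headers; it is assumed the original include list also pulled these in:
 */
#include "msm_gem.h"
#include "msm_mmu.h"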
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_GMEM_BASE:
		*value = 0x100000;
		return 0;
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
			(adreno_gpu->rev.minor << 8) |
			(adreno_gpu->rev.major << 16) |
			(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = adreno_gpu->base.fast_rate;
		return 0;
	case MSM_PARAM_TIMESTAMP:
		if (adreno_gpu->funcs->get_timestamp) {
			int ret;

			pm_runtime_get_sync(&gpu->pdev->dev);
			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
			pm_runtime_put_autosuspend(&gpu->pdev->dev);

			return ret;
		}
		return -EINVAL;
	case MSM_PARAM_NR_RINGS:
		*value = gpu->nr_rings;
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}
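/*
 * Firmware loading: the first location that successfully provides an image is
 * remembered in adreno_gpu->fwloc, so later requests only probe that location
 * instead of walking the whole new/legacy/helper fallback chain again.
 */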
const struct firmware *
adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
	struct drm_device *drm = adreno_gpu->base.dev;
	const struct firmware *fw = NULL;
	char *newname;
	int ret;

	newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
	if (!newname)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try first to load from qcom/$fwfile using a direct load (to avoid
	 * a potential timeout waiting for usermode helper)
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_NEW)) {

		ret = request_firmware_direct(&fw, newname, drm->dev);
		if (!ret) {
			dev_info(drm->dev, "loaded %s from new location\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_NEW;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			dev_err(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	/*
	 * Then try the legacy location without qcom/ prefix
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {

		ret = request_firmware_direct(&fw, fwname, drm->dev);
		if (!ret) {
			dev_info(drm->dev, "loaded %s from legacy location\n",
				fwname);
			adreno_gpu->fwloc = FW_LOCATION_LEGACY;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			dev_err(drm->dev, "failed to load %s: %d\n",
				fwname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	/*
	 * Finally fall back to request_firmware() for cases where the
	 * usermode helper is needed (I think mainly android)
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {

		ret = request_firmware(&fw, newname, drm->dev);
		if (!ret) {
			dev_info(drm->dev, "loaded %s with helper\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_HELPER;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			dev_err(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	dev_err(drm->dev, "failed to load %s\n", fwname);
	fw = ERR_PTR(-ENOENT);
out:
	kfree(newname);
	return fw;
}
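/*
 * Load every firmware image named in the adreno_info table into
 * adreno_gpu->fw[], skipping entries that are absent or already loaded.
 */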
int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
		const struct firmware *fw;

		if (!adreno_gpu->info->fw[i])
			continue;

		/* Skip if the firmware has already been loaded */
		if (adreno_gpu->fw[i])
			continue;

		fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
		if (IS_ERR(fw))
			return PTR_ERR(fw);

		adreno_gpu->fw[i] = fw;
	}

	return 0;
}
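/*
 * Copy a firmware image into a GPU-readable GEM buffer. The first dword of
 * the image is skipped (fw->size - 4, &fw->data[4]); it is assumed to be a
 * header word that the GPU itself should not see.
 */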
struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova)
{
	struct drm_gem_object *bo;
	void *ptr;

	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);

	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	memcpy(ptr, &fw->data[4], fw->size - 4);

	msm_gem_put_vaddr(bo);

	return bo;
}
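/*
 * Common hw_init for all adreno generations: make sure firmware is loaded,
 * (re)map and reset each ringbuffer, then program the CP ringbuffer control,
 * base and rptr-shadow registers. Per-generation code presumably layers its
 * own init on top of this.
 */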
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret, i;

	DBG("%s", gpu->name);

	ret = adreno_load_fw(adreno_gpu);
	if (ret)
		return ret;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (!ring)
			continue;

		ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
		if (ret) {
			ring->iova = 0;
			dev_err(gpu->dev->dev,
				"could not map ringbuffer %d: %d\n", i, ret);
			return ret;
		}

		ring->cur = ring->start;
		ring->next = ring->start;

		/* reset completed fence seqno: */
		ring->memptrs->fence = ring->seqno;
		ring->memptrs->rptr = 0;
	}

	/*
	 * Setup REG_CP_RB_CNTL.  The same value is used across targets (with
	 * the exception of A430 that disables the RPTR shadow) - the calculation
	 * for the ringbuffer size and block size is moved to msm_gpu.h for the
	 * pre-processor to deal with and the A430 variant is ORed in here
	 */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
		MSM_GPU_RB_CNTL_DEFAULT |
		(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));

	/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
		REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);

	if (!adreno_is_a430(adreno_gpu)) {
		adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
			REG_ADRENO_CP_RB_RPTR_ADDR_HI,
			rbmemptr(gpu->rb[0], rptr));
	}

	return 0;
}
/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
		struct msm_ringbuffer *ring)
{
	if (adreno_is_a430(adreno_gpu))
		return ring->memptrs->rptr = adreno_gpu_read(
			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
	else
		return ring->memptrs->rptr;
}
struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
	return gpu->rb[0];
}
void adreno_recover(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	// XXX pm-runtime??  we *need* the device to be off after this
	// so maybe continuing to call ->pm_suspend/resume() is better?

	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);

	ret = msm_gpu_hw_init(gpu);
	if (ret)
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
}
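/*
 * Build the CP command stream for a submit: emit the userspace IBs, write the
 * submit seqno to a scratch register, and finish with a CACHE_FLUSH_TS event
 * so the GPU raises an IRQ once the retired fence value lands in memory.
 */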
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	unsigned i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			break;
		}
	}

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->seqno);

	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

	/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
	OUT_RING(ring, rbmemptr(ring, fence));
	OUT_RING(ring, submit->seqno);

	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}

	gpu->funcs->flush(gpu, ring);
}
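/*
 * Publish new commands to the GPU: commit the software wptr shadow and then
 * write CP_RB_WPTR so the CP starts fetching them.
 */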
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr;

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/*
	 * Mask wptr value that we calculate to fit in the HW range. This is
	 * to account for the possibility that the last command fit exactly into
	 * the ringbuffer and rb->next hasn't wrapped to zero yet
	 */
	wptr = get_wptr(ring);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(ring);

	/* wait for CP to drain ringbuffer: */
	if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
		return true;

	/* TODO maybe we need to reset GPU here to recover from hang? */
	DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
		gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);

	return false;
}
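/*
 * Capture a GPU state snapshot: per-ring bookkeeping plus ring contents, and
 * a dump of every register range listed in adreno_gpu->registers.
 */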
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i, count = 0;

	kref_init(&state->ref);

	ktime_get_real_ts64(&state->time);

	for (i = 0; i < gpu->nr_rings; i++) {
		int size = 0, j;

		state->ring[i].fence = gpu->rb[i]->memptrs->fence;
		state->ring[i].iova = gpu->rb[i]->iova;
		state->ring[i].seqno = gpu->rb[i]->seqno;
		state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
		state->ring[i].wptr = get_wptr(gpu->rb[i]);

		/* Copy at least 'wptr' dwords of the data */
		size = state->ring[i].wptr;

		/* After wptr find the last non zero dword to save space */
		for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
			if (gpu->rb[i]->start[j])
				size = j + 1;

		state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
		if (state->ring[i].data) {
			memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
			state->ring[i].data_size = size << 2;
		}
	}

	/* Count the number of registers */
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
		count += adreno_gpu->registers[i + 1] -
			adreno_gpu->registers[i] + 1;

	state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
	if (state->registers) {
		int pos = 0;

		for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
			u32 start = adreno_gpu->registers[i];
			u32 end = adreno_gpu->registers[i + 1];
			u32 addr;

			for (addr = start; addr <= end; addr++) {
				state->registers[pos++] = addr;
				state->registers[pos++] = gpu_read(gpu, addr);
			}
		}

		state->nr_registers = count;
	}

	return 0;
}
void adreno_gpu_state_destroy(struct msm_gpu_state *state)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(state->ring); i++)
		kfree(state->ring[i].data);

	for (i = 0; state->bos && i < state->nr_bos; i++)
		kvfree(state->bos[i].data);

	kfree(state->registers);
}
static void adreno_gpu_state_kref_destroy(struct kref *kref)
{
	struct msm_gpu_state *state = container_of(kref,
		struct msm_gpu_state, ref);

	adreno_gpu_state_destroy(state);
	kfree(state);
}

int adreno_gpu_state_put(struct msm_gpu_state *state)
{
	if (IS_ERR_OR_NULL(state))
		return 1;

	return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
}
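/*
 * Human-readable state dump used for debugfs and (when enabled) dev_coredump
 * output; buffer and ring contents are emitted as ascii85 blobs.
 */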
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)

static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
{
	char out[ASCII85_BUFSZ];
	long l, datalen, i;

	if (!ptr || !len)
		return;

	/*
	 * Only dump the non-zero part of the buffer - rarely will any data
	 * completely fill the entire allocated size of the buffer
	 */
	for (datalen = 0, i = 0; i < len >> 2; i++) {
		if (ptr[i])
			datalen = (i << 2) + 1;
	}

	/* Skip printing the object if it is empty */
	if (datalen == 0)
		return;

	l = ascii85_encode_len(datalen);

	drm_puts(p, " data: !!ascii85 |\n");

	for (i = 0; i < l; i++)
		drm_puts(p, ascii85_encode(ptr[i], out));

	drm_puts(p, "\n");
}
void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	if (IS_ERR_OR_NULL(state))
		return;

	drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);

	drm_puts(p, "ringbuffer:\n");

	for (i = 0; i < gpu->nr_rings; i++) {
		drm_printf(p, " - id: %d\n", i);
		drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
		drm_printf(p, " last-fence: %d\n", state->ring[i].seqno);
		drm_printf(p, " retired-fence: %d\n", state->ring[i].fence);
		drm_printf(p, " rptr: %d\n", state->ring[i].rptr);
		drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
		drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);

		adreno_show_object(p, state->ring[i].data,
			state->ring[i].data_size);
	}

	if (state->bos) {
		drm_puts(p, "bos:\n");

		for (i = 0; i < state->nr_bos; i++) {
			drm_printf(p, " - iova: 0x%016llx\n",
				state->bos[i].iova);
			drm_printf(p, " size: %zd\n", state->bos[i].size);

			adreno_show_object(p, state->bos[i].data,
				state->bos[i].size);
		}
	}

	drm_puts(p, "registers:\n");

	for (i = 0; i < state->nr_registers; i++) {
		drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
			state->registers[i * 2] << 2,
			state->registers[(i * 2) + 1]);
	}
}
#endif
/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful.  The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		printk("rb %d: fence: %d/%d\n", i,
			ring->memptrs->fence,
			ring->seqno);

		printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
		printk("rb wptr: %d\n", get_wptr(ring));
	}
}
/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			printk("IO:R %08x %08x\n", addr<<2, val);
		}
	}
}
static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
	uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
	/* Use ring->next to calculate free size */
	uint32_t wptr = ring->next - ring->start;
	uint32_t rptr = get_rptr(adreno_gpu, ring);

	return (rptr + (size - 1) - wptr) % size;
}
void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
	if (spin_until(ring_freewords(ring) >= ndwords))
		DRM_DEV_ERROR(ring->gpu->dev->dev,
			"timeout waiting for space in ringbuffer %d\n",
			ring->id);
}
/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
static int adreno_get_legacy_pwrlevels(struct device *dev)
{
	struct device_node *child, *node;
	int ret;

	node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
	if (!node) {
		dev_err(dev, "Could not find the GPU powerlevels\n");
		return -ENXIO;
	}

	for_each_child_of_node(node, child) {
		unsigned int val;

		ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
		if (ret)
			continue;

		/*
		 * Skip the intentionally bogus clock value found at the bottom
		 * of most legacy frequency tables
		 */
		if (val != 27000000)
			dev_pm_opp_add(dev, val, 0);
	}

	of_node_put(node);

	return 0;
}
static int adreno_get_pwrlevels(struct device *dev,
		struct msm_gpu *gpu)
{
	unsigned long freq = ULONG_MAX;
	struct dev_pm_opp *opp;
	int ret;

	gpu->fast_rate = 0;

	/* You down with OPP? */
	if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
		ret = adreno_get_legacy_pwrlevels(dev);
	else {
		ret = dev_pm_opp_of_add_table(dev);
		if (ret)
			dev_err(dev, "Unable to set the OPP table\n");
	}

	if (!ret) {
		/* Find the fastest defined rate */
		opp = dev_pm_opp_find_freq_floor(dev, &freq);
		if (!IS_ERR(opp)) {
			gpu->fast_rate = freq;
			dev_pm_opp_put(opp);
		}
	}

	if (!gpu->fast_rate) {
		dev_warn(dev,
			"Could not find a clock rate. Using a reasonable default\n");
		/* Pick a suitably safe clock speed for any target */
		gpu->fast_rate = 200000000;
	}

	DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);

	return 0;
}
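/*
 * Common constructor for all adreno generations: pull the target description
 * from platform data, describe the MMIO/IRQ resources and GPU VA range, set
 * up power levels and runtime PM autosuspend, then hand off to msm_gpu_init()
 * for the generic GPU setup.
 */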
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu,
		const struct adreno_gpu_funcs *funcs, int nr_rings)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	struct msm_gpu_config adreno_gpu_config  = { 0 };
	struct msm_gpu *gpu = &adreno_gpu->base;

	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;

	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
	adreno_gpu_config.irqname = "kgsl_3d0_irq";

	adreno_gpu_config.va_start = SZ_16M;
	adreno_gpu_config.va_end = 0xffffffff;

	adreno_gpu_config.nr_rings = nr_rings;

	adreno_get_pwrlevels(&pdev->dev, gpu);

	pm_runtime_set_autosuspend_delay(&pdev->dev,
		adreno_gpu->info->inactive_period);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, &adreno_gpu_config);
}
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
		release_firmware(adreno_gpu->fw[i]);

	msm_gpu_cleanup(&adreno_gpu->base);
}
);