/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_gem.h"

#include <drm/drm_fb_helper.h>

#include <linux/vga_switcheroo.h>

#include "amdgpu_display.h"
/* object hierarchy -
 * this contains a helper + an amdgpu fb
 * the helper contains a pointer to the amdgpu framebuffer baseclass.
 */
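/*
 * For reference, a sketch of the fbdev wrapper this file operates on (the
 * real definition lives in amdgpu_mode.h; the exact field list here is an
 * assumption, but the drm_fb_helper must be the first member so the
 * drm_fb_helper pointer handed to the fb_probe callback can be cast back to
 * struct amdgpu_fbdev):
 *
 *	struct amdgpu_fbdev {
 *		struct drm_fb_helper helper;	// fbdev emulation base class
 *		struct amdgpu_framebuffer rfb;	// backing amdgpu framebuffer
 *		struct amdgpu_device *adev;	// owning device
 *	};
 */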
static int
amdgpufb_open(struct fb_info *info, int user)
{
        struct amdgpu_fbdev *rfbdev = info->par;
        struct amdgpu_device *adev = rfbdev->adev;
        int ret = pm_runtime_get_sync(adev->ddev->dev);

        if (ret < 0 && ret != -EACCES) {
                pm_runtime_mark_last_busy(adev->ddev->dev);
                pm_runtime_put_autosuspend(adev->ddev->dev);
                return ret;
        }
        return 0;
}
static int
amdgpufb_release(struct fb_info *info, int user)
{
        struct amdgpu_fbdev *rfbdev = info->par;
        struct amdgpu_device *adev = rfbdev->adev;

        pm_runtime_mark_last_busy(adev->ddev->dev);
        pm_runtime_put_autosuspend(adev->ddev->dev);
        return 0;
}
static struct fb_ops amdgpufb_ops = {
        .owner = THIS_MODULE,
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_open = amdgpufb_open,
        .fb_release = amdgpufb_release,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
        .fb_copyarea = drm_fb_helper_cfb_copyarea,
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
};
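/*
 * Round a scanline up to the pitch alignment the display hardware requires:
 * the width in pixels is rounded up to the next multiple of (pitch_mask + 1)
 * for the given bytes-per-pixel, and the result is returned as a pitch in
 * bytes.
 */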
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp,
                       bool tiled)
{
        int aligned = width;
        int pitch_mask = 0;

        switch (cpp) {
        case 1:
                pitch_mask = 255;
                break;
        case 2:
                pitch_mask = 127;
                break;
        case 3:
        case 4:
                pitch_mask = 63;
                break;
        }

        aligned += pitch_mask;
        aligned &= ~pitch_mask;
        return aligned * cpp;
}
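/* Unmap and unpin the framebuffer BO, then drop the GEM reference. */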
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
{
        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
        int ret;

        ret = amdgpu_bo_reserve(abo, true);
        if (likely(ret == 0)) {
                amdgpu_bo_kunmap(abo);
                amdgpu_bo_unpin(abo);
                amdgpu_bo_unreserve(abo);
        }
        drm_gem_object_put_unlocked(gobj);
}
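/*
 * Allocate the buffer object backing the console: align the pitch to the CRTC
 * limits, create a CPU-accessible GEM BO in a scanout-capable domain, pin it,
 * bind it to the GART and kmap it so fbcon can draw through a CPU mapping.
 */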
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
                                         struct drm_mode_fb_cmd2 *mode_cmd,
                                         struct drm_gem_object **gobj_p)
{
        struct amdgpu_device *adev = rfbdev->adev;
        struct drm_gem_object *gobj = NULL;
        struct amdgpu_bo *abo = NULL;
        bool fb_tiled = false; /* useful for testing */
        u32 tiling_flags = 0, domain;
        int ret;
        int aligned_size, size;
        int height = mode_cmd->height;
        u32 cpp;

        cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);

        /* need to align pitch with crtc limits */
        mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
                                                  fb_tiled);
        domain = amdgpu_display_supported_domains(adev);

        height = ALIGN(mode_cmd->height, 8);
        size = mode_cmd->pitches[0] * height;
        aligned_size = ALIGN(size, PAGE_SIZE);
        ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain,
                                       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
                                       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                                       AMDGPU_GEM_CREATE_VRAM_CLEARED,
                                       ttm_bo_type_kernel, NULL, &gobj);
        if (ret) {
                pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
                return -ENOMEM;
        }
        abo = gem_to_amdgpu_bo(gobj);

        if (fb_tiled)
                tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE,
                                                 GRPH_ARRAY_2D_TILED_THIN1);

        ret = amdgpu_bo_reserve(abo, false);
        if (unlikely(ret != 0))
                goto out_unref;

        if (tiling_flags) {
                ret = amdgpu_bo_set_tiling_flags(abo, tiling_flags);
                if (ret)
                        dev_err(adev->dev, "FB failed to set tiling flags\n");
        }

        ret = amdgpu_bo_pin(abo, domain);
        if (ret) {
                amdgpu_bo_unreserve(abo);
                goto out_unref;
        }

        ret = amdgpu_ttm_alloc_gart(&abo->tbo);
        if (ret) {
                amdgpu_bo_unreserve(abo);
                dev_err(adev->dev, "%p bind failed\n", abo);
                goto out_unref;
        }

        ret = amdgpu_bo_kmap(abo, NULL);
        amdgpu_bo_unreserve(abo);
        if (ret)
                goto out_unref;

        *gobj_p = gobj;
        return 0;

out_unref:
        amdgpufb_destroy_pinned_object(gobj);
        *gobj_p = NULL;
        return ret;
}
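/*
 * .fb_probe callback for the DRM fb helper: allocates the pinned backing
 * object, wraps it in the amdgpu framebuffer embedded in the fbdev struct and
 * fills in the fb_info so fbcon draws through the CPU mapping of the BO.
 */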
static int amdgpufb_create(struct drm_fb_helper *helper,
                           struct drm_fb_helper_surface_size *sizes)
{
        struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
        struct amdgpu_device *adev = rfbdev->adev;
        struct fb_info *info;
        struct drm_framebuffer *fb = NULL;
        struct drm_mode_fb_cmd2 mode_cmd;
        struct drm_gem_object *gobj = NULL;
        struct amdgpu_bo *abo = NULL;
        unsigned long tmp;
        int ret;

        mode_cmd.width = sizes->surface_width;
        mode_cmd.height = sizes->surface_height;

        if (sizes->surface_bpp == 24)
                sizes->surface_bpp = 32;

        mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
                                                          sizes->surface_depth);

        ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
        if (ret) {
                DRM_ERROR("failed to create fbcon object %d\n", ret);
                return ret;
        }

        abo = gem_to_amdgpu_bo(gobj);

        /* okay we have an object now allocate the framebuffer */
        info = drm_fb_helper_alloc_fbi(helper);
        if (IS_ERR(info)) {
                ret = PTR_ERR(info);
                goto out;
        }

        info->par = rfbdev;
        info->skip_vt_switch = true;

        ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
                                              &mode_cmd, gobj);
        if (ret) {
                DRM_ERROR("failed to initialize framebuffer %d\n", ret);
                goto out;
        }

        fb = &rfbdev->rfb.base;

        /* setup helper */
        rfbdev->helper.fb = fb;

        strcpy(info->fix.id, "amdgpudrmfb");

        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);

        info->fbops = &amdgpufb_ops;

        /* the scanout BO lives in VRAM; expose it through the PCI aperture */
        tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
        info->fix.smem_start = adev->gmc.aper_base + tmp;
        info->fix.smem_len = amdgpu_bo_size(abo);
        info->screen_base = amdgpu_bo_kptr(abo);
        info->screen_size = amdgpu_bo_size(abo);

        drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width,
                               sizes->fb_height);

        /* setup aperture base/size for vesafb takeover */
        info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
        info->apertures->ranges[0].size = adev->gmc.aper_size;

        /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

        if (info->screen_base == NULL) {
                ret = -ENOSPC;
                goto out;
        }

        DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
        DRM_INFO("vram aper at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
        DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
        DRM_INFO("fb depth is %d\n", fb->format->depth);
        DRM_INFO("   pitch is %d\n", fb->pitches[0]);

        vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
        return 0;

out:
        if (fb && ret) {
                drm_gem_object_put_unlocked(gobj);
                drm_framebuffer_unregister_private(fb);
                drm_framebuffer_cleanup(fb);
        }
        return ret;
}
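/*
 * Tear down the fbdev emulation: unregister the fb_info from fbdev first,
 * then release the pinned backing object and clean up the DRM framebuffer
 * wrapped around it before finalizing the helper.
 */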
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
        struct amdgpu_framebuffer *rfb = &rfbdev->rfb;

        drm_fb_helper_unregister_fbi(&rfbdev->helper);

        if (rfb->base.obj[0]) {
                amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
                rfb->base.obj[0] = NULL;
                drm_framebuffer_unregister_private(&rfb->base);
                drm_framebuffer_cleanup(&rfb->base);
        }
        drm_fb_helper_fini(&rfbdev->helper);

        return 0;
}
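/* fb_probe is invoked by the DRM fb helper when fbcon needs a framebuffer. */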
static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
        .fb_probe = amdgpufb_create,
};
int amdgpu_fbdev_init(struct amdgpu_device *adev)
{
        struct amdgpu_fbdev *rfbdev;
        int bpp_sel = 32;
        int ret;

        /* don't init fbdev on hw without DCE */
        if (!adev->mode_info.mode_config_initialized)
                return 0;

        /* don't init fbdev if there are no connectors */
        if (list_empty(&adev->ddev->mode_config.connector_list))
                return 0;

        /* select 8 bpp console on low vram cards */
        if (adev->gmc.real_vram_size <= (32*1024*1024))
                bpp_sel = 8;

        rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
        if (!rfbdev)
                return -ENOMEM;

        rfbdev->adev = adev;
        adev->mode_info.rfbdev = rfbdev;

        drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
                              &amdgpu_fb_helper_funcs);

        ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
                                 AMDGPUFB_CONN_LIMIT);
        if (ret) {
                kfree(rfbdev);
                return ret;
        }

        drm_fb_helper_single_add_all_connectors(&rfbdev->helper);

        /* disable all the possible outputs/crtcs before entering KMS mode */
        if (!amdgpu_device_has_dc_support(adev))
                drm_helper_disable_unused_functions(adev->ddev);

        drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
        return 0;
}
void amdgpu_fbdev_fini(struct amdgpu_device *adev)
{
        if (!adev->mode_info.rfbdev)
                return;

        amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
        kfree(adev->mode_info.rfbdev);
        adev->mode_info.rfbdev = NULL;
}
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
        if (adev->mode_info.rfbdev)
                drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
                                                   state);
}
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
{
        struct amdgpu_bo *robj;
        int size = 0;

        if (!adev->mode_info.rfbdev)
                return 0;

        robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]);
        size += amdgpu_bo_size(robj);
        return size;
}
bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
{
        if (!adev->mode_info.rfbdev)
                return false;
        if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]))
                return true;
        return false;
}