/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "drmP.h"
#include "drm_sarea.h"
#include "radeon.h"
#include "radeon_drm.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>

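/*
 * Driver unload: tear down modesetting, then the device core, then free
 * the radeon_device that backs this drm_device.
 */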
int radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return 0;
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);
	kfree(rdev);
	dev->dev_private = NULL;
	return 0;
}

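/*
 * Driver load: allocate the radeon_device, work out the bus type
 * (AGP/PCIE/PCI), then bring up the device core and modesetting.
 */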
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	/* radeon_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods */
	acpi_status = radeon_acpi_init(rdev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");

	/* Again modeset_init should fail only on fatal error
	 * otherwise it should provide enough functionalities
	 * for shadowfb to run
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
out:
	if (r)
		radeon_driver_unload_kms(dev);
	return r;
}

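/*
 * Track which drm_file currently owns an exclusive hardware feature
 * (Hyper-Z, CMASK). "*value" is 1 to request ownership, 0 to drop it,
 * and is rewritten to report whether the caller now owns the feature.
 */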
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	mutex_lock(&dev->struct_mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&dev->struct_mutex);
}

/*
 * Userspace get information ioctl
 */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value_ptr;
	uint32_t value;
	struct drm_crtc *crtc;
	int i, found;

	info = data;
	value_ptr = (uint32_t *)((unsigned long)info->value);
	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
		return -EFAULT;

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		value = dev->pci_device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			value = false;
		else
			value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		value = rdev->accel_working;
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_IB_VM_MAX_SIZE;
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}
	return 0;
}

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
	return 0;
}

void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}

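/*
 * Per-file-descriptor state: on Cayman and newer a per-open GPU virtual
 * address space is created on open and torn down on close; preclose also
 * drops any Hyper-Z/CMASK ownership held by the closing file.
 */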
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	file_priv->driver_priv = NULL;

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		int r;

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		r = radeon_vm_init(rdev, &fpriv->vm);
		if (r) {
			radeon_vm_fini(rdev, &fpriv->vm);
			kfree(fpriv);
			return r;
		}

		file_priv->driver_priv = fpriv;
	}
	return 0;
}

void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;

		radeon_vm_fini(rdev, &fpriv->vm);
		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}

void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
}

/*
 * VBlank related functions.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return radeon_get_vblank_counter(rdev, crtc);
}

int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	rdev->irq.crtc_vblank_int[crtc] = true;

	return radeon_irq_set(rdev);
}

void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return;
	}

	rdev->irq.crtc_vblank_int[crtc] = false;

	radeon_irq_set(rdev);
}

int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc;
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc);
}

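/*
 * IOCTL.
 *
 * The legacy DMA ioctl below is only a stub: command submission under KMS
 * goes through the GEM/CS ioctls in radeon_ioctls_kms[] instead.
 */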
int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	/* Not valid in KMS. */
	return -EINVAL;
}

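/*
 * Stub generator for the old UMS ioctls: each generated handler just logs
 * the call and fails with -EINVAL, since the UMS paths are dead under KMS.
 */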
#define KMS_INVALID_IOCTL(name)						\
int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
{									\
	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
	return -EINVAL;							\
}

/*
 * All these ioctls are invalid in kms world.
 */
KMS_INVALID_IOCTL(radeon_cp_init_kms)
KMS_INVALID_IOCTL(radeon_cp_start_kms)
KMS_INVALID_IOCTL(radeon_cp_stop_kms)
KMS_INVALID_IOCTL(radeon_cp_reset_kms)
KMS_INVALID_IOCTL(radeon_cp_idle_kms)
KMS_INVALID_IOCTL(radeon_cp_resume_kms)
KMS_INVALID_IOCTL(radeon_engine_reset_kms)
KMS_INVALID_IOCTL(radeon_fullscreen_kms)
KMS_INVALID_IOCTL(radeon_cp_swap_kms)
KMS_INVALID_IOCTL(radeon_cp_clear_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
KMS_INVALID_IOCTL(radeon_cp_indices_kms)
KMS_INVALID_IOCTL(radeon_cp_texture_kms)
KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
KMS_INVALID_IOCTL(radeon_cp_flip_kms)
KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
KMS_INVALID_IOCTL(radeon_mem_free_kms)
KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
KMS_INVALID_IOCTL(radeon_irq_emit_kms)
KMS_INVALID_IOCTL(radeon_irq_wait_kms)
KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)

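/*
 * Master ioctl table: the legacy UMS entries resolve to the stubs above
 * and simply fail, while the GEM/CS/INFO entries implement the KMS
 * interface that current userspace actually uses.
 */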
struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};

int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);