drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
#include "device_include/svga3d_caps.h"

struct svga_3d_compat_cap {
	SVGA3dCapsRecordHeader header;
	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
};
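
/**
 * vmw_getparam_ioctl - Report a device parameter to user-space
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_getparam_arg naming the parameter
 *        and receiving its value.
 * @file_priv: Identifies the calling client.
 *
 * Ioctl handler for DRM_VMW_GET_PARAM. Reports overlay stream counts,
 * hardware and FIFO capabilities, memory limits and supported shader-model
 * levels. Note that querying DRM_VMW_PARAM_MAX_MOB_MEMORY marks the calling
 * client as guest-backed-object aware, which changes the values subsequently
 * reported for DRM_VMW_PARAM_MAX_SURF_MEMORY and DRM_VMW_PARAM_3D_CAPS_SIZE.
 *
 * Return: 0 on success, -EINVAL for an unknown parameter.
 */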
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
		break;
	case DRM_VMW_PARAM_HW_CAPS:
		param->value = dev_priv->capabilities;
		break;
	case DRM_VMW_PARAM_HW_CAPS2:
		param->value = dev_priv->capabilities2;
		break;
	case DRM_VMW_PARAM_FIFO_CAPS:
		param->value = dev_priv->fifo.capabilities;
		break;
	case DRM_VMW_PARAM_MAX_FB_SIZE:
		param->value = dev_priv->prim_bb_mem;
		break;
	case DRM_VMW_PARAM_FIFO_HW_VERSION:
	{
		u32 *fifo_mem = dev_priv->mmio_virt;
		const struct vmw_fifo_state *fifo = &dev_priv->fifo;

		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
			param->value = SVGA3D_HWVERSION_WS8_B1;
			break;
		}

		param->value =
			vmw_mmio_read(fifo_mem +
				      ((fifo->capabilities &
					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				       SVGA_FIFO_3D_HWVERSION_REVISED :
				       SVGA_FIFO_3D_HWVERSION));
		break;
	}
	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    !vmw_fp->gb_aware)
			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
		else
			param->value = dev_priv->memory_size;
		break;
	case DRM_VMW_PARAM_3D_CAPS_SIZE:
		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
		    vmw_fp->gb_aware)
			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
			param->value = sizeof(struct svga_3d_compat_cap) +
				sizeof(uint32_t);
		else
			param->value = (SVGA_FIFO_3D_CAPS_LAST -
					SVGA_FIFO_3D_CAPS + 1) *
				sizeof(uint32_t);
		break;
	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
		vmw_fp->gb_aware = true;
		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
		break;
	case DRM_VMW_PARAM_MAX_MOB_SIZE:
		param->value = dev_priv->max_mob_size;
		break;
	case DRM_VMW_PARAM_SCREEN_TARGET:
		param->value =
			(dev_priv->active_display_unit == vmw_du_screen_target);
		break;
	case DRM_VMW_PARAM_DX:
		param->value = has_sm4_context(dev_priv);
		break;
	case DRM_VMW_PARAM_SM4_1:
		param->value = has_sm4_1_context(dev_priv);
		break;
	case DRM_VMW_PARAM_SM5:
		param->value = has_sm5_context(dev_priv);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
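
/**
 * vmw_mask_legacy_multisample - Filter the deprecated multisample devcap
 *
 * @cap: The devcap index being queried.
 * @fmt_value: The value read from the device for that devcap.
 *
 * Return: 0 for the deprecated SVGA3D_DEVCAP_DEAD5 capability, otherwise
 * @fmt_value unchanged. See the comment in the function body for the
 * rationale.
 */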
static u32 vmw_mask_legacy_multisample(unsigned int cap, u32 fmt_value)
{
	/*
	 * A version of user-space exists which uses MULTISAMPLE_MASKABLESAMPLES
	 * to check the sample count supported by the virtual device. Since
	 * multisample counts were never supported for MOB-backed surfaces,
	 * return 0 for that capability.
	 *
	 * The MULTISAMPLE_MASKABLESAMPLES devcap is marked as deprecated by
	 * the virtual device.
	 */
	if (cap == SVGA3D_DEVCAP_DEAD5)
		return 0;

	return fmt_value;
}
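
/**
 * vmw_fill_compat_cap - Build a legacy-format 3D capability record
 *
 * @dev_priv: Pointer to the device private structure.
 * @bounce: Kernel bounce buffer to fill with a struct svga_3d_compat_cap.
 * @size: Size of @bounce in bytes.
 *
 * Reads the device capabilities through the SVGA_REG_DEV_CAP register and
 * packs them as (index, value) pairs into the SVGA3DCAPS_RECORD_DEVCAPS
 * record layout expected by user-space that is not guest-backed-object
 * aware.
 *
 * Return: 0 on success, -EINVAL if @size cannot hold the record header.
 */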
static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
			       size_t size)
{
	struct svga_3d_compat_cap *compat_cap =
		(struct svga_3d_compat_cap *) bounce;
	unsigned int i;
	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
	unsigned int max_size;

	if (size < pair_offset)
		return -EINVAL;

	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);

	if (max_size > SVGA3D_DEVCAP_MAX)
		max_size = SVGA3D_DEVCAP_MAX;

	compat_cap->header.length =
		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;

	spin_lock(&dev_priv->cap_lock);
	for (i = 0; i < max_size; ++i) {
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
		compat_cap->pairs[i][0] = i;
		compat_cap->pairs[i][1] = vmw_mask_legacy_multisample
			(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
	}
	spin_unlock(&dev_priv->cap_lock);

	return 0;
}
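
/**
 * vmw_get_cap_3d_ioctl - Copy the device 3D capabilities to user-space
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_get_3d_cap_arg describing the
 *        user-space destination buffer.
 * @file_priv: Identifies the calling client.
 *
 * Fills a bounce buffer with capability data in the format the caller
 * expects: raw devcap values for guest-backed-object aware clients, the
 * compat record for other clients on guest-backed hardware, or a copy of
 * the FIFO 3D caps otherwise, then copies it to the user-space buffer.
 */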
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_get_3d_cap_arg *arg =
		(struct drm_vmw_get_3d_cap_arg *) data;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t size;
	u32 *fifo_mem;
	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
	void *bounce;
	int ret;
	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

	if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
		VMW_DEBUG_USER("Illegal GET_3D_CAP argument.\n");
		return -EINVAL;
	}

	if (gb_objects && vmw_fp->gb_aware)
		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
	else if (gb_objects)
		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
	else
		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
			sizeof(uint32_t);

	if (arg->max_size < size)
		size = arg->max_size;

	bounce = vzalloc(size);
	if (unlikely(bounce == NULL)) {
		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
		return -ENOMEM;
	}

	if (gb_objects && vmw_fp->gb_aware) {
		int i, num;
		uint32_t *bounce32 = (uint32_t *) bounce;

		num = size / sizeof(uint32_t);
		if (num > SVGA3D_DEVCAP_MAX)
			num = SVGA3D_DEVCAP_MAX;

		spin_lock(&dev_priv->cap_lock);
		for (i = 0; i < num; ++i) {
			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
			*bounce32++ = vmw_mask_legacy_multisample
				(i, vmw_read(dev_priv, SVGA_REG_DEV_CAP));
		}
		spin_unlock(&dev_priv->cap_lock);
	} else if (gb_objects) {
		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
		if (unlikely(ret != 0))
			goto out_err;
	} else {
		fifo_mem = dev_priv->mmio_virt;
		memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
	}

	ret = copy_to_user(buffer, bounce, size);
	if (ret)
		ret = -EFAULT;
out_err:
	vfree(bounce);

	if (unlikely(ret != 0))
		DRM_ERROR("Failed to report 3D caps info.\n");

	return ret;
}
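
/**
 * vmw_present_ioctl - Present a surface to a framebuffer
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_present_arg.
 * @file_priv: Identifies the calling client.
 *
 * Copies the clip rectangles from user-space, looks up the destination
 * framebuffer and the source surface, and calls vmw_kms_present() under
 * the modeset and ttm read locks.
 */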
int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	struct vmw_resource *res;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		VMW_DEBUG_USER("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
	if (!fb) {
		VMW_DEBUG_USER("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(fb);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter,
					      &res);
	if (ret)
		goto out_no_surface;

	surface = vmw_res_to_srf(res);
	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

out_no_surface:
	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_put(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}
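
/**
 * vmw_present_readback_ioctl - Read back from a buffer-backed framebuffer
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_present_readback_arg.
 * @file_priv: Identifies the calling client.
 *
 * Copies the clip rectangles from user-space, verifies that the target
 * framebuffer is buffer-backed, and calls vmw_kms_readback(), optionally
 * handing a fence representation back to user-space through the fence_rep
 * argument.
 */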
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_readback_arg *arg =
		(struct drm_vmw_present_readback_arg *)data;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;

	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		VMW_DEBUG_USER("Argument clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
	if (!fb) {
		VMW_DEBUG_USER("Invalid framebuffer id.\n");
		ret = -ENOENT;
		goto out_no_fb;
	}

	vfb = vmw_framebuffer_to_vfb(fb);
	if (!vfb->bo) {
		VMW_DEBUG_USER("Framebuffer not buffer backed.\n");
		ret = -EINVAL;
		goto out_no_ttm_lock;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_kms_readback(dev_priv, file_priv,
			       vfb, user_fence_rep,
			       clips, num_clips);

	ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
	drm_framebuffer_put(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}

/**
 * vmw_fops_poll - wrapper around the drm_poll function
 *
 * @filp: See the linux fops poll documentation.
 * @wait: See the linux fops poll documentation.
 *
 * Wrapper around the drm_poll function that makes sure the device is
 * processing the fifo if drm_poll decides to wait.
 */
__poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_poll(filp, wait);
}

/**
 * vmw_fops_read - wrapper around the drm_read function
 *
 * @filp: See the linux fops read documentation.
 * @buffer: See the linux fops read documentation.
 * @count: See the linux fops read documentation.
 * @offset: See the linux fops read documentation.
 *
 * Wrapper around the drm_read function that makes sure the device is
 * processing the fifo if drm_read decides to wait.
 */
ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
		      size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct vmw_private *dev_priv =
		vmw_priv(file_priv->minor->dev);

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	return drm_read(filp, buffer, count, offset);
}