drivers/gpu/drm/armada/armada_gem.c

/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"

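/*
 * CPU fault handler for mmap'd objects: insert the PFN that backs the
 * faulting address.  -EBUSY from vm_insert_pfn() means another thread
 * raced us and already installed the PTE, so it is treated as success.
 */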
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
        unsigned long addr = (unsigned long)vmf->virtual_address;
        unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
        int ret;

        pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
        ret = vm_insert_pfn(vma, addr, pfn);

        switch (ret) {
        case 0:
        case -EBUSY:
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
        default:
                return VM_FAULT_SIGBUS;
        }
}

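/*
 * Only .fault is driver-specific; .open and .close use the common DRM
 * GEM helpers, which keep the object refcount correct across VMA
 * duplication and teardown.
 */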
const struct vm_operations_struct armada_gem_vm_ops = {
        .fault  = armada_gem_vm_fault,
        .open   = drm_gem_vm_open,
        .close  = drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
        return roundup(size, PAGE_SIZE);
}

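/*
 * Release whatever backing the object has: pages from alloc_pages(),
 * a node in the linear region, and/or an imported dma_buf.  Shmem
 * backing, if any, is dropped by drm_gem_object_release().
 */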
/* dev->struct_mutex is held here */
void armada_gem_free_object(struct drm_gem_object *obj)
{
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);

        DRM_DEBUG_DRIVER("release obj %p\n", dobj);

        drm_gem_free_mmap_offset(&dobj->obj);

        if (dobj->page) {
                /* page backed memory */
                unsigned int order = get_order(dobj->obj.size);
                __free_pages(dobj->page, order);
        } else if (dobj->linear) {
                /* linear backed memory */
                drm_mm_remove_node(dobj->linear);
                kfree(dobj->linear);
                if (dobj->addr)
                        iounmap(dobj->addr);
        }

        if (dobj->obj.import_attach) {
                /* We only ever display imported data */
                dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
                                         DMA_TO_DEVICE);
                drm_prime_gem_destroy(&dobj->obj, NULL);
        }

        drm_gem_object_release(&dobj->obj);

        kfree(dobj);
}

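/*
 * Give the object physically contiguous backing suitable for scanout:
 * small objects (e.g. cursors) come straight from the page allocator,
 * everything else from the driver's linear memory region.
 */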
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
        struct armada_private *priv = dev->dev_private;
        size_t size = obj->obj.size;

        if (obj->page || obj->linear)
                return 0;

        /*
         * If it is a small allocation (typically cursor, which will
         * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
         * Framebuffers will never be this small (our minimum size for
         * framebuffers is larger than this anyway.)  Such objects are
         * only accessed by the CPU so we don't need any special handling
         * here.
         */
        if (size <= 8192) {
                unsigned int order = get_order(size);
                struct page *p = alloc_pages(GFP_KERNEL, order);

                if (p) {
                        obj->addr = page_address(p);
                        obj->phys_addr = page_to_phys(p);
                        obj->page = p;

                        memset(obj->addr, 0, PAGE_ALIGN(size));
                }
        }

        /*
         * We could grab something from CMA if it's enabled, but that
         * involves building in a problem:
         *
         * CMA's interface uses dma_alloc_coherent(), which provides us
         * with a CPU virtual address and a device address.
         *
         * The CPU virtual address may be either an address in the kernel
         * direct mapped region (for example, as it would be on x86) or
         * it may be remapped into another part of kernel memory space
         * (eg, as it would be on ARM.)  This means virt_to_phys() on the
         * returned virtual address is invalid depending on the architecture
         * implementation.
         *
         * The device address may also not be a physical address; it may
         * be that there is some kind of remapping between the device and
         * system RAM, which makes the use of the device address also
         * unsafe to re-use as a physical address.
         *
         * This makes DRM usage of dma_alloc_coherent() in a generic way
         * at best very questionable and unsafe.
         */

        /* Otherwise, grab it from our linear allocation */
        if (!obj->page) {
                struct drm_mm_node *node;
                unsigned align = min_t(unsigned, size, SZ_2M);
                void __iomem *ptr;
                int ret;

                node = kzalloc(sizeof(*node), GFP_KERNEL);
                if (!node)
                        return -ENOSPC;

                mutex_lock(&dev->struct_mutex);
                ret = drm_mm_insert_node(&priv->linear, node, size, align,
                                         DRM_MM_SEARCH_DEFAULT);
                mutex_unlock(&dev->struct_mutex);
                if (ret) {
                        kfree(node);
                        return ret;
                }

                obj->linear = node;

                /* Ensure that the memory we're returning is cleared. */
                ptr = ioremap_wc(obj->linear->start, size);
                if (!ptr) {
                        mutex_lock(&dev->struct_mutex);
                        drm_mm_remove_node(obj->linear);
                        mutex_unlock(&dev->struct_mutex);
                        kfree(obj->linear);
                        obj->linear = NULL;
                        return -ENOMEM;
                }

                memset_io(ptr, 0, size);
                iounmap(ptr);

                obj->phys_addr = obj->linear->start;
                obj->dev_addr = obj->linear->start;
        }

        DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
                         (unsigned long long)obj->phys_addr,
                         (unsigned long long)obj->dev_addr);

        return 0;
}

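/*
 * Return a kernel virtual mapping of the object, lazily creating one
 * for linear objects.  Page-backed objects had their mapping set up
 * at allocation time; shmem objects are never mapped here.
 */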
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
        /* only linear objects need to be ioremap'd */
        if (!dobj->addr && dobj->linear)
                dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
        return dobj->addr;
}

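/*
 * Allocate a GEM object with no shmem backing store (obj.filp stays
 * NULL); the caller is expected to provide backing later, e.g. via
 * armada_gem_linear_back() or an imported dma_buf.
 */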
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
        struct armada_gem_object *obj;

        size = roundup_gem_size(size);

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return NULL;

        drm_gem_private_object_init(dev, &obj->obj, size);
        obj->dev_addr = DMA_ERROR_CODE;

        DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

        return obj;
}

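/*
 * Allocate a shmem-backed GEM object.  The mapping's GFP mask permits
 * highmem pages and marks them reclaimable.
 */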
struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
        size_t size)
{
        struct armada_gem_object *obj;
        struct address_space *mapping;

        size = roundup_gem_size(size);

        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return NULL;

        if (drm_gem_object_init(dev, &obj->obj, size)) {
                kfree(obj);
                return NULL;
        }

        obj->dev_addr = DMA_ERROR_CODE;

        mapping = file_inode(obj->obj.filp)->i_mapping;
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

        DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

        return obj;
}

/* Dumb alloc support */

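/*
 * Dumb buffers are for unaccelerated scanout, so they always get
 * contiguous linear backing rather than shmem pages.
 */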
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
        struct drm_mode_create_dumb *args)
{
        struct armada_gem_object *dobj;
        u32 handle;
        size_t size;
        int ret;

        args->pitch = armada_pitch(args->width, args->bpp);
        args->size = size = args->pitch * args->height;

        dobj = armada_gem_alloc_private_object(dev, size);
        if (dobj == NULL)
                return -ENOMEM;

        ret = armada_gem_linear_back(dev, dobj);
        if (ret)
                goto err;

        ret = drm_gem_handle_create(file, &dobj->obj, &handle);
        if (ret)
                goto err;

        args->handle = handle;

        /* drop reference from allocate - handle holds it now */
        DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
        drm_gem_object_unreference_unlocked(&dobj->obj);
        return ret;
}

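/*
 * Look up the fake mmap offset for a dumb buffer.  Imported objects
 * are refused since their backing pages belong to the exporting
 * device and must not be mapped through this driver.
 */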
int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
        uint32_t handle, uint64_t *offset)
{
        struct armada_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);
        obj = armada_gem_object_lookup(dev, file, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object\n");
                ret = -EINVAL;
                goto err_unlock;
        }

        /* Don't allow imported objects to be mapped */
        if (obj->obj.import_attach) {
                ret = -EINVAL;
                goto err_unref;
        }

        ret = drm_gem_create_mmap_offset(&obj->obj);
        if (ret == 0) {
                *offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
                DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
        }

 err_unref:
        drm_gem_object_unreference(&obj->obj);
 err_unlock:
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
        uint32_t handle)
{
        return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */

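/*
 * Driver-specific object creation: these objects are shmem backed
 * (see armada_gem_alloc_object()) and can be mapped into userspace
 * with the mmap ioctl below.
 */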
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct drm_armada_gem_create *args = data;
        struct armada_gem_object *dobj;
        size_t size;
        u32 handle;
        int ret;

        if (args->size == 0)
                return -ENOMEM;

        size = args->size;

        dobj = armada_gem_alloc_object(dev, size);
        if (dobj == NULL)
                return -ENOMEM;

        ret = drm_gem_handle_create(file, &dobj->obj, &handle);
        if (ret)
                goto err;

        args->handle = handle;

        /* drop reference from allocate - handle holds it now */
        DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
        drm_gem_object_unreference_unlocked(&dobj->obj);
        return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct drm_armada_gem_mmap *args = data;
        struct armada_gem_object *dobj;
        unsigned long addr;

        dobj = armada_gem_object_lookup(dev, file, args->handle);
        if (dobj == NULL)
                return -ENOENT;

        if (!dobj->obj.filp) {
                drm_gem_object_unreference(&dobj->obj);
                return -EINVAL;
        }

        addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, args->offset);
        drm_gem_object_unreference(&dobj->obj);
        if (IS_ERR_VALUE(addr))
                return addr;

        args->addr = addr;

        return 0;
}

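/*
 * Copy user data into a kernel-mapped object.  The user range is
 * verified and pre-faulted before the object lookup so that the
 * actual copy is unlikely to fault.
 */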
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct drm_armada_gem_pwrite *args = data;
        struct armada_gem_object *dobj;
        char __user *ptr;
        int ret;

        DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
                args->handle, args->offset, args->size, args->ptr);

        if (args->size == 0)
                return 0;

        ptr = (char __user *)(uintptr_t)args->ptr;

        if (!access_ok(VERIFY_READ, ptr, args->size))
                return -EFAULT;

        ret = fault_in_multipages_readable(ptr, args->size);
        if (ret)
                return ret;

        dobj = armada_gem_object_lookup(dev, file, args->handle);
        if (dobj == NULL)
                return -ENOENT;

        /* Must be a kernel-mapped object */
        if (!dobj->addr) {
                ret = -EINVAL;
                goto unref;
        }

        if (args->offset > dobj->obj.size ||
            args->size > dobj->obj.size - args->offset) {
                DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
                ret = -EINVAL;
                goto unref;
        }

        if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
                ret = -EFAULT;
        } else if (dobj->update) {
                dobj->update(dobj->update_data);
                ret = 0;
        }

 unref:
        drm_gem_object_unreference_unlocked(&dobj->obj);
        return ret;
}

/* Prime support */

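/*
 * Build a scatterlist for an exported buffer.  Three backings are
 * handled: shmem (one entry per page), a single alloc_pages() block,
 * and linear memory, which has no struct page and therefore has its
 * DMA address filled in directly instead of via dma_map_sg().
 */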
struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
        enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);
        struct scatterlist *sg;
        struct sg_table *sgt;
        int i, num;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt)
                return NULL;

        if (dobj->obj.filp) {
                struct address_space *mapping;
                int count;

                count = dobj->obj.size / PAGE_SIZE;
                if (sg_alloc_table(sgt, count, GFP_KERNEL))
                        goto free_sgt;

                mapping = file_inode(dobj->obj.filp)->i_mapping;

                for_each_sg(sgt->sgl, sg, count, i) {
                        struct page *page;

                        page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page)) {
                                num = i;
                                goto release;
                        }

                        sg_set_page(sg, page, PAGE_SIZE, 0);
                }

                if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
                        num = sgt->nents;
                        goto release;
                }
        } else if (dobj->page) {
                /* Single contiguous page */
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free_sgt;

                sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

                if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
                        goto free_table;
        } else if (dobj->linear) {
                /* Single contiguous physical region - no struct page */
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free_sgt;
                sg_dma_address(sgt->sgl) = dobj->dev_addr;
                sg_dma_len(sgt->sgl) = dobj->obj.size;
        } else {
                goto free_sgt;
        }
        return sgt;

 release:
        for_each_sg(sgt->sgl, sg, num, i)
                page_cache_release(sg_page(sg));
 free_table:
        sg_free_table(sgt);
 free_sgt:
        kfree(sgt);
        return NULL;
}

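/*
 * Undo armada_gem_prime_map_dma_buf(): linear regions were never
 * mapped with dma_map_sg(), and only shmem-backed tables hold page
 * references that need releasing.
 */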
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
        struct sg_table *sgt, enum dma_data_direction dir)
{
        struct drm_gem_object *obj = attach->dmabuf->priv;
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);
        int i;

        if (!dobj->linear)
                dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

        if (dobj->obj.filp) {
                struct scatterlist *sg;
                for_each_sg(sgt->sgl, sg, sgt->nents, i)
                        page_cache_release(sg_page(sg));
        }

        sg_free_table(sgt);
        kfree(sgt);
}

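/*
 * CPU access to exported buffers is deliberately unsupported:
 * importers may only reach the buffer via DMA.
 */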
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
        return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
        .map_dma_buf    = armada_gem_prime_map_dma_buf,
        .unmap_dma_buf  = armada_gem_prime_unmap_dma_buf,
        .release        = drm_gem_dmabuf_release,
        .kmap_atomic    = armada_gem_dmabuf_no_kmap,
        .kunmap_atomic  = armada_gem_dmabuf_no_kunmap,
        .kmap           = armada_gem_dmabuf_no_kmap,
        .kunmap         = armada_gem_dmabuf_no_kunmap,
        .mmap           = armada_gem_dmabuf_mmap,
};

struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
        int flags)
{
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &armada_gem_prime_dmabuf_ops;
        exp_info.size = obj->size;
        exp_info.flags = O_RDWR;
        exp_info.priv = obj;

        return dma_buf_export(&exp_info);
}

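/*
 * Import a dma_buf.  If the buffer is one of our own and belongs to
 * this device, just take another reference on the existing GEM object
 * rather than wrapping our own export.
 */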
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
        struct dma_buf_attachment *attach;
        struct armada_gem_object *dobj;

        if (buf->ops == &armada_gem_prime_dmabuf_ops) {
                struct drm_gem_object *obj = buf->priv;
                if (obj->dev == dev) {
                        /*
                         * Importing our own dmabuf(s) increases the
                         * refcount on the gem object itself.
                         */
                        drm_gem_object_reference(obj);
                        return obj;
                }
        }

        attach = dma_buf_attach(buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        dobj = armada_gem_alloc_private_object(dev, buf->size);
        if (!dobj) {
                dma_buf_detach(buf, attach);
                return ERR_PTR(-ENOMEM);
        }

        dobj->obj.import_attach = attach;
        get_dma_buf(buf);

        /*
         * Don't call dma_buf_map_attachment() here - it maps the
         * scatterlist immediately for DMA, and this is not always
         * an appropriate thing to do.
         */
        return &dobj->obj;
}

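/*
 * Map an imported buffer for DMA.  The result must collapse to a
 * single segment covering the whole object, presumably because the
 * display hardware scans out of one contiguous region.
 */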
int armada_gem_map_import(struct armada_gem_object *dobj)
{
        int ret;

        dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
                                           DMA_TO_DEVICE);
        if (!dobj->sgt) {
                DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
                return -EINVAL;
        }
        if (IS_ERR(dobj->sgt)) {
                ret = PTR_ERR(dobj->sgt);
                dobj->sgt = NULL;
                DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
                return ret;
        }
        if (dobj->sgt->nents > 1) {
                DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
                return -EINVAL;
        }
        if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
                DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
                return -EINVAL;
        }
        dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
        return 0;
}