/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
static int armada_gem_vm_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *gobj = vmf->vma->vm_private_data;
	struct armada_gem_object *obj = drm_to_armada_gem(gobj);
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};
static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct armada_private *priv = obj->dev->dev_private;

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	might_lock(&priv->linear_lock);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		mutex_lock(&priv->linear_lock);
		drm_mm_remove_node(dobj->linear);
		mutex_unlock(&priv->linear_lock);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		if (dobj->sgt)
			dma_buf_unmap_attachment(dobj->obj.import_attach,
						 dobj->sgt, DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
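	/*
	 * For reference, assuming a 4K PAGE_SIZE: a 64x32 ARGB cursor is
	 * 64 * 32 * 4 = 8192 bytes, i.e. two pages, so get_order() below
	 * yields 1 and alloc_pages() hands back an order-1 block.
	 */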
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}
	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */
	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&priv->linear_lock);
		ret = drm_mm_insert_node_generic(&priv->linear, node,
						 size, align, 0, 0);
		mutex_unlock(&priv->linear_lock);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&priv->linear_lock);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&priv->linear_lock);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);
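		/*
		 * phys_addr and dev_addr are set to the same value below,
		 * which assumes the display controller addresses this
		 * linear region directly with no intervening IOMMU or
		 * other device-side remapping.
		 */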
		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
			 (unsigned long long)obj->phys_addr,
			 (unsigned long long)obj->dev_addr);

	return 0;
}
void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}
static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	mapping = obj->obj.filp->f_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}
/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;
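	/*
	 * Worked example, assuming armada_pitch() returns width * bytes
	 * per pixel rounded up to the controller's pitch alignment: a
	 * 1024x768 buffer at 32 bpp gives a pitch of at least
	 * 1024 * 4 = 4096 bytes and a size of roughly 4096 * 768 = 3 MiB,
	 * which is then handed to the linear backing allocator below.
	 */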
	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}
/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}
/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_put_unlocked(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_put_unlocked(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_pages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr)
		return -EINVAL;
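	/*
	 * Note that the range check below is written as two comparisons
	 * against the object size rather than "offset + size > obj.size",
	 * so that a huge userspace-supplied offset or size cannot wrap
	 * around and slip past the check.
	 */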
	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_put_unlocked(&dobj->obj);
	return ret;
}
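/*
 * PRIME/dma-buf export: build an sg_table describing whichever backing
 * store the object has - shmem pages read in one at a time, a single
 * block from alloc_pages(), or a linear carveout region that has no
 * struct page and is therefore described purely by its DMA address.
 */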
static struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = dobj->obj.filp->f_mapping;

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;
		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		put_page(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}
static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			put_page(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}
static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}
static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.map_atomic	= armada_gem_dmabuf_no_kmap,
	.unmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.map		= armada_gem_dmabuf_no_kmap,
	.unmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};
struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &armada_gem_prime_dmabuf_ops;
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	return drm_gem_dmabuf_export(dev, &exp_info);
}
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;
	get_dma_buf(buf);

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					   DMA_TO_DEVICE);
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
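	/*
	 * This driver only scans out from a single contiguous DMA region,
	 * so a scattered mapping, or one shorter than the object, is
	 * rejected below and dev_addr is taken from the sole segment.
	 */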
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}

	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);