/*
 * Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * GK20A does not have dedicated video memory, and to accurately represent this
 * fact Nouveau will not create a RAM device for it. Therefore its instmem
 * implementation must be done directly on top of system memory, while
 * preserving coherency for read and write operations.
 *
 * Instmem can be allocated through two means:
 * 1) If an IOMMU unit has been probed, the IOMMU API is used to make memory
 *    pages contiguous to the GPU. This is the preferred way.
 * 2) If no IOMMU unit is probed, the DMA API is used to allocate physically
 *    contiguous memory.
 *
 * In both cases CPU reads and writes are performed by creating a write-combined
 * mapping. The GPU L2 cache must thus be flushed/invalidated when required. To
 * be conservative we do this every time we acquire or release an instobj, but
 * ideally L2 management should be handled at a higher level.
 *
 * To improve performance, CPU mappings are not removed upon instobj release.
 * Instead they are placed into an LRU list to be recycled when the mapped space
 * goes beyond a certain threshold. At the moment this limit is 1MB.
 */
#include "priv.h"

#include <core/memory.h>
#include <core/mm.h>
#include <core/tegra.h>
#include <subdev/fb.h>
#include <subdev/ltc.h>

struct gk20a_instobj {
        struct nvkm_memory memory;
        struct nvkm_mem mem;
        struct gk20a_instmem *imem;

        /* CPU mapping */
        u32 *vaddr;
        struct list_head vaddr_node;
};
#define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)

/*
 * Used for objects allocated using the DMA API
 */
struct gk20a_instobj_dma {
        struct gk20a_instobj base;

        u32 *cpuaddr;
        dma_addr_t handle;
        struct nvkm_mm_node r;
};
#define gk20a_instobj_dma(p) \
        container_of(gk20a_instobj(p), struct gk20a_instobj_dma, base)

/*
 * Used for objects flattened using the IOMMU API
 */
struct gk20a_instobj_iommu {
        struct gk20a_instobj base;

        /* will point to the higher half of pages */
        dma_addr_t *dma_addrs;
        /* array of base.mem->size pages (+ dma_addr_ts) */
        struct page *pages[];
};
#define gk20a_instobj_iommu(p) \
        container_of(gk20a_instobj(p), struct gk20a_instobj_iommu, base)

struct gk20a_instmem {
        struct nvkm_instmem base;

        /* protects vaddr_* and gk20a_instobj::vaddr* */
        spinlock_t lock;

        /* CPU mappings LRU */
        unsigned int vaddr_use;
        unsigned int vaddr_max;
        struct list_head vaddr_lru;

        /* Only used if IOMMU is present */
        struct mutex *mm_mutex;
        struct nvkm_mm *mm;
        struct iommu_domain *domain;
        unsigned long iommu_pgshift;
        u16 iommu_bit;

        /* Only used by DMA API */
        struct dma_attrs attrs;

        void __iomem * (*cpu_map)(struct nvkm_memory *);
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)

static enum nvkm_memory_target
gk20a_instobj_target(struct nvkm_memory *memory)
{
        return NVKM_MEM_TARGET_HOST;
}

static u64
gk20a_instobj_addr(struct nvkm_memory *memory)
{
        return gk20a_instobj(memory)->mem.offset;
}

static u64
gk20a_instobj_size(struct nvkm_memory *memory)
{
        return (u64)gk20a_instobj(memory)->mem.size << 12;
}

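/*
 * The DMA allocation is physically contiguous, so rebuild a struct page
 * array covering it and vmap() the whole range with write-combined
 * protection for CPU accesses.
 */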
static void __iomem *
gk20a_instobj_cpu_map_dma(struct nvkm_memory *memory)
{
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
        struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
        struct device *dev = node->base.imem->base.subdev.device->dev;
        int npages = nvkm_memory_size(memory) >> 12;
        struct page *pages[npages];
        int i;

        /* we shouldn't see a gk20a on anything but arm/arm64 anyways */
        /* phys_to_page does not exist on all platforms... */
        pages[0] = pfn_to_page(dma_to_phys(dev, node->handle) >> PAGE_SHIFT);
        for (i = 1; i < npages; i++)
                pages[i] = pages[0] + i;

        return vmap(pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL));
#else
        BUG();
        return NULL;
#endif
}

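/*
 * Pages backing an IOMMU object are already tracked individually, so a
 * write-combined vmap() of the pages array is all that is needed here.
 */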
static void __iomem *
gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory)
{
        struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
        int npages = nvkm_memory_size(memory) >> 12;

        return vmap(node->pages, npages, VM_MAP,
                    pgprot_writecombine(PAGE_KERNEL));
}

/*
 * Must be called while holding gk20a_instmem::lock
 */
static void
gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
{
        while (imem->vaddr_use + size > imem->vaddr_max) {
                struct gk20a_instobj *obj;

                /* no candidate that can be unmapped, abort... */
                if (list_empty(&imem->vaddr_lru))
                        break;

                obj = list_first_entry(&imem->vaddr_lru, struct gk20a_instobj,
                                       vaddr_node);
                list_del(&obj->vaddr_node);
                vunmap(obj->vaddr);
                obj->vaddr = NULL;
                imem->vaddr_use -= nvkm_memory_size(&obj->memory);
                nvkm_debug(&imem->base.subdev, "(GC) vaddr used: %x/%x\n",
                           imem->vaddr_use, imem->vaddr_max);
        }
}

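/*
 * Map an instobj for CPU access. The GPU L2 is flushed first so the CPU
 * sees up-to-date data, an existing mapping is reused when present, and
 * otherwise the LRU is trimmed before creating a new write-combined
 * mapping.
 */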
static void __iomem *
gk20a_instobj_acquire(struct nvkm_memory *memory)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);
        struct gk20a_instmem *imem = node->imem;
        struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
        const u64 size = nvkm_memory_size(memory);
        unsigned long flags;

        nvkm_ltc_flush(ltc);

        spin_lock_irqsave(&imem->lock, flags);

        if (node->vaddr) {
                /* remove us from the LRU list since we cannot be unmapped */
                list_del(&node->vaddr_node);
                goto out;
        }

        /* try to free some address space if we reached the limit */
        gk20a_instmem_vaddr_gc(imem, size);

        node->vaddr = imem->cpu_map(memory);
        if (!node->vaddr) {
                nvkm_error(&imem->base.subdev, "cannot map instobj - "
                           "this is not going to end well...\n");
                goto out;
        }

        imem->vaddr_use += size;
        nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
                   imem->vaddr_use, imem->vaddr_max);

out:
        spin_unlock_irqrestore(&imem->lock, flags);

        return node->vaddr;
}

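/*
 * Drop a CPU reference: put the instobj back on the LRU so its mapping can
 * be recycled, and invalidate the GPU L2 so the GPU observes the writes
 * made through the CPU mapping.
 */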
static void
gk20a_instobj_release(struct nvkm_memory *memory)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);
        struct gk20a_instmem *imem = node->imem;
        struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
        unsigned long flags;

        spin_lock_irqsave(&imem->lock, flags);

        /* add ourselves to the LRU list so our CPU mapping can be freed */
        list_add_tail(&node->vaddr_node, &imem->vaddr_lru);

        spin_unlock_irqrestore(&imem->lock, flags);

        wmb();
        nvkm_ltc_invalidate(ltc);
}

static u32
gk20a_instobj_rd32(struct nvkm_memory *memory, u64 offset)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);

        return node->vaddr[offset / 4];
}

static void
gk20a_instobj_wr32(struct nvkm_memory *memory, u64 offset, u32 data)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);

        node->vaddr[offset / 4] = data;
}

static void
gk20a_instobj_map(struct nvkm_memory *memory, struct nvkm_vma *vma, u64 offset)
{
        struct gk20a_instobj *node = gk20a_instobj(memory);

        nvkm_vm_map_at(vma, offset, &node->mem);
}

/*
 * Clear the CPU mapping of an instobj if it exists
 */
static void
gk20a_instobj_dtor(struct gk20a_instobj *node)
{
        struct gk20a_instmem *imem = node->imem;
        struct gk20a_instobj *obj;
        unsigned long flags;

        spin_lock_irqsave(&imem->lock, flags);

        if (!node->vaddr)
                goto out;

        list_for_each_entry(obj, &imem->vaddr_lru, vaddr_node) {
                if (obj == node) {
                        list_del(&obj->vaddr_node);
                        break;
                }
        }
        vunmap(node->vaddr);
        node->vaddr = NULL;
        imem->vaddr_use -= nvkm_memory_size(&node->memory);
        nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
                   imem->vaddr_use, imem->vaddr_max);

out:
        spin_unlock_irqrestore(&imem->lock, flags);
}

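/*
 * Destroy an instobj allocated through the DMA API: drop any CPU mapping
 * via the common destructor, then free the contiguous DMA buffer.
 */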
static void *
gk20a_instobj_dtor_dma(struct nvkm_memory *memory)
{
        struct gk20a_instobj_dma *node = gk20a_instobj_dma(memory);
        struct gk20a_instmem *imem = node->base.imem;
        struct device *dev = imem->base.subdev.device->dev;

        gk20a_instobj_dtor(&node->base);

        if (unlikely(!node->cpuaddr))
                goto out;

        dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->cpuaddr,
                       node->handle, &imem->attrs);

out:
        return node;
}

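/*
 * Destroy an IOMMU-backed instobj: drop any CPU mapping, unmap and free
 * every backing page, and return the region to the IOMMU address space
 * allocator.
 */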
static void *
gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
{
        struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
        struct gk20a_instmem *imem = node->base.imem;
        struct device *dev = imem->base.subdev.device->dev;
        struct nvkm_mm_node *r;
        int i;

        gk20a_instobj_dtor(&node->base);

        if (unlikely(list_empty(&node->base.mem.regions)))
                goto out;

        r = list_first_entry(&node->base.mem.regions, struct nvkm_mm_node,
                             rl_entry);

        /* clear IOMMU bit to unmap pages */
        r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);

        /* Unmap pages from GPU address space and free them */
        for (i = 0; i < node->base.mem.size; i++) {
                iommu_unmap(imem->domain,
                            (r->offset + i) << imem->iommu_pgshift, PAGE_SIZE);
                dma_unmap_page(dev, node->dma_addrs[i], PAGE_SIZE,
                               DMA_BIDIRECTIONAL);
                __free_page(node->pages[i]);
        }

        /* Release area from GPU address space */
        mutex_lock(imem->mm_mutex);
        nvkm_mm_free(imem->mm, &r);
        mutex_unlock(imem->mm_mutex);

out:
        return node;
}

static const struct nvkm_memory_func
gk20a_instobj_func_dma = {
        .dtor = gk20a_instobj_dtor_dma,
        .target = gk20a_instobj_target,
        .addr = gk20a_instobj_addr,
        .size = gk20a_instobj_size,
        .acquire = gk20a_instobj_acquire,
        .release = gk20a_instobj_release,
        .rd32 = gk20a_instobj_rd32,
        .wr32 = gk20a_instobj_wr32,
        .map = gk20a_instobj_map,
};

static const struct nvkm_memory_func
gk20a_instobj_func_iommu = {
        .dtor = gk20a_instobj_dtor_iommu,
        .target = gk20a_instobj_target,
        .addr = gk20a_instobj_addr,
        .size = gk20a_instobj_size,
        .acquire = gk20a_instobj_acquire,
        .release = gk20a_instobj_release,
        .rd32 = gk20a_instobj_rd32,
        .wr32 = gk20a_instobj_wr32,
        .map = gk20a_instobj_map,
};

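/*
 * Allocate an instobj as a single physically contiguous DMA buffer and
 * describe it with one nvkm_mm_node so it can be mapped using small (4K)
 * pages.
 */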
static int
gk20a_instobj_ctor_dma(struct gk20a_instmem *imem, u32 npages, u32 align,
                       struct gk20a_instobj **_node)
{
        struct gk20a_instobj_dma *node;
        struct nvkm_subdev *subdev = &imem->base.subdev;
        struct device *dev = subdev->device->dev;

        if (!(node = kzalloc(sizeof(*node), GFP_KERNEL)))
                return -ENOMEM;
        *_node = &node->base;

        nvkm_memory_ctor(&gk20a_instobj_func_dma, &node->base.memory);

        node->cpuaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
                                        &node->handle, GFP_KERNEL,
                                        &imem->attrs);
        if (!node->cpuaddr) {
                nvkm_error(subdev, "cannot allocate DMA memory\n");
                return -ENOMEM;
        }

        /* alignment check */
        if (unlikely(node->handle & (align - 1)))
                nvkm_warn(subdev,
                          "memory not aligned as requested: %pad (0x%x)\n",
                          &node->handle, align);

        /* present memory for being mapped using small pages */
        node->r.type = 12;
        node->r.offset = node->handle >> 12;
        node->r.length = (npages << PAGE_SHIFT) >> 12;

        node->base.mem.offset = node->handle;

        INIT_LIST_HEAD(&node->base.mem.regions);
        list_add_tail(&node->r.rl_entry, &node->base.mem.regions);

        return 0;
}

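/*
 * Allocate an instobj as individual pages and rely on the IOMMU to make
 * them appear contiguous to the GPU, inside a region reserved from the
 * shared IOMMU address space.
 */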
static int
gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
                         struct gk20a_instobj **_node)
{
        struct gk20a_instobj_iommu *node;
        struct nvkm_subdev *subdev = &imem->base.subdev;
        struct device *dev = subdev->device->dev;
        struct nvkm_mm_node *r;
        int ret;
        int i;

        /*
         * despite their variable size, instmem allocations are small enough
         * (< 1 page) to be handled by kzalloc
         */
        if (!(node = kzalloc(sizeof(*node) + ((sizeof(node->pages[0]) +
                             sizeof(*node->dma_addrs)) * npages), GFP_KERNEL)))
                return -ENOMEM;
        *_node = &node->base;
        node->dma_addrs = (void *)(node->pages + npages);

        nvkm_memory_ctor(&gk20a_instobj_func_iommu, &node->base.memory);

        /* Allocate backing memory */
        for (i = 0; i < npages; i++) {
                struct page *p = alloc_page(GFP_KERNEL);
                dma_addr_t dma_adr;

                if (p == NULL) {
                        ret = -ENOMEM;
                        goto free_pages;
                }
                node->pages[i] = p;
                dma_adr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(dev, dma_adr)) {
                        nvkm_error(subdev, "DMA mapping error!\n");
                        ret = -ENOMEM;
                        goto free_pages;
                }
                node->dma_addrs[i] = dma_adr;
        }

        mutex_lock(imem->mm_mutex);
        /* Reserve area from GPU address space */
        ret = nvkm_mm_head(imem->mm, 0, 1, npages, npages,
                           align >> imem->iommu_pgshift, &r);
        mutex_unlock(imem->mm_mutex);
        if (ret) {
                nvkm_error(subdev, "IOMMU space is full!\n");
                goto free_pages;
        }

        /* Map into GPU address space */
        for (i = 0; i < npages; i++) {
                u32 offset = (r->offset + i) << imem->iommu_pgshift;

                ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
                                PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
                if (ret < 0) {
                        nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);

                        while (i-- > 0) {
                                offset -= PAGE_SIZE;
                                iommu_unmap(imem->domain, offset, PAGE_SIZE);
                        }
                        goto release_area;
                }
        }

        /* IOMMU bit tells that an address is to be resolved through the IOMMU */
        r->offset |= BIT(imem->iommu_bit - imem->iommu_pgshift);

        node->base.mem.offset = ((u64)r->offset) << imem->iommu_pgshift;

        INIT_LIST_HEAD(&node->base.mem.regions);
        list_add_tail(&r->rl_entry, &node->base.mem.regions);

        return 0;

release_area:
        mutex_lock(imem->mm_mutex);
        nvkm_mm_free(imem->mm, &r);
        mutex_unlock(imem->mm_mutex);

free_pages:
        for (i = 0; i < npages && node->pages[i] != NULL; i++) {
                dma_addr_t dma_addr = node->dma_addrs[i];
                if (dma_addr)
                        dma_unmap_page(dev, dma_addr, PAGE_SIZE,
                                       DMA_BIDIRECTIONAL);
                __free_page(node->pages[i]);
        }

        return ret;
}

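/*
 * Common allocation entry point: round the size and alignment to page
 * bounds, then dispatch to the IOMMU or DMA constructor depending on what
 * was probed when the instmem was created.
 */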
static int
gk20a_instobj_new(struct nvkm_instmem *base, u32 size, u32 align, bool zero,
                  struct nvkm_memory **pmemory)
{
        struct gk20a_instmem *imem = gk20a_instmem(base);
        struct nvkm_subdev *subdev = &imem->base.subdev;
        struct gk20a_instobj *node = NULL;
        int ret;

        nvkm_debug(subdev, "%s (%s): size: %x align: %x\n", __func__,
                   imem->domain ? "IOMMU" : "DMA", size, align);

        /* Round size and align to page bounds */
        size = max(roundup(size, PAGE_SIZE), PAGE_SIZE);
        align = max(roundup(align, PAGE_SIZE), PAGE_SIZE);

        if (imem->domain)
                ret = gk20a_instobj_ctor_iommu(imem, size >> PAGE_SHIFT,
                                               align, &node);
        else
                ret = gk20a_instobj_ctor_dma(imem, size >> PAGE_SHIFT,
                                             align, &node);
        *pmemory = node ? &node->memory : NULL;
        if (ret)
                return ret;

        node->imem = imem;

        /* present memory for being mapped using small pages */
        node->mem.size = size >> 12;
        node->mem.memtype = 0;
        node->mem.page_shift = 12;

        nvkm_debug(subdev, "alloc size: 0x%x, align: 0x%x, gaddr: 0x%llx\n",
                   size, align, node->mem.offset);

        return 0;
}

static void *
gk20a_instmem_dtor(struct nvkm_instmem *base)
{
        struct gk20a_instmem *imem = gk20a_instmem(base);

        /* perform some sanity checks... */
        if (!list_empty(&imem->vaddr_lru))
                nvkm_warn(&base->subdev, "instobj LRU not empty!\n");

        if (imem->vaddr_use != 0)
                nvkm_warn(&base->subdev, "instobj vmap area not empty! "
                          "0x%x bytes still mapped\n", imem->vaddr_use);

        return imem;
}

static const struct nvkm_instmem_func
gk20a_instmem = {
        .dtor = gk20a_instmem_dtor,
        .memory_new = gk20a_instobj_new,
};

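/*
 * Create the instmem subdev. The Tegra IOMMU path is preferred when a
 * domain was probed; otherwise fall back to the DMA API, with attributes
 * chosen so that we can provide our own write-combined CPU mapping.
 */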
int
gk20a_instmem_new(struct nvkm_device *device, int index,
                  struct nvkm_instmem **pimem)
{
        struct nvkm_device_tegra *tdev = device->func->tegra(device);
        struct gk20a_instmem *imem;

        if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
        spin_lock_init(&imem->lock);
        *pimem = &imem->base;

        /* do not allow more than 1MB of CPU-mapped instmem */
        imem->vaddr_use = 0;
        imem->vaddr_max = 0x100000;
        INIT_LIST_HEAD(&imem->vaddr_lru);

        if (tdev->iommu.domain) {
                imem->mm_mutex = &tdev->iommu.mutex;
                imem->mm = &tdev->iommu.mm;
                imem->domain = tdev->iommu.domain;
                imem->iommu_pgshift = tdev->iommu.pgshift;
                imem->cpu_map = gk20a_instobj_cpu_map_iommu;
                imem->iommu_bit = tdev->func->iommu_bit;

                nvkm_info(&imem->base.subdev, "using IOMMU\n");
        } else {
                init_dma_attrs(&imem->attrs);
                /* We will access the memory through our own mapping */
                dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
                dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
                dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
                dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &imem->attrs);
                imem->cpu_map = gk20a_instobj_cpu_map_dma;

                nvkm_info(&imem->base.subdev, "using DMA API\n");
        }

        return 0;
}