/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

#include <core/device.h>
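
/*
 * GK20A is an integrated GPU with no dedicated VRAM: its "video memory"
 * is carved out of system memory through the DMA API.  Each allocation
 * therefore carries its CPU mapping and DMA handle alongside the generic
 * nvkm_mem bookkeeping.
 */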
struct gk20a_mem {
	struct nvkm_mem base;
	void *cpuaddr;
	dma_addr_t handle;
};
#define to_gk20a_mem(m) container_of(m, struct gk20a_mem, base)
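
/* Release an allocation: clear the caller's pointer, then free the
 * coherent buffer and the page-address array. */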
static void
gk20a_ram_put(struct nvkm_fb *pfb, struct nvkm_mem **pmem)
{
	struct device *dev = nv_device_base(nv_device(pfb));
	struct gk20a_mem *mem = to_gk20a_mem(*pmem);

	*pmem = NULL;
	if (unlikely(mem == NULL))
		return;

	if (likely(mem->cpuaddr))
		dma_free_coherent(dev, mem->base.size << PAGE_SHIFT,
				  mem->cpuaddr, mem->handle);

	kfree(mem->base.pages);
	kfree(mem);
}
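
/* Allocate "VRAM" for an nvkm client.  Sizes are tracked in pages; the
 * requested alignment is rounded up to a power of two and the backing
 * store comes from dma_alloc_coherent(), so the resulting pages are
 * physically contiguous. */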
static int
gk20a_ram_get(struct nvkm_fb *pfb, u64 size, u32 align, u32 ncmin,
	      u32 memtype, struct nvkm_mem **pmem)
{
	struct device *dev = nv_device_base(nv_device(pfb));
	struct gk20a_mem *mem;
	u32 type = memtype & 0xff;
	u32 npages, order;
	int i;

	nv_debug(pfb, "%s: size: %llx align: %x, ncmin: %x\n", __func__, size,
		 align, ncmin);

	npages = size >> PAGE_SHIFT;
	if (npages == 0)
		npages = 1;

	if (align == 0)
		align = PAGE_SIZE;
	align >>= PAGE_SHIFT;

	/* round alignment to the next power of 2, if needed */
	order = fls(align);
	if ((align & (align - 1)) == 0)
		order--;
	align = BIT(order);

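	/*
	 * dma_alloc_coherent() buffers are typically naturally aligned to
	 * their size, so allocating at least 'align' pages should satisfy
	 * the requested alignment; this is verified after the allocation
	 * below rather than assumed.
	 */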
	/* ensure returned address is correctly aligned */
	npages = max(align, npages);

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->base.size = npages;
	mem->base.memtype = type;

	mem->base.pages = kzalloc(sizeof(dma_addr_t) * npages, GFP_KERNEL);
	if (!mem->base.pages) {
		kfree(mem);
		return -ENOMEM;
	}

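	/* publish the allocation before trying the DMA API: on failure the
	 * error path below hands *pmem straight back to gk20a_ram_put() */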
	*pmem = &mem->base;

	mem->cpuaddr = dma_alloc_coherent(dev, npages << PAGE_SHIFT,
					  &mem->handle, GFP_KERNEL);
	if (!mem->cpuaddr) {
		nv_error(pfb, "%s: cannot allocate memory!\n", __func__);
		gk20a_ram_put(pfb, pmem);
		return -ENOMEM;
	}

	align <<= PAGE_SHIFT;

	/* alignment check */
	if (unlikely(mem->handle & (align - 1)))
		nv_warn(pfb, "memory not aligned as requested: %pad (0x%x)\n",
			&mem->handle, align);

	nv_debug(pfb, "alloc size: 0x%x, align: 0x%x, paddr: %pad, vaddr: %p\n",
		 npages << PAGE_SHIFT, align, &mem->handle, mem->cpuaddr);

	for (i = 0; i < npages; i++)
		mem->base.pages[i] = mem->handle + (PAGE_SIZE * i);

	mem->base.offset = (u64)mem->base.pages[0];

	return 0;
}
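
/* Constructor: advertise system memory as "stolen" VRAM and wire up the
 * allocator hooks above. */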
static int
gk20a_ram_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 datasize,
	       struct nvkm_object **pobject)
{
	struct nvkm_ram *ram;
	int ret;

	ret = nvkm_ram_create(parent, engine, oclass, &ram);
	*pobject = nv_object(ram);
	if (ret)
		return ret;
	ram->type = NV_MEM_TYPE_STOLEN;
	ram->size = get_num_physpages() << PAGE_SHIFT;

	ram->get = gk20a_ram_get;
	ram->put = gk20a_ram_put;
	return 0;
}
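
/* The class only overrides the constructor; destruction, init and fini
 * fall through to the generic nvkm_ram helpers. */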
struct nvkm_oclass
gk20a_ram_oclass = {
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = gk20a_ram_ctor,
		.dtor = _nvkm_ram_dtor,
		.init = _nvkm_ram_init,
		.fini = _nvkm_ram_fini,
	},
};