// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 */
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>

#include <video/omapvrfb.h>
#ifdef DEBUG
#define DBG(format, ...) pr_debug("VRFB: " format, ## __VA_ARGS__)
#else
#define DBG(format, ...)
#endif
#define SMS_ROT_CONTROL(context)	(0x0 + 0x10 * context)
#define SMS_ROT_SIZE(context)		(0x4 + 0x10 * context)
#define SMS_ROT_PHYSICAL_BA(context)	(0x8 + 0x10 * context)
#define SMS_ROT_VIRT_BASE(rot)		(0x1000000 * (rot))

#define OMAP_VRFB_SIZE			(2048 * 2048 * 4)

#define VRFB_PAGE_WIDTH_EXP	5 /* Assuming SDRAM pagesize = 1024 */
#define VRFB_PAGE_HEIGHT_EXP	5 /* 1024 = 2^5 * 2^5 */
#define VRFB_PAGE_WIDTH		(1 << VRFB_PAGE_WIDTH_EXP)
#define VRFB_PAGE_HEIGHT	(1 << VRFB_PAGE_HEIGHT_EXP)
#define SMS_IMAGEHEIGHT_OFFSET	16
#define SMS_IMAGEWIDTH_OFFSET	0
#define SMS_PH_OFFSET		8
#define SMS_PW_OFFSET		4
#define SMS_PS_OFFSET		0
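/*
 * Summary of the layout implied by the macros above: each rotation context
 * has three 32-bit SMS registers (CONTROL at 0x0, SIZE at 0x4, PHYSICAL_BA
 * at 0x8), spaced 0x10 apart per context, and each rotation angle of a
 * context gets its own 0x1000000 (16 MiB) window in the VRFB virtual
 * address space, matching OMAP_VRFB_SIZE = 2048 * 2048 * 4 bytes.
 */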
/* bitmap of reserved contexts */
static unsigned long ctx_map;

/* per-context shadow of the SMS rotation registers */
struct vrfb_ctx {
	u32 base;
	u32 physical_ba;
	u32 control;
	u32 size;
};

static DEFINE_MUTEX(ctx_lock);
/*
 * Access to this happens from client drivers or the PM core after wake-up.
 * For the first case we require locking at the driver level, for the second
 * we don't need locking, since no drivers will run until after the wake-up
 * has finished.
 */
static void __iomem *vrfb_base;

static int num_ctxs;
static struct vrfb_ctx *ctxs;

static bool vrfb_loaded;
static void omap2_sms_write_rot_control(u32 val, unsigned ctx)
{
	__raw_writel(val, vrfb_base + SMS_ROT_CONTROL(ctx));
}
static void omap2_sms_write_rot_size(u32 val, unsigned ctx)
{
	__raw_writel(val, vrfb_base + SMS_ROT_SIZE(ctx));
}
static void omap2_sms_write_rot_physical_ba(u32 val, unsigned ctx)
{
	__raw_writel(val, vrfb_base + SMS_ROT_PHYSICAL_BA(ctx));
}
static inline void restore_hw_context(int ctx)
{
	omap2_sms_write_rot_control(ctxs[ctx].control, ctx);
	omap2_sms_write_rot_size(ctxs[ctx].size, ctx);
	omap2_sms_write_rot_physical_ba(ctxs[ctx].physical_ba, ctx);
}
static u32 get_image_width_roundup(u16 width, u8 bytespp)
{
	unsigned long stride = width * bytespp;
	unsigned long ceil_pages_per_stride = (stride / VRFB_PAGE_WIDTH) +
		(stride % VRFB_PAGE_WIDTH != 0);

	return ceil_pages_per_stride * VRFB_PAGE_WIDTH / bytespp;
}
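/*
 * Worked example (illustrative only): width = 857, bytespp = 2 gives a
 * stride of 1714 bytes; with VRFB_PAGE_WIDTH = 32 that is 54 pages per line
 * (53 full pages plus a partial one), so the rounded-up image width is
 * 54 * 32 / 2 = 864 pixels.
 */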
/*
 * This is the extra space needed in the VRFB physical area for VRFB to
 * safely wrap any memory accesses to the invisible part of the virtual view
 * to the physical area.
 */
static inline u32 get_extra_physical_size(u16 image_width_roundup, u8 bytespp)
{
	return (OMAP_VRFB_LINE_LEN - image_width_roundup) * VRFB_PAGE_HEIGHT *
		bytespp;
}
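/*
 * Example, assuming OMAP_VRFB_LINE_LEN is 2048 pixels: for
 * image_width_roundup = 864 and bytespp = 2 the extra space is
 * (2048 - 864) * 32 * 2 = 75776 bytes, covering the invisible part of the
 * last row of VRFB pages.
 */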
void omap_vrfb_restore_context(void)
{
	int i;
	unsigned long map = ctx_map;

	for (i = ffs(map); i; i = ffs(map)) {
		/* ffs() returns a 1-based bit number, or 0 if no bit is set */
		i--;

		DBG("restoring ctx %d\n", i);

		restore_hw_context(i);

		clear_bit(i, &map);
	}
}
void omap_vrfb_adjust_size(u16 *width, u16 *height,
		u8 bytespp)
{
	*width = ALIGN(*width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
	*height = ALIGN(*height, VRFB_PAGE_HEIGHT);
}
EXPORT_SYMBOL(omap_vrfb_adjust_size);
u32 omap_vrfb_min_phys_size(u16 width, u16 height, u8 bytespp)
{
	unsigned long image_width_roundup = get_image_width_roundup(width,
		bytespp);

	if (image_width_roundup > OMAP_VRFB_LINE_LEN)
		return 0;

	return (width * height * bytespp) + get_extra_physical_size(
		image_width_roundup, bytespp);
}
EXPORT_SYMBOL(omap_vrfb_min_phys_size);
u16 omap_vrfb_max_height(u32 phys_size, u16 width, u8 bytespp)
{
	unsigned long image_width_roundup = get_image_width_roundup(width,
		bytespp);
	unsigned long height;
	unsigned long extra;

	if (image_width_roundup > OMAP_VRFB_LINE_LEN)
		return 0;

	extra = get_extra_physical_size(image_width_roundup, bytespp);

	if (phys_size < extra)
		return 0;

	height = (phys_size - extra) / (width * bytespp);

	/* Virtual views provided by VRFB are limited to 2048x2048. */
	return min_t(unsigned long, height, 2048);
}
EXPORT_SYMBOL(omap_vrfb_max_height);
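/*
 * Sizing sketch for a client driver (illustrative only; "w", "h" and
 * "bytespp" are hypothetical caller variables, error handling omitted):
 *
 *	u16 w = 800, h = 480;
 *	u32 min_size;
 *
 *	omap_vrfb_adjust_size(&w, &h, bytespp);
 *	min_size = omap_vrfb_min_phys_size(w, h, bytespp);
 *
 * The buffer later handed to omap_vrfb_setup() should be at least min_size
 * bytes so the rotation engine can wrap its accesses safely.
 */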
void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
		u16 width, u16 height,
		unsigned bytespp, bool yuv_mode)
{
	unsigned pixel_size_exp;
	u16 vrfb_width;
	u16 vrfb_height;
	u32 size;
	u32 control;
	u8 ctx = vrfb->context;

	DBG("omapfb_set_vrfb(%d, %lx, %dx%d, %d, %d)\n", ctx, paddr,
			width, height, bytespp, yuv_mode);

	/* For YUV2 and UYVY modes VRFB needs to handle pixels a bit
	 * differently. See TRM. */
	if (yuv_mode) {
		/* a YUV pixel pair is handled as one 32-bit element */
		bytespp *= 2;
		pixel_size_exp = 2;
		width /= 2;
	} else if (bytespp == 4)
		pixel_size_exp = 2;
	else if (bytespp == 2)
		pixel_size_exp = 1;
	else
		BUG();

	vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
	vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT);

	DBG("vrfb w %u, h %u bytespp %d\n", vrfb_width, vrfb_height, bytespp);

	size = vrfb_width << SMS_IMAGEWIDTH_OFFSET;
	size |= vrfb_height << SMS_IMAGEHEIGHT_OFFSET;

	control = pixel_size_exp << SMS_PS_OFFSET;
	control |= VRFB_PAGE_WIDTH_EXP << SMS_PW_OFFSET;
	control |= VRFB_PAGE_HEIGHT_EXP << SMS_PH_OFFSET;

	ctxs[ctx].physical_ba = paddr;
	ctxs[ctx].size = size;
	ctxs[ctx].control = control;

	omap2_sms_write_rot_physical_ba(paddr, ctx);
	omap2_sms_write_rot_size(size, ctx);
	omap2_sms_write_rot_control(control, ctx);

	DBG("vrfb offset pixels %d, %d\n",
			vrfb_width - width, vrfb_height - height);

	vrfb->xoffset = vrfb_width - width;
	vrfb->yoffset = vrfb_height - height;
	vrfb->bytespp = bytespp;
	vrfb->yuv_mode = yuv_mode;
}
EXPORT_SYMBOL(omap_vrfb_setup);
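/*
 * Note: vrfb->xoffset/yoffset record how many padding pixels the page
 * alignment added on top of the requested width and height. Client drivers
 * typically fold these offsets into their line-length and start-address
 * calculations so the visible image lands in the right place inside the
 * aligned virtual view.
 */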
int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot)
{
	unsigned long size = height * OMAP_VRFB_LINE_LEN * vrfb->bytespp;

	vrfb->vaddr[rot] = ioremap_wc(vrfb->paddr[rot], size);

	if (!vrfb->vaddr[rot]) {
		printk(KERN_ERR "vrfb: ioremap failed\n");
		return -ENOMEM;
	}

	DBG("ioremapped vrfb area %d of size %lu into %p\n", rot, size,
		vrfb->vaddr[rot]);

	return 0;
}
EXPORT_SYMBOL(omap_vrfb_map_angle);
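/*
 * Design note: ioremap_wc() gives a write-combined mapping of the rotated
 * view. A reasonable reading is that the framebuffer is write-mostly through
 * this window, so write-combining ordering is sufficient and faster than an
 * uncached mapping would be.
 */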
void omap_vrfb_release_ctx(struct vrfb *vrfb)
{
	int rot;
	int ctx = vrfb->context;

	/* 0xff means this vrfb has no context allocated */
	if (ctx == 0xff)
		return;

	DBG("release ctx %d\n", ctx);

	mutex_lock(&ctx_lock);

	BUG_ON(!(ctx_map & (1 << ctx)));

	clear_bit(ctx, &ctx_map);

	for (rot = 0; rot < 4; ++rot) {
		if (vrfb->paddr[rot]) {
			release_mem_region(vrfb->paddr[rot], OMAP_VRFB_SIZE);
			vrfb->paddr[rot] = 0;
		}
	}

	vrfb->context = 0xff;

	mutex_unlock(&ctx_lock);
}
EXPORT_SYMBOL(omap_vrfb_release_ctx);
int omap_vrfb_request_ctx(struct vrfb *vrfb)
{
	int rot;
	u32 paddr;
	u8 ctx;
	int r;

	DBG("request ctx\n");

	mutex_lock(&ctx_lock);

	for (ctx = 0; ctx < num_ctxs; ++ctx)
		if ((ctx_map & (1 << ctx)) == 0)
			break;

	if (ctx == num_ctxs) {
		pr_err("vrfb: no free contexts\n");
		r = -EBUSY;
		goto out;
	}

	DBG("found free ctx %d\n", ctx);

	set_bit(ctx, &ctx_map);

	memset(vrfb, 0, sizeof(*vrfb));

	vrfb->context = ctx;

	for (rot = 0; rot < 4; ++rot) {
		paddr = ctxs[ctx].base + SMS_ROT_VIRT_BASE(rot);
		if (!request_mem_region(paddr, OMAP_VRFB_SIZE, "vrfb")) {
			pr_err("vrfb: failed to reserve VRFB "
					"area for ctx %d, rotation %d\n",
					ctx, rot * 90);
			/*
			 * omap_vrfb_release_ctx() takes ctx_lock itself,
			 * so drop the lock before cleaning up.
			 */
			mutex_unlock(&ctx_lock);
			omap_vrfb_release_ctx(vrfb);
			return -ENOMEM;
		}

		vrfb->paddr[rot] = paddr;

		DBG("VRFB %d/%d: %lx\n", ctx, rot * 90, vrfb->paddr[rot]);
	}

	r = 0;
out:
	mutex_unlock(&ctx_lock);

	return r;
}
EXPORT_SYMBOL(omap_vrfb_request_ctx);
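/*
 * Typical context lifecycle in a client such as omapfb (sketch only;
 * "fb_paddr", "fb_w" and "fb_h" are hypothetical, error handling omitted):
 *
 *	struct vrfb vrfb;
 *
 *	omap_vrfb_request_ctx(&vrfb);
 *	omap_vrfb_setup(&vrfb, fb_paddr, fb_w, fb_h, 2, false);
 *	omap_vrfb_map_angle(&vrfb, fb_h, 1);	(map the 90 degree view)
 *	...
 *	omap_vrfb_release_ctx(&vrfb);
 */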
bool omap_vrfb_supported(void)
{
	return vrfb_loaded;
}
EXPORT_SYMBOL(omap_vrfb_supported);
static int __init vrfb_probe(struct platform_device *pdev)
{
	struct resource *mem;
	int i;

	/* first resource is the register res, the rest are vrfb contexts */
	vrfb_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vrfb_base))
		return PTR_ERR(vrfb_base);

	num_ctxs = pdev->num_resources - 1;

	ctxs = devm_kcalloc(&pdev->dev,
			num_ctxs, sizeof(struct vrfb_ctx),
			GFP_KERNEL);
	if (!ctxs)
		return -ENOMEM;

	for (i = 0; i < num_ctxs; ++i) {
		mem = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
		if (!mem) {
			dev_err(&pdev->dev, "can't get vrfb ctx %d address\n",
					i);
			return -EINVAL;
		}

		ctxs[i].base = mem->start;
	}

	vrfb_loaded = true;

	return 0;
}
static void __exit vrfb_remove(struct platform_device *pdev)
{
	vrfb_loaded = false;
}

static struct platform_driver vrfb_driver = {
	.driver.name	= "omapvrfb",
	.remove		= __exit_p(vrfb_remove),
};

module_platform_driver_probe(vrfb_driver, vrfb_probe);
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
MODULE_DESCRIPTION("OMAP VRFB");
MODULE_LICENSE("GPL v2");