// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) Intel Corp. 2007.
 * All Rights Reserved.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 * This file is part of the Vermilion Range fb driver.
 *
 * Authors:
 *   Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *   Michel Dänzer <michel-at-tungstengraphics-dot-com>
 *   Alan Hourihane <alanh-at-tungstengraphics-dot-com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/pci.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <linux/mmzone.h>

/* #define VERMILION_DEBUG */

#include "vermilion.h"
#define MODULE_NAME "vmlfb"

/*
 * Scale a 16-bit color component (0..0xFFFF) down to a hardware field of
 * _width bits, rounding so that 0xFFFF maps to the all-ones field value.
 */
#define VML_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
38 static struct mutex vml_mutex
;
39 static struct list_head global_no_mode
;
40 static struct list_head global_has_mode
;
41 static struct fb_ops vmlfb_ops
;
42 static struct vml_sys
*subsys
= NULL
;
43 static char *vml_default_mode
= "1024x768@60";
44 static const struct fb_videomode defaultmode
= {
45 NULL
, 60, 1024, 768, 12896, 144, 24, 29, 3, 136, 6,
46 0, FB_VMODE_NONINTERLACED
49 static u32 vml_mem_requested
= (10 * 1024 * 1024);
50 static u32 vml_mem_contig
= (4 * 1024 * 1024);
51 static u32 vml_mem_min
= (4 * 1024 * 1024);
53 static u32 vml_clocks
[] = {
66 static u32 vml_num_clocks
= ARRAY_SIZE(vml_clocks
);
69 * Allocate a contiguous vram area and make its linear kernel map
73 static int vmlfb_alloc_vram_area(struct vram_area
*va
, unsigned max_order
,
82 * Really try hard to get the needed memory.
83 * We need memory below the first 32MB, so we
84 * add the __GFP_DMA flag that guarantees that we are
85 * below the first 16MB.
88 flags
= __GFP_DMA
| __GFP_HIGH
| __GFP_KSWAPD_RECLAIM
;
90 __get_free_pages(flags
, --max_order
);
91 } while (va
->logical
== 0 && max_order
> min_order
);
96 va
->phys
= virt_to_phys((void *)va
->logical
);
97 va
->size
= PAGE_SIZE
<< max_order
;
98 va
->order
= max_order
;
101 * It seems like __get_free_pages only ups the usage count
102 * of the first page. This doesn't work with fault mapping, so
103 * up the usage count once more (XXX: should use split_page or
107 memset((void *)va
->logical
, 0x00, va
->size
);
108 for (i
= va
->logical
; i
< va
->logical
+ va
->size
; i
+= PAGE_SIZE
) {
109 get_page(virt_to_page(i
));
113 * Change caching policy of the linear kernel map to avoid
114 * mapping type conflicts with user-space mappings.
116 set_pages_uc(virt_to_page(va
->logical
), va
->size
>> PAGE_SHIFT
);
118 printk(KERN_DEBUG MODULE_NAME
119 ": Allocated %ld bytes vram area at 0x%08lx\n",
126 * Free a contiguous vram area and reset its linear kernel map
130 static void vmlfb_free_vram_area(struct vram_area
*va
)
137 * Reset the linear kernel map caching policy.
140 set_pages_wb(virt_to_page(va
->logical
),
141 va
->size
>> PAGE_SHIFT
);
144 * Decrease the usage count on the pages we've used
145 * to compensate for upping when allocating.
148 for (j
= va
->logical
; j
< va
->logical
+ va
->size
;
150 (void)put_page_testzero(virt_to_page(j
));
153 printk(KERN_DEBUG MODULE_NAME
154 ": Freeing %ld bytes vram area at 0x%08lx\n",
156 free_pages(va
->logical
, va
->order
);
163 * Free allocated vram.
166 static void vmlfb_free_vram(struct vml_info
*vinfo
)
170 for (i
= 0; i
< vinfo
->num_areas
; ++i
) {
171 vmlfb_free_vram_area(&vinfo
->vram
[i
]);
173 vinfo
->num_areas
= 0;
177 * Allocate vram. Currently we try to allocate contiguous areas from the
178 * __GFP_DMA zone and puzzle them together. A better approach would be to
179 * allocate one contiguous area for scanout and use one-page allocations for
180 * offscreen areas. This requires user-space and GPU virtual mappings.
183 static int vmlfb_alloc_vram(struct vml_info
*vinfo
,
185 size_t min_total
, size_t min_contig
)
191 struct vram_area
*va
;
192 struct vram_area
*va2
;
194 vinfo
->num_areas
= 0;
195 for (i
= 0; i
< VML_VRAM_AREAS
; ++i
) {
196 va
= &vinfo
->vram
[i
];
199 while (requested
> (PAGE_SIZE
<< order
) && order
< MAX_ORDER
)
202 err
= vmlfb_alloc_vram_area(va
, order
, 0);
208 vinfo
->vram_start
= va
->phys
;
209 vinfo
->vram_logical
= (void __iomem
*) va
->logical
;
210 vinfo
->vram_contig_size
= va
->size
;
211 vinfo
->num_areas
= 1;
215 for (j
= 0; j
< i
; ++j
) {
216 va2
= &vinfo
->vram
[j
];
217 if (va
->phys
+ va
->size
== va2
->phys
||
218 va2
->phys
+ va2
->size
== va
->phys
) {
226 if (va
->phys
< vinfo
->vram_start
) {
227 vinfo
->vram_start
= va
->phys
;
228 vinfo
->vram_logical
=
229 (void __iomem
*)va
->logical
;
231 vinfo
->vram_contig_size
+= va
->size
;
233 vmlfb_free_vram_area(va
);
238 if (requested
< va
->size
)
241 requested
-= va
->size
;
244 if (vinfo
->vram_contig_size
> min_total
&&
245 vinfo
->vram_contig_size
> min_contig
) {
247 printk(KERN_DEBUG MODULE_NAME
248 ": Contiguous vram: %ld bytes at physical 0x%08lx.\n",
249 (unsigned long)vinfo
->vram_contig_size
,
250 (unsigned long)vinfo
->vram_start
);
255 printk(KERN_ERR MODULE_NAME
256 ": Could not allocate requested minimal amount of vram.\n");
258 vmlfb_free_vram(vinfo
);
264 * Find the GPU to use with our display controller.
267 static int vmlfb_get_gpu(struct vml_par
*par
)
269 mutex_lock(&vml_mutex
);
271 par
->gpu
= pci_get_device(PCI_VENDOR_ID_INTEL
, VML_DEVICE_GPU
, NULL
);
274 mutex_unlock(&vml_mutex
);
278 mutex_unlock(&vml_mutex
);
280 if (pci_enable_device(par
->gpu
) < 0)
287 * Find a contiguous vram area that contains a given offset from vram start.
289 static int vmlfb_vram_offset(struct vml_info
*vinfo
, unsigned long offset
)
291 unsigned long aoffset
;
294 for (i
= 0; i
< vinfo
->num_areas
; ++i
) {
295 aoffset
= offset
- (vinfo
->vram
[i
].phys
- vinfo
->vram_start
);
297 if (aoffset
< vinfo
->vram
[i
].size
) {
306 * Remap the MMIO register spaces of the VDC and the GPU.
309 static int vmlfb_enable_mmio(struct vml_par
*par
)
313 par
->vdc_mem_base
= pci_resource_start(par
->vdc
, 0);
314 par
->vdc_mem_size
= pci_resource_len(par
->vdc
, 0);
315 if (!request_mem_region(par
->vdc_mem_base
, par
->vdc_mem_size
, "vmlfb")) {
316 printk(KERN_ERR MODULE_NAME
317 ": Could not claim display controller MMIO.\n");
320 par
->vdc_mem
= ioremap(par
->vdc_mem_base
, par
->vdc_mem_size
);
321 if (par
->vdc_mem
== NULL
) {
322 printk(KERN_ERR MODULE_NAME
323 ": Could not map display controller MMIO.\n");
328 par
->gpu_mem_base
= pci_resource_start(par
->gpu
, 0);
329 par
->gpu_mem_size
= pci_resource_len(par
->gpu
, 0);
330 if (!request_mem_region(par
->gpu_mem_base
, par
->gpu_mem_size
, "vmlfb")) {
331 printk(KERN_ERR MODULE_NAME
": Could not claim GPU MMIO.\n");
335 par
->gpu_mem
= ioremap(par
->gpu_mem_base
, par
->gpu_mem_size
);
336 if (par
->gpu_mem
== NULL
) {
337 printk(KERN_ERR MODULE_NAME
": Could not map GPU MMIO.\n");
345 release_mem_region(par
->gpu_mem_base
, par
->gpu_mem_size
);
347 iounmap(par
->vdc_mem
);
349 release_mem_region(par
->vdc_mem_base
, par
->vdc_mem_size
);
354 * Unmap the VDC and GPU register spaces.
357 static void vmlfb_disable_mmio(struct vml_par
*par
)
359 iounmap(par
->gpu_mem
);
360 release_mem_region(par
->gpu_mem_base
, par
->gpu_mem_size
);
361 iounmap(par
->vdc_mem
);
362 release_mem_region(par
->vdc_mem_base
, par
->vdc_mem_size
);
366 * Release and uninit the VDC and GPU.
369 static void vmlfb_release_devices(struct vml_par
*par
)
371 if (atomic_dec_and_test(&par
->refcount
)) {
372 pci_disable_device(par
->gpu
);
373 pci_disable_device(par
->vdc
);
378 * Free up allocated resources for a device.
381 static void vml_pci_remove(struct pci_dev
*dev
)
383 struct fb_info
*info
;
384 struct vml_info
*vinfo
;
387 info
= pci_get_drvdata(dev
);
389 vinfo
= container_of(info
, struct vml_info
, info
);
391 mutex_lock(&vml_mutex
);
392 unregister_framebuffer(info
);
393 fb_dealloc_cmap(&info
->cmap
);
394 vmlfb_free_vram(vinfo
);
395 vmlfb_disable_mmio(par
);
396 vmlfb_release_devices(par
);
399 mutex_unlock(&vml_mutex
);
403 static void vmlfb_set_pref_pixel_format(struct fb_var_screeninfo
*var
)
405 switch (var
->bits_per_pixel
) {
407 var
->blue
.offset
= 0;
408 var
->blue
.length
= 5;
409 var
->green
.offset
= 5;
410 var
->green
.length
= 5;
411 var
->red
.offset
= 10;
413 var
->transp
.offset
= 15;
414 var
->transp
.length
= 1;
417 var
->blue
.offset
= 0;
418 var
->blue
.length
= 8;
419 var
->green
.offset
= 8;
420 var
->green
.length
= 8;
421 var
->red
.offset
= 16;
423 var
->transp
.offset
= 24;
424 var
->transp
.length
= 0;
430 var
->blue
.msb_right
= var
->green
.msb_right
=
431 var
->red
.msb_right
= var
->transp
.msb_right
= 0;
435 * Device initialization.
436 * We initialize one vml_par struct per device and one vml_info
437 * struct per pipe. Currently we have only one pipe.
440 static int vml_pci_probe(struct pci_dev
*dev
, const struct pci_device_id
*id
)
442 struct vml_info
*vinfo
;
443 struct fb_info
*info
;
447 par
= kzalloc(sizeof(*par
), GFP_KERNEL
);
451 vinfo
= kzalloc(sizeof(*vinfo
), GFP_KERNEL
);
459 atomic_set(&par
->refcount
, 1);
461 switch (id
->device
) {
463 if ((err
= vmlfb_get_gpu(par
)))
465 pci_set_drvdata(dev
, &vinfo
->info
);
473 info
->flags
= FBINFO_DEFAULT
| FBINFO_PARTIAL_PAN_OK
;
475 err
= vmlfb_enable_mmio(par
);
479 err
= vmlfb_alloc_vram(vinfo
, vml_mem_requested
,
480 vml_mem_contig
, vml_mem_min
);
484 strcpy(info
->fix
.id
, "Vermilion Range");
485 info
->fix
.mmio_start
= 0;
486 info
->fix
.mmio_len
= 0;
487 info
->fix
.smem_start
= vinfo
->vram_start
;
488 info
->fix
.smem_len
= vinfo
->vram_contig_size
;
489 info
->fix
.type
= FB_TYPE_PACKED_PIXELS
;
490 info
->fix
.visual
= FB_VISUAL_TRUECOLOR
;
491 info
->fix
.ypanstep
= 1;
492 info
->fix
.xpanstep
= 1;
493 info
->fix
.ywrapstep
= 0;
494 info
->fix
.accel
= FB_ACCEL_NONE
;
495 info
->screen_base
= vinfo
->vram_logical
;
496 info
->pseudo_palette
= vinfo
->pseudo_palette
;
498 info
->fbops
= &vmlfb_ops
;
499 info
->device
= &dev
->dev
;
501 INIT_LIST_HEAD(&vinfo
->head
);
502 vinfo
->pipe_disabled
= 1;
503 vinfo
->cur_blank_mode
= FB_BLANK_UNBLANK
;
505 info
->var
.grayscale
= 0;
506 info
->var
.bits_per_pixel
= 16;
507 vmlfb_set_pref_pixel_format(&info
->var
);
510 (&info
->var
, info
, vml_default_mode
, NULL
, 0, &defaultmode
, 16)) {
511 printk(KERN_ERR MODULE_NAME
": Could not find initial mode\n");
514 if (fb_alloc_cmap(&info
->cmap
, 256, 1) < 0) {
519 err
= register_framebuffer(info
);
521 printk(KERN_ERR MODULE_NAME
": Register framebuffer error.\n");
525 printk("Initialized vmlfb\n");
530 fb_dealloc_cmap(&info
->cmap
);
532 vmlfb_free_vram(vinfo
);
534 vmlfb_disable_mmio(par
);
536 vmlfb_release_devices(par
);
static int vmlfb_open(struct fb_info *info, int user)
{
	/*
	 * Save registers here?
	 */
	return 0;
}
static int vmlfb_release(struct fb_info *info, int user)
{
	/*
	 * Restore registers here.
	 */
	return 0;
}
561 static int vml_nearest_clock(int clock
)
570 cur_diff
= clock
- vml_clocks
[0];
571 cur_diff
= (cur_diff
< 0) ? -cur_diff
: cur_diff
;
572 for (i
= 1; i
< vml_num_clocks
; ++i
) {
573 diff
= clock
- vml_clocks
[i
];
574 diff
= (diff
< 0) ? -diff
: diff
;
575 if (diff
< cur_diff
) {
580 return vml_clocks
[cur_index
];
583 static int vmlfb_check_var_locked(struct fb_var_screeninfo
*var
,
584 struct vml_info
*vinfo
)
591 struct fb_var_screeninfo v
;
594 clock
= PICOS2KHZ(var
->pixclock
);
596 if (subsys
&& subsys
->nearest_clock
) {
597 nearest_clock
= subsys
->nearest_clock(subsys
, clock
);
599 nearest_clock
= vml_nearest_clock(clock
);
606 clock_diff
= nearest_clock
- clock
;
607 clock_diff
= (clock_diff
< 0) ? -clock_diff
: clock_diff
;
608 if (clock_diff
> clock
/ 5) {
610 printk(KERN_DEBUG MODULE_NAME
": Diff failure. %d %d\n",clock_diff
,clock
);
615 v
.pixclock
= KHZ2PICOS(nearest_clock
);
617 if (var
->xres
> VML_MAX_XRES
|| var
->yres
> VML_MAX_YRES
) {
618 printk(KERN_DEBUG MODULE_NAME
": Resolution failure.\n");
621 if (var
->xres_virtual
> VML_MAX_XRES_VIRTUAL
) {
622 printk(KERN_DEBUG MODULE_NAME
623 ": Virtual resolution failure.\n");
626 switch (v
.bits_per_pixel
) {
628 v
.bits_per_pixel
= 16;
631 v
.bits_per_pixel
= 32;
634 printk(KERN_DEBUG MODULE_NAME
": Invalid bpp: %d.\n",
635 var
->bits_per_pixel
);
639 pitch
= ALIGN((var
->xres
* var
->bits_per_pixel
) >> 3, 0x40);
640 mem
= (u64
)pitch
* var
->yres_virtual
;
641 if (mem
> vinfo
->vram_contig_size
) {
645 switch (v
.bits_per_pixel
) {
647 if (var
->blue
.offset
!= 0 ||
648 var
->blue
.length
!= 5 ||
649 var
->green
.offset
!= 5 ||
650 var
->green
.length
!= 5 ||
651 var
->red
.offset
!= 10 ||
652 var
->red
.length
!= 5 ||
653 var
->transp
.offset
!= 15 || var
->transp
.length
!= 1) {
654 vmlfb_set_pref_pixel_format(&v
);
658 if (var
->blue
.offset
!= 0 ||
659 var
->blue
.length
!= 8 ||
660 var
->green
.offset
!= 8 ||
661 var
->green
.length
!= 8 ||
662 var
->red
.offset
!= 16 ||
663 var
->red
.length
!= 8 ||
664 (var
->transp
.length
!= 0 && var
->transp
.length
!= 8) ||
665 (var
->transp
.length
== 8 && var
->transp
.offset
!= 24)) {
666 vmlfb_set_pref_pixel_format(&v
);
678 static int vmlfb_check_var(struct fb_var_screeninfo
*var
, struct fb_info
*info
)
680 struct vml_info
*vinfo
= container_of(info
, struct vml_info
, info
);
683 mutex_lock(&vml_mutex
);
684 ret
= vmlfb_check_var_locked(var
, vinfo
);
685 mutex_unlock(&vml_mutex
);
static void vml_wait_vblank(struct vml_info *vinfo)
{
	/* Wait for vblank. For now, just wait for a 50Hz cycle (20ms)) */
	mdelay(20);
}
696 static void vmlfb_disable_pipe(struct vml_info
*vinfo
)
698 struct vml_par
*par
= vinfo
->par
;
700 /* Disable the MDVO pad */
701 VML_WRITE32(par
, VML_RCOMPSTAT
, 0);
702 while (!(VML_READ32(par
, VML_RCOMPSTAT
) & VML_MDVO_VDC_I_RCOMP
)) ;
704 /* Disable display planes */
705 VML_WRITE32(par
, VML_DSPCCNTR
,
706 VML_READ32(par
, VML_DSPCCNTR
) & ~VML_GFX_ENABLE
);
707 (void)VML_READ32(par
, VML_DSPCCNTR
);
708 /* Wait for vblank for the disable to take effect */
709 vml_wait_vblank(vinfo
);
711 /* Next, disable display pipes */
712 VML_WRITE32(par
, VML_PIPEACONF
, 0);
713 (void)VML_READ32(par
, VML_PIPEACONF
);
715 vinfo
->pipe_disabled
= 1;
#ifdef VERMILION_DEBUG
/* Dump all modesetting registers of the VDC (debug builds only). */
static void vml_dump_regs(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;

	printk(KERN_DEBUG MODULE_NAME ": Modesetting register dump:\n");
	printk(KERN_DEBUG MODULE_NAME ": \tHTOTAL_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHBLANK_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHSYNC_A          : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVTOTAL_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVBLANK_A         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVSYNC_A          : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSTRIDE       : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSTRIDE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSIZE         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSIZE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCPOS          : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCPOS));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPARB           : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPARB));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCADDR         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCADDR));
	printk(KERN_DEBUG MODULE_NAME ": \tBCLRPAT_A        : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_BCLRPAT_A));
	printk(KERN_DEBUG MODULE_NAME ": \tCANVSCLR_A       : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_CANVSCLR_A));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEASRC         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEASRC));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEACONF        : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEACONF));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCCNTR         : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCCNTR));
	printk(KERN_DEBUG MODULE_NAME ": \tRCOMPSTAT        : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_RCOMPSTAT));
	printk(KERN_DEBUG MODULE_NAME ": End of modesetting register dump.\n");
}
#endif
762 static int vmlfb_set_par_locked(struct vml_info
*vinfo
)
764 struct vml_par
*par
= vinfo
->par
;
765 struct fb_info
*info
= &vinfo
->info
;
766 struct fb_var_screeninfo
*var
= &info
->var
;
767 u32 htotal
, hactive
, hblank_start
, hblank_end
, hsync_start
, hsync_end
;
768 u32 vtotal
, vactive
, vblank_start
, vblank_end
, vsync_start
, vsync_end
;
772 vinfo
->bytes_per_pixel
= var
->bits_per_pixel
>> 3;
773 vinfo
->stride
= ALIGN(var
->xres_virtual
* vinfo
->bytes_per_pixel
, 0x40);
774 info
->fix
.line_length
= vinfo
->stride
;
780 var
->xres
+ var
->right_margin
+ var
->hsync_len
+ var
->left_margin
;
782 hblank_start
= var
->xres
;
784 hsync_start
= hactive
+ var
->right_margin
;
785 hsync_end
= hsync_start
+ var
->hsync_len
;
788 var
->yres
+ var
->lower_margin
+ var
->vsync_len
+ var
->upper_margin
;
790 vblank_start
= var
->yres
;
792 vsync_start
= vactive
+ var
->lower_margin
;
793 vsync_end
= vsync_start
+ var
->vsync_len
;
795 dspcntr
= VML_GFX_ENABLE
| VML_GFX_GAMMABYPASS
;
796 clock
= PICOS2KHZ(var
->pixclock
);
798 if (subsys
->nearest_clock
) {
799 clock
= subsys
->nearest_clock(subsys
, clock
);
801 clock
= vml_nearest_clock(clock
);
803 printk(KERN_DEBUG MODULE_NAME
804 ": Set mode Hfreq : %d kHz, Vfreq : %d Hz.\n", clock
/ htotal
,
805 ((clock
/ htotal
) * 1000) / vtotal
);
807 switch (var
->bits_per_pixel
) {
809 dspcntr
|= VML_GFX_ARGB1555
;
812 if (var
->transp
.length
== 8)
813 dspcntr
|= VML_GFX_ARGB8888
| VML_GFX_ALPHAMULT
;
815 dspcntr
|= VML_GFX_RGB0888
;
821 vmlfb_disable_pipe(vinfo
);
824 if (subsys
->set_clock
)
825 subsys
->set_clock(subsys
, clock
);
829 VML_WRITE32(par
, VML_HTOTAL_A
, ((htotal
- 1) << 16) | (hactive
- 1));
830 VML_WRITE32(par
, VML_HBLANK_A
,
831 ((hblank_end
- 1) << 16) | (hblank_start
- 1));
832 VML_WRITE32(par
, VML_HSYNC_A
,
833 ((hsync_end
- 1) << 16) | (hsync_start
- 1));
834 VML_WRITE32(par
, VML_VTOTAL_A
, ((vtotal
- 1) << 16) | (vactive
- 1));
835 VML_WRITE32(par
, VML_VBLANK_A
,
836 ((vblank_end
- 1) << 16) | (vblank_start
- 1));
837 VML_WRITE32(par
, VML_VSYNC_A
,
838 ((vsync_end
- 1) << 16) | (vsync_start
- 1));
839 VML_WRITE32(par
, VML_DSPCSTRIDE
, vinfo
->stride
);
840 VML_WRITE32(par
, VML_DSPCSIZE
,
841 ((var
->yres
- 1) << 16) | (var
->xres
- 1));
842 VML_WRITE32(par
, VML_DSPCPOS
, 0x00000000);
843 VML_WRITE32(par
, VML_DSPARB
, VML_FIFO_DEFAULT
);
844 VML_WRITE32(par
, VML_BCLRPAT_A
, 0x00000000);
845 VML_WRITE32(par
, VML_CANVSCLR_A
, 0x00000000);
846 VML_WRITE32(par
, VML_PIPEASRC
,
847 ((var
->xres
- 1) << 16) | (var
->yres
- 1));
850 VML_WRITE32(par
, VML_PIPEACONF
, VML_PIPE_ENABLE
);
852 VML_WRITE32(par
, VML_DSPCCNTR
, dspcntr
);
854 VML_WRITE32(par
, VML_DSPCADDR
, (u32
) vinfo
->vram_start
+
855 var
->yoffset
* vinfo
->stride
+
856 var
->xoffset
* vinfo
->bytes_per_pixel
);
858 VML_WRITE32(par
, VML_RCOMPSTAT
, VML_MDVO_PAD_ENABLE
);
860 while (!(VML_READ32(par
, VML_RCOMPSTAT
) &
861 (VML_MDVO_VDC_I_RCOMP
| VML_MDVO_PAD_ENABLE
))) ;
863 vinfo
->pipe_disabled
= 0;
864 #ifdef VERMILION_DEBUG
865 vml_dump_regs(vinfo
);
871 static int vmlfb_set_par(struct fb_info
*info
)
873 struct vml_info
*vinfo
= container_of(info
, struct vml_info
, info
);
876 mutex_lock(&vml_mutex
);
877 list_move(&vinfo
->head
, (subsys
) ? &global_has_mode
: &global_no_mode
);
878 ret
= vmlfb_set_par_locked(vinfo
);
880 mutex_unlock(&vml_mutex
);
884 static int vmlfb_blank_locked(struct vml_info
*vinfo
)
886 struct vml_par
*par
= vinfo
->par
;
887 u32 cur
= VML_READ32(par
, VML_PIPEACONF
);
889 switch (vinfo
->cur_blank_mode
) {
890 case FB_BLANK_UNBLANK
:
891 if (vinfo
->pipe_disabled
) {
892 vmlfb_set_par_locked(vinfo
);
894 VML_WRITE32(par
, VML_PIPEACONF
, cur
& ~VML_PIPE_FORCE_BORDER
);
895 (void)VML_READ32(par
, VML_PIPEACONF
);
897 case FB_BLANK_NORMAL
:
898 if (vinfo
->pipe_disabled
) {
899 vmlfb_set_par_locked(vinfo
);
901 VML_WRITE32(par
, VML_PIPEACONF
, cur
| VML_PIPE_FORCE_BORDER
);
902 (void)VML_READ32(par
, VML_PIPEACONF
);
904 case FB_BLANK_VSYNC_SUSPEND
:
905 case FB_BLANK_HSYNC_SUSPEND
:
906 if (!vinfo
->pipe_disabled
) {
907 vmlfb_disable_pipe(vinfo
);
910 case FB_BLANK_POWERDOWN
:
911 if (!vinfo
->pipe_disabled
) {
912 vmlfb_disable_pipe(vinfo
);
922 static int vmlfb_blank(int blank_mode
, struct fb_info
*info
)
924 struct vml_info
*vinfo
= container_of(info
, struct vml_info
, info
);
927 mutex_lock(&vml_mutex
);
928 vinfo
->cur_blank_mode
= blank_mode
;
929 ret
= vmlfb_blank_locked(vinfo
);
930 mutex_unlock(&vml_mutex
);
934 static int vmlfb_pan_display(struct fb_var_screeninfo
*var
,
935 struct fb_info
*info
)
937 struct vml_info
*vinfo
= container_of(info
, struct vml_info
, info
);
938 struct vml_par
*par
= vinfo
->par
;
940 mutex_lock(&vml_mutex
);
941 VML_WRITE32(par
, VML_DSPCADDR
, (u32
) vinfo
->vram_start
+
942 var
->yoffset
* vinfo
->stride
+
943 var
->xoffset
* vinfo
->bytes_per_pixel
);
944 (void)VML_READ32(par
, VML_DSPCADDR
);
945 mutex_unlock(&vml_mutex
);
950 static int vmlfb_setcolreg(u_int regno
, u_int red
, u_int green
, u_int blue
,
951 u_int transp
, struct fb_info
*info
)
958 if (info
->var
.grayscale
) {
959 red
= green
= blue
= (red
* 77 + green
* 151 + blue
* 28) >> 8;
962 if (info
->fix
.visual
!= FB_VISUAL_TRUECOLOR
)
965 red
= VML_TOHW(red
, info
->var
.red
.length
);
966 blue
= VML_TOHW(blue
, info
->var
.blue
.length
);
967 green
= VML_TOHW(green
, info
->var
.green
.length
);
968 transp
= VML_TOHW(transp
, info
->var
.transp
.length
);
970 v
= (red
<< info
->var
.red
.offset
) |
971 (green
<< info
->var
.green
.offset
) |
972 (blue
<< info
->var
.blue
.offset
) |
973 (transp
<< info
->var
.transp
.offset
);
975 switch (info
->var
.bits_per_pixel
) {
977 ((u32
*) info
->pseudo_palette
)[regno
] = v
;
981 ((u32
*) info
->pseudo_palette
)[regno
] = v
;
987 static int vmlfb_mmap(struct fb_info
*info
, struct vm_area_struct
*vma
)
989 struct vml_info
*vinfo
= container_of(info
, struct vml_info
, info
);
990 unsigned long offset
= vma
->vm_pgoff
<< PAGE_SHIFT
;
994 ret
= vmlfb_vram_offset(vinfo
, offset
);
998 prot
= pgprot_val(vma
->vm_page_prot
) & ~_PAGE_CACHE_MASK
;
999 pgprot_val(vma
->vm_page_prot
) =
1000 prot
| cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS
);
1002 return vm_iomap_memory(vma
, vinfo
->vram_start
,
1003 vinfo
->vram_contig_size
);
static int vmlfb_sync(struct fb_info *info)
{
	return 0;
}
1011 static int vmlfb_cursor(struct fb_info
*info
, struct fb_cursor
*cursor
)
1013 return -EINVAL
; /* just to force soft_cursor() call */
1016 static struct fb_ops vmlfb_ops
= {
1017 .owner
= THIS_MODULE
,
1018 .fb_open
= vmlfb_open
,
1019 .fb_release
= vmlfb_release
,
1020 .fb_check_var
= vmlfb_check_var
,
1021 .fb_set_par
= vmlfb_set_par
,
1022 .fb_blank
= vmlfb_blank
,
1023 .fb_pan_display
= vmlfb_pan_display
,
1024 .fb_fillrect
= cfb_fillrect
,
1025 .fb_copyarea
= cfb_copyarea
,
1026 .fb_imageblit
= cfb_imageblit
,
1027 .fb_cursor
= vmlfb_cursor
,
1028 .fb_sync
= vmlfb_sync
,
1029 .fb_mmap
= vmlfb_mmap
,
1030 .fb_setcolreg
= vmlfb_setcolreg
1033 static const struct pci_device_id vml_ids
[] = {
1034 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, VML_DEVICE_VDC
)},
1038 static struct pci_driver vmlfb_pci_driver
= {
1040 .id_table
= vml_ids
,
1041 .probe
= vml_pci_probe
,
1042 .remove
= vml_pci_remove
,
1045 static void __exit
vmlfb_cleanup(void)
1047 pci_unregister_driver(&vmlfb_pci_driver
);
1050 static int __init
vmlfb_init(void)
1054 char *option
= NULL
;
1056 if (fb_get_options(MODULE_NAME
, &option
))
1060 printk(KERN_DEBUG MODULE_NAME
": initializing\n");
1061 mutex_init(&vml_mutex
);
1062 INIT_LIST_HEAD(&global_no_mode
);
1063 INIT_LIST_HEAD(&global_has_mode
);
1065 return pci_register_driver(&vmlfb_pci_driver
);
1068 int vmlfb_register_subsys(struct vml_sys
*sys
)
1070 struct vml_info
*entry
;
1071 struct list_head
*list
;
1074 mutex_lock(&vml_mutex
);
1075 if (subsys
!= NULL
) {
1076 subsys
->restore(subsys
);
1079 subsys
->save(subsys
);
1082 * We need to restart list traversal for each item, since we
1083 * release the list mutex in the loop.
1086 list
= global_no_mode
.next
;
1087 while (list
!= &global_no_mode
) {
1088 list_del_init(list
);
1089 entry
= list_entry(list
, struct vml_info
, head
);
1092 * First, try the current mode which might not be
1093 * completely validated with respect to the pixel clock.
1096 if (!vmlfb_check_var_locked(&entry
->info
.var
, entry
)) {
1097 vmlfb_set_par_locked(entry
);
1098 list_add_tail(list
, &global_has_mode
);
1102 * Didn't work. Try to find another mode,
1103 * that matches this subsys.
1106 mutex_unlock(&vml_mutex
);
1107 save_activate
= entry
->info
.var
.activate
;
1108 entry
->info
.var
.bits_per_pixel
= 16;
1109 vmlfb_set_pref_pixel_format(&entry
->info
.var
);
1110 if (fb_find_mode(&entry
->info
.var
,
1112 vml_default_mode
, NULL
, 0, NULL
, 16)) {
1113 entry
->info
.var
.activate
|=
1114 FB_ACTIVATE_FORCE
| FB_ACTIVATE_NOW
;
1115 fb_set_var(&entry
->info
, &entry
->info
.var
);
1117 printk(KERN_ERR MODULE_NAME
1118 ": Sorry. no mode found for this subsys.\n");
1120 entry
->info
.var
.activate
= save_activate
;
1121 mutex_lock(&vml_mutex
);
1123 vmlfb_blank_locked(entry
);
1124 list
= global_no_mode
.next
;
1126 mutex_unlock(&vml_mutex
);
1128 printk(KERN_DEBUG MODULE_NAME
": Registered %s subsystem.\n",
1129 subsys
->name
? subsys
->name
: "unknown");
1133 EXPORT_SYMBOL_GPL(vmlfb_register_subsys
);
1135 void vmlfb_unregister_subsys(struct vml_sys
*sys
)
1137 struct vml_info
*entry
, *next
;
1139 mutex_lock(&vml_mutex
);
1140 if (subsys
!= sys
) {
1141 mutex_unlock(&vml_mutex
);
1144 subsys
->restore(subsys
);
1146 list_for_each_entry_safe(entry
, next
, &global_has_mode
, head
) {
1147 printk(KERN_DEBUG MODULE_NAME
": subsys disable pipe\n");
1148 vmlfb_disable_pipe(entry
);
1149 list_move_tail(&entry
->head
, &global_no_mode
);
1151 mutex_unlock(&vml_mutex
);
1154 EXPORT_SYMBOL_GPL(vmlfb_unregister_subsys
);
1156 module_init(vmlfb_init
);
1157 module_exit(vmlfb_cleanup
);
1159 MODULE_AUTHOR("Tungsten Graphics");
1160 MODULE_DESCRIPTION("Initialization of the Vermilion display devices");
1161 MODULE_VERSION("1.0.0");
1162 MODULE_LICENSE("GPL");