/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphics devices
 * sitting on an agp port. So it made sense to fake the GTT support as an agp
 * port to avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, it just needlessly
 * complicates the code. But as long as the old graphics stack is still
 * supported, it's stuck here.
 *
 * /fairy-tale-mode off
 */
/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_DMAR).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
#endif
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
	{64, 16384, 4},
	/* The 32M mode still requires a 64k gatt */
	{32, 8192, 4},
};
#define AGP_DCACHE_MEMORY	1
#define AGP_PHYS_MEMORY		2
#define INTEL_AGP_CACHED_MEMORY 3
static struct gatt_mask intel_i810_masks[] =
{
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
	{.mask = I810_PTE_VALID, .type = 0},
	{.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
	 .type = INTEL_AGP_CACHED_MEMORY}
};
static struct _intel_private {
	struct pci_dev *pcidev;	/* device one */
	u8 __iomem *registers;
	u32 __iomem *gtt;		/* I915G */
	int num_dcache_entries;
	/* gtt_entries is the number of gtt entries that are already mapped
	 * to stolen memory. Stolen memory is larger than the memory mapped
	 * through gtt_entries, as it includes some reserved space for the BIOS
	 * popup and for the GTT.
	 */
	int gtt_entries;		/* i830+ */
	int gtt_total_size;
	union {
		void __iomem *i9xx_flush_page;
		void *i8xx_flush_page;
	};
	struct page *i8xx_page;
	struct resource ifp_resource;
	int resource_valid;
} intel_private;
#ifdef USE_PCI_DMA_API
static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
{
	*ret = pci_map_page(intel_private.pcidev, page, 0,
			    PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(intel_private.pcidev, *ret))
		return -EINVAL;
	return 0;
}
static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
{
	pci_unmap_page(intel_private.pcidev, dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
static void intel_agp_free_sglist(struct agp_memory *mem)
{
	struct sg_table st;

	st.sgl = mem->sg_list;
	st.orig_nents = st.nents = mem->page_count;

	sg_free_table(&st);

	mem->sg_list = NULL;
	mem->num_sg = 0;
}
static int intel_agp_map_memory(struct agp_memory *mem)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);

	if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
		return -ENOMEM;

	mem->sg_list = sg = st.sgl;

	for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg))
		sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0);

	mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list,
				 mem->page_count, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!mem->num_sg)) {
		intel_agp_free_sglist(mem);
		return -ENOMEM;
	}
	return 0;
}
static void intel_agp_unmap_memory(struct agp_memory *mem)
{
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, mem->sg_list,
		     mem->page_count, PCI_DMA_BIDIRECTIONAL);
	intel_agp_free_sglist(mem);
}
static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	struct scatterlist *sg;
	int i, j;

	j = pg_start;

	WARN_ON(!mem->num_sg);

	if (mem->num_sg == mem->page_count) {
		for_each_sg(mem->sg_list, sg, mem->page_count, i) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					sg_dma_address(sg), mask_type),
			       intel_private.gtt+j);
			j++;
		}
	} else {
		/* sg may merge pages, but we have to separate
		 * per-page addr for GTT */
		unsigned int len, m;

		for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
			len = sg_dma_len(sg) / PAGE_SIZE;
			for (m = 0; m < len; m++) {
				writel(agp_bridge->driver->mask_memory(agp_bridge,
						sg_dma_address(sg) + m * PAGE_SIZE,
						mask_type),
				       intel_private.gtt+j);
				j++;
			}
		}
	}
	readl(intel_private.gtt+j-1);	/* PCI Posting. */
}
#else

static void intel_agp_insert_sg_entries(struct agp_memory *mem,
					off_t pg_start, int mask_type)
{
	int i, j;
	u32 cache_bits = 0;

	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
	{
		cache_bits = I830_PTE_SYSTEM_CACHED;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.gtt+j);
	}

	readl(intel_private.gtt+j-1);
}

#endif
static int intel_i810_fetch_size(void)
{
	u32 smram_miscc;
	struct aper_size_info_fixed *values;

	pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
		dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
		return 0;
	}
	if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	} else {
		agp_bridge->current_size = (void *) (values);
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}
}
static int intel_i810_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	if (!intel_private.registers) {
		pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
		temp &= 0xfff80000;

		intel_private.registers = ioremap(temp, 128 * 4096);
		if (!intel_private.registers) {
			dev_err(&intel_private.pcidev->dev,
				"can't remap memory\n");
			return -ENOMEM;
		}
	}

	if ((readl(intel_private.registers+I810_DRAM_CTL)
		& I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		/* This will need to be dynamically assigned */
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}
	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = 0; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI posting. */
	}
	global_cache_flush();
	return 0;
}
static void intel_i810_cleanup(void)
{
	writel(0, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers);	/* PCI Posting. */
	iounmap(intel_private.registers);
}
static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}
/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}
static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
					int type)
{
	if (type < AGP_USER_TYPES)
		return type;
	else if (type == AGP_USER_CACHED_MEMORY)
		return INTEL_AGP_CACHED_MEMORY;
	else
		return 0;
}
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	for (j = pg_start; j < (pg_start + mem->page_count); j++) {
		if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) {
			ret = -EBUSY;
			goto out_err;
		}
	}

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	switch (mask_type) {
	case AGP_DCACHE_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = pg_start; i < (pg_start + mem->page_count); i++) {
			writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
			       intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
		break;
	case AGP_PHYS_MEMORY:
	case AGP_NORMAL_MEMORY:
		if (!mem->is_flushed)
			global_cache_flush();
		for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
			writel(agp_bridge->driver->mask_memory(agp_bridge,
					page_to_phys(mem->pages[i]), mask_type),
			       intel_private.registers+I810_PTE_BASE+(j*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
		break;
	default:
		goto out_err;
	}

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}
/* The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}
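
/*
 * Illustrative note (not part of the driver): the pages[1..3] pointer
 * arithmetic above is only valid because i8xx_alloc_pages() allocated an
 * order-2 block, so the four struct page pointers are consecutive and the
 * backing pages physically contiguous, which is what the cursor hardware,
 * programmed with new->physical, requires.
 */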
static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	return NULL;
}
static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}
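
/*
 * For illustration (not part of the driver): with the intel_i810_masks
 * table above, a normal page at physical address 0x12345000 with type 0
 * yields the PTE value 0x12345000 | I810_PTE_VALID, i.e. 0x12345001.
 */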
static struct aper_size_info_fixed intel_i830_sizes[] =
{
	{128, 32768, 5},
	/* The 64M mode still requires a 128k gatt */
	{64, 16384, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};
static void intel_i830_init_gtt_entries(void)
{
	u16 gmch_ctrl;
	int gtt_entries = 0;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	int size; /* reserved space (in kb) at the top of stolen memory */

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

	if (IS_I965) {
		u32 pgetbl_ctl;
		pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);

		/* The 965 has a field telling us the size of the GTT,
		 * which may be larger than what is necessary to map the
		 * aperture.
		 */
		switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
		case I965_PGETBL_SIZE_128KB:
			size = 128;
			break;
		case I965_PGETBL_SIZE_256KB:
			size = 256;
			break;
		case I965_PGETBL_SIZE_512KB:
			size = 512;
			break;
		case I965_PGETBL_SIZE_1MB:
			size = 1024;
			break;
		case I965_PGETBL_SIZE_2MB:
			size = 2048;
			break;
		case I965_PGETBL_SIZE_1_5MB:
			size = 1024 + 512;
			break;
		default:
			dev_info(&intel_private.pcidev->dev,
				 "unknown page table size, assuming 512KB\n");
			size = 512;
		}
		size += 4; /* add in BIOS popup space */
	} else if (IS_G33 && !IS_PINEVIEW) {
		/* G33's GTT size defined in gmch_ctrl */
		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
		case G33_PGETBL_SIZE_1M:
			size = 1024;
			break;
		case G33_PGETBL_SIZE_2M:
			size = 2048;
			break;
		default:
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				 (gmch_ctrl & G33_PGETBL_SIZE_MASK));
			size = 512;
		}
		size += 4;
	} else if (IS_G4X || IS_PINEVIEW) {
		/* On 4 series hardware, GTT stolen is separate from graphics
		 * stolen, ignore it in stolen gtt entries counting. However,
		 * 4KB of the stolen memory doesn't get mapped to the GTT.
		 */
		size = 4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size() + 4;
	}

	if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			gtt_entries = KB(512) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			gtt_entries = MB(1) - KB(size);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			gtt_entries = MB(8) - KB(size);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
			gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	} else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
		   agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			gtt_entries = MB(64) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			gtt_entries = MB(96) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			gtt_entries = MB(128) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			gtt_entries = MB(160) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			gtt_entries = MB(192) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			gtt_entries = MB(224) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			gtt_entries = MB(256) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			gtt_entries = MB(288) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			gtt_entries = MB(320) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			gtt_entries = MB(352) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			gtt_entries = MB(384) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			gtt_entries = MB(416) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			gtt_entries = MB(448) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			gtt_entries = MB(480) - KB(size);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			gtt_entries = MB(512) - KB(size);
			break;
		default:
			gtt_entries = 0;
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			gtt_entries = MB(1) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			gtt_entries = MB(4) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			gtt_entries = MB(8) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			gtt_entries = MB(16) - KB(size);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			gtt_entries = MB(32) - KB(size);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(48) - KB(size);
			else
				gtt_entries = 0;
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			/* Check it's really I915G */
			if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
				gtt_entries = MB(64) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(128) - KB(size);
			else
				gtt_entries = 0;
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			if (IS_G33 || IS_I965 || IS_G4X)
				gtt_entries = MB(256) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(96) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(160) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(224) - KB(size);
			else
				gtt_entries = 0;
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			if (IS_I965 || IS_G4X)
				gtt_entries = MB(352) - KB(size);
			else
				gtt_entries = 0;
			break;
		default:
			gtt_entries = 0;
			break;
		}
	}
	if (gtt_entries > 0) {
		dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
			 gtt_entries / KB(1), local ? "local" : "stolen");
		gtt_entries /= KB(4);
	} else {
		dev_info(&agp_bridge->dev->dev,
			 "no pre-allocated video memory detected\n");
		gtt_entries = 0;
	}

	intel_private.gtt_entries = gtt_entries;
}
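
/*
 * Worked example (illustrative): on an 855-class part with 32MB of stolen
 * memory and a 128MB aperture, fetch_size() returns 128, so
 * size == 128 + 4 == 132KB is reserved at the top of stolen memory for the
 * 128KB GTT plus the 4KB BIOS popup. That leaves MB(32) - KB(132) bytes of
 * stolen memory, i.e. 8159 4KB entries, in intel_private.gtt_entries.
 */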
static void intel_i830_fini_flush(void)
{
	kunmap(intel_private.i8xx_page);
	intel_private.i8xx_flush_page = NULL;
	unmap_page_from_agp(intel_private.i8xx_page);

	__free_page(intel_private.i8xx_page);
	intel_private.i8xx_page = NULL;
}
static void intel_i830_setup_flush(void)
{
	/* return if we've already set the flush mechanism up */
	if (intel_private.i8xx_page)
		return;

	intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
	if (!intel_private.i8xx_page)
		return;

	intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
	if (!intel_private.i8xx_flush_page)
		intel_i830_fini_flush();
}
/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB. So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out. It appears to work.
 */
static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
{
	unsigned int *pg = intel_private.i8xx_flush_page;

	memset(pg, 0, 1024);

	if (cpu_has_clflush)
		clflush_cache_range(pg, 1024);
	else if (wbinvd_on_all_cpus() != 0)
		printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
/* The intel i830 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
	temp &= 0xfff80000;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers)
		return -ENOMEM;

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ?? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
/* Return the gatt table to a sane state. Use the top of stolen
 * memory for the GTT.
 */
static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}
static int intel_i830_fetch_size(void)
{
	u16 gmch_ctrl;
	struct aper_size_info_fixed *values;

	values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);

	if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
	    agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
		/* 855GM/852GM/865G has 128MB aperture size */
		agp_bridge->current_size = (void *) values;
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	}

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);

	if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
		agp_bridge->current_size = (void *) values;
		agp_bridge->aperture_size_idx = 0;
		return values[0].size;
	} else {
		agp_bridge->current_size = (void *) (values + 1);
		agp_bridge->aperture_size_idx = 1;
		return values[1].size;
	}
}
static int intel_i830_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
			writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
		}
		readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i830_setup_flush();
	return 0;
}
static void intel_i830_cleanup(void)
{
	iounmap(intel_private.registers);
}
static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i, j, num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i830 can't check the GTT for entries since it's read only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		writel(agp_bridge->driver->mask_memory(agp_bridge,
				page_to_phys(mem->pages[i]), mask_type),
		       intel_private.registers+I810_PTE_BASE+(j*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.gtt_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
	}
	readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));

	return 0;
}
static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
{
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}
static int intel_alloc_chipset_flush_resource(void)
{
	int ret;

	ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, agp_bridge->dev);

	return ret;
}
static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(agp_bridge->dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}
static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(agp_bridge->dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {

		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR + 4,
			upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(agp_bridge->dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}
static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (IS_SNB)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_I965 || IS_G33 || IS_G4X) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start) {
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
		if (!intel_private.i9xx_flush_page)
			dev_info(&intel_private.pcidev->dev, "can't ioremap flush page - no chipset flushing");
	}
}
static int intel_i9xx_configure(void)
{
	struct aper_size_info_fixed *current_size;
	u32 temp;
	u16 gmch_ctrl;
	int i;

	current_size = A_SIZE_FIX(agp_bridge->current_size);

	pci_read_config_dword(intel_private.pcidev, I915_GMADDR, &temp);

	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
	gmch_ctrl |= I830_GMCH_ENABLED;
	pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);

	writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */

	if (agp_bridge->driver->needs_scratch_page) {
		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
			writel(agp_bridge->scratch_page, intel_private.gtt+i);
		}
		readl(intel_private.gtt+i-1);	/* PCI Posting. */
	}

	global_cache_flush();

	intel_i9xx_setup_flush();

	return 0;
}
static void intel_i915_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);
}
static void intel_i915_chipset_flush(struct agp_bridge_data *bridge)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
static int intel_i915_insert_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int num_entries;
	void *temp;
	int ret = -EINVAL;
	int mask_type;

	if (mem->page_count == 0)
		goto out;

	temp = agp_bridge->current_size;
	num_entries = A_SIZE_FIX(temp)->num_entries;

	if (pg_start < intel_private.gtt_entries) {
		dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
			   "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
			   pg_start, intel_private.gtt_entries);

		dev_info(&intel_private.pcidev->dev,
			 "trying to insert into local/stolen memory\n");
		goto out_err;
	}

	if ((pg_start + mem->page_count) > num_entries)
		goto out_err;

	/* The i915 can't check the GTT for entries since it's read only;
	 * depend on the caller to make the correct offset decisions.
	 */

	if (type != mem->type)
		goto out_err;

	mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);

	if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
	    mask_type != INTEL_AGP_CACHED_MEMORY)
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	intel_agp_insert_sg_entries(mem, pg_start, mask_type);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}
static int intel_i915_remove_entries(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;

	if (mem->page_count == 0)
		return 0;

	if (pg_start < intel_private.gtt_entries) {
		dev_info(&intel_private.pcidev->dev,
			 "trying to disable local/stolen memory\n");
		return -EINVAL;
	}

	for (i = pg_start; i < (mem->page_count + pg_start); i++)
		writel(agp_bridge->scratch_page, intel_private.gtt+i);

	readl(intel_private.gtt+i-1);

	return 0;
}
/* Return the aperture size by just checking the resource length. The effect
 * described in the spec of the MSAC registers is just changing of the
 * resource size.
 */
static int intel_i9xx_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_i830_sizes);
	int aper_size;	/* size in megabytes */
	int i;

	aper_size = pci_resource_len(intel_private.pcidev, 2) / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_i830_sizes[i].size) {
			agp_bridge->current_size = intel_i830_sizes + i;
			return aper_size;
		}
	}

	return 0;
}
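
/*
 * Example (illustrative): with a 256MB graphics aperture BAR, aper_size
 * computes to 256, which matches the {256, 65536, 6} entry of
 * intel_i830_sizes above, so current_size points at that entry and 256
 * is returned.
 */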
static int intel_i915_get_gtt_size(void)
{
	int size;

	if (IS_G33) {
		u16 gmch_ctrl;

		/* G33's GTT size defined in gmch_ctrl */
		pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			size = 512;
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			size = 1024;
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			size = 8*1024;
			break;
		default:
			dev_info(&agp_bridge->dev->dev,
				 "unknown page table size 0x%x, assuming 512KB\n",
				 (gmch_ctrl & I830_GMCH_GMS_MASK));
			size = 512;
		}
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		size = agp_bridge->driver->fetch_size();
	}

	return KB(size);
}
/* The intel i915 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp, temp2;
	int gtt_map_size;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);
	pci_read_config_dword(intel_private.pcidev, I915_PTEADDR, &temp2);

	gtt_map_size = intel_i915_get_gtt_size();

	intel_private.gtt = ioremap(temp2, gtt_map_size);
	if (!intel_private.gtt)
		return -ENOMEM;

	intel_private.gtt_total_size = gtt_map_size / 4;

	temp &= 0xfff80000;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers) {
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
/*
 * The i965 supports 36-bit physical addresses, but to keep
 * the format of the GTT the same, the bits that don't fit
 * in a 32-bit word are shifted down to bits 4..7.
 *
 * Gcc is smart enough to notice that "(addr >> 28) & 0xf0"
 * is always zero on 32-bit architectures, so no need to make
 * this conditional.
 */
static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
					    dma_addr_t addr, int type)
{
	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;

	/* Type checking must be done elsewhere */
	return addr | bridge->driver->masks[type].mask;
}
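
/*
 * Worked example (illustrative): for a page at the 36-bit physical address
 * 0x123456000, bit 32 is set, so (addr >> 28) & 0xf0 == 0x10 and the
 * function returns 0x123456010 | mask. Only the low 32 bits, 0x23456010,
 * fit in the GTT entry, with the high address nibble preserved in bits 4..7.
 */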
static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
{
	u16 snb_gmch_ctl;

	switch (agp_bridge->dev->device) {
	case PCI_DEVICE_ID_INTEL_GM45_HB:
	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
	case PCI_DEVICE_ID_INTEL_Q45_HB:
	case PCI_DEVICE_ID_INTEL_G45_HB:
	case PCI_DEVICE_ID_INTEL_G41_HB:
	case PCI_DEVICE_ID_INTEL_B43_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
		*gtt_offset = *gtt_size = MB(2);
		break;
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
	case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
		*gtt_offset = MB(2);

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			*gtt_size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			*gtt_size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			*gtt_size = MB(2);
			break;
		}
		break;
	default:
		*gtt_offset = *gtt_size = KB(512);
	}
}
/* The intel i965 automatically initializes the agp aperture during POST.
 * Use the memory already set aside for it in the GTT.
 */
static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
{
	int page_order;
	struct aper_size_info_fixed *size;
	int num_entries;
	u32 temp;
	int gtt_offset, gtt_size;

	size = agp_bridge->current_size;
	page_order = size->page_order;
	num_entries = size->num_entries;
	agp_bridge->gatt_table_real = NULL;

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &temp);

	temp &= 0xfff00000;

	intel_i965_get_gtt_range(&gtt_offset, &gtt_size);

	intel_private.gtt = ioremap((temp + gtt_offset), gtt_size);

	if (!intel_private.gtt)
		return -ENOMEM;

	intel_private.gtt_total_size = gtt_size / 4;

	intel_private.registers = ioremap(temp, 128 * 4096);
	if (!intel_private.registers) {
		iounmap(intel_private.gtt);
		return -ENOMEM;
	}

	temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
	global_cache_flush();	/* FIXME: ? */

	/* we have to call this as early as possible after the MMIO base address is known */
	intel_i830_init_gtt_entries();

	agp_bridge->gatt_table = NULL;

	agp_bridge->gatt_bus_addr = temp;

	return 0;
}
static const struct agp_bridge_driver intel_810_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i810_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 2,
	.needs_scratch_page	= true,
	.configure		= intel_i810_configure,
	.fetch_size		= intel_i810_fetch_size,
	.cleanup		= intel_i810_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= agp_generic_create_gatt_table,
	.free_gatt_table	= agp_generic_free_gatt_table,
	.insert_memory		= intel_i810_insert_entries,
	.remove_memory		= intel_i810_remove_entries,
	.alloc_by_type		= intel_i810_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= agp_generic_type_to_mask_type,
};
static const struct agp_bridge_driver intel_830_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i830_configure,
	.fetch_size		= intel_i830_fetch_size,
	.cleanup		= intel_i830_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i830_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i830_insert_entries,
	.remove_memory		= intel_i830_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i830_chipset_flush,
};
static const struct agp_bridge_driver intel_915_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i810_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
static const struct agp_bridge_driver intel_i965_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i965_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};
static const struct agp_bridge_driver intel_g33_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= intel_i830_sizes,
	.size_type		= FIXED_APER_SIZE,
	.num_aperture_sizes	= 4,
	.needs_scratch_page	= true,
	.configure		= intel_i9xx_configure,
	.fetch_size		= intel_i9xx_fetch_size,
	.cleanup		= intel_i915_cleanup,
	.mask_memory		= intel_i965_mask_memory,
	.masks			= intel_i810_masks,
	.agp_enable		= intel_i810_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_i915_create_gatt_table,
	.free_gatt_table	= intel_i830_free_gatt_table,
	.insert_memory		= intel_i915_insert_entries,
	.remove_memory		= intel_i915_remove_entries,
	.alloc_by_type		= intel_i830_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
	.agp_type_to_mask_type	= intel_i830_type_to_mask_type,
	.chipset_flush		= intel_i915_chipset_flush,
#ifdef USE_PCI_DMA_API
	.agp_map_page		= intel_agp_map_page,
	.agp_unmap_page		= intel_agp_unmap_page,
	.agp_map_memory		= intel_agp_map_memory,
	.agp_unmap_memory	= intel_agp_unmap_memory,
#endif
};