/*
 * Serverworks AGPGART routines.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/agp_backend.h>
#include "agp.h"
#define SVWRKS_COMMAND		0x04
#define SVWRKS_APSIZE		0x10
#define SVWRKS_MMBASE		0x14
#define SVWRKS_CACHING		0x4b
#define SVWRKS_AGP_ENABLE	0x60
#define SVWRKS_FEATURE		0x68

#define SVWRKS_SIZE_MASK	0xfe000000

/* Memory mapped registers */
#define SVWRKS_GART_CACHE	0x02
#define SVWRKS_GATTBASE		0x04
#define SVWRKS_TLBFLUSH		0x10
#define SVWRKS_POSTFLUSH	0x14
#define SVWRKS_DIRFLUSH		0x0c
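
/*
 * The GATT is kept as a two-level structure: a single page directory
 * whose slots point at individual GATT pages, each of which is one
 * kernel page of translation entries.  serverworks_page_map tracks one
 * such page both by its kernel virtual address (real) and by an
 * uncached ioremap()ed alias (remapped) used for the writel() accesses.
 */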
struct serverworks_page_map {
	unsigned long *real;
	unsigned long __iomem *remapped;
};

static struct _serverworks_private {
	struct pci_dev *svrwrks_dev;	/* device one */
	volatile u8 __iomem *registers;
	struct serverworks_page_map **gatt_pages;
	int num_tables;
	struct serverworks_page_map scratch_dir;

	int gart_addr_ofs;
	int mm_addr_ofs;
} serverworks_private;
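
/*
 * Allocate and prepare one page of GATT entries: grab a free page, mark
 * it reserved, map an uncached alias of it, and point every entry at
 * the bridge's scratch page so untranslated accesses hit harmless memory.
 */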
static int serverworks_create_page_map(struct serverworks_page_map *page_map)
{
	int i;

	page_map->real = (unsigned long *) __get_free_page(GFP_KERNEL);
	if (page_map->real == NULL) {
		return -ENOMEM;
	}
	SetPageReserved(virt_to_page(page_map->real));
	global_cache_flush();
	page_map->remapped = ioremap_nocache(virt_to_phys(page_map->real),
					     PAGE_SIZE);
	if (page_map->remapped == NULL) {
		ClearPageReserved(virt_to_page(page_map->real));
		free_page((unsigned long) page_map->real);
		page_map->real = NULL;
		return -ENOMEM;
	}
	global_cache_flush();

	for (i = 0; i < PAGE_SIZE / sizeof(unsigned long); i++)
		writel(agp_bridge->scratch_page, page_map->remapped+i);

	return 0;
}
static void serverworks_free_page_map(struct serverworks_page_map *page_map)
{
	iounmap(page_map->remapped);
	ClearPageReserved(virt_to_page(page_map->real));
	free_page((unsigned long) page_map->real);
}
static void serverworks_free_gatt_pages(void)
{
	int i;
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;

	tables = serverworks_private.gatt_pages;
	for (i = 0; i < serverworks_private.num_tables; i++) {
		entry = tables[i];
		if (entry != NULL) {
			if (entry->real != NULL) {
				serverworks_free_page_map(entry);
			}
			kfree(entry);
		}
	}
	kfree(tables);
}
static int serverworks_create_gatt_pages(int nr_tables)
{
	struct serverworks_page_map **tables;
	struct serverworks_page_map *entry;
	int retval = 0;
	int i;

	tables = kmalloc((nr_tables + 1) * sizeof(struct serverworks_page_map *),
			 GFP_KERNEL);
	if (tables == NULL) {
		return -ENOMEM;
	}
	memset(tables, 0, sizeof(struct serverworks_page_map *) * (nr_tables + 1));
	for (i = 0; i < nr_tables; i++) {
		entry = kmalloc(sizeof(struct serverworks_page_map), GFP_KERNEL);
		if (entry == NULL) {
			retval = -ENOMEM;
			break;
		}
		memset(entry, 0, sizeof(struct serverworks_page_map));
		tables[i] = entry;
		retval = serverworks_create_page_map(entry);
		if (retval != 0)
			break;
	}
	serverworks_private.num_tables = nr_tables;
	serverworks_private.gatt_pages = tables;

	if (retval != 0)
		serverworks_free_gatt_pages();

	return retval;
}
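
/*
 * Address decoding helpers.  Each GATT page holds 1024 entries and so
 * maps 4MB of the aperture: bits 31:22 of a bus address (relative to
 * gart_bus_addr) select the GATT page, and bits 21:12 select the entry
 * within that page.
 */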
#define SVRWRKS_GET_GATT(addr) (serverworks_private.gatt_pages[\
	GET_PAGE_DIR_IDX(addr)]->remapped)

#ifndef GET_PAGE_DIR_OFF
#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
#endif

#ifndef GET_PAGE_DIR_IDX
#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr) - \
	GET_PAGE_DIR_OFF(agp_bridge->gart_bus_addr))
#endif

#ifndef GET_GATT_OFF
#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
#endif
static int serverworks_create_gatt_table(void)
{
	struct aper_size_info_lvl2 *value;
	struct serverworks_page_map page_dir;
	int retval;
	u32 temp;
	int i;

	value = A_SIZE_LVL2(agp_bridge->current_size);
	retval = serverworks_create_page_map(&page_dir);
	if (retval != 0) {
		return retval;
	}
	retval = serverworks_create_page_map(&serverworks_private.scratch_dir);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		return retval;
	}
	/* Create a fake scratch directory */
	for (i = 0; i < 1024; i++) {
		writel(agp_bridge->scratch_page, serverworks_private.scratch_dir.remapped+i);
		writel(virt_to_phys(serverworks_private.scratch_dir.real) | 1, page_dir.remapped+i);
	}

	retval = serverworks_create_gatt_pages(value->num_entries / 1024);
	if (retval != 0) {
		serverworks_free_page_map(&page_dir);
		serverworks_free_page_map(&serverworks_private.scratch_dir);
		return retval;
	}

	agp_bridge->gatt_table_real = (u32 *)page_dir.real;
	agp_bridge->gatt_table = (u32 __iomem *)page_dir.remapped;
	agp_bridge->gatt_bus_addr = virt_to_phys(page_dir.real);

	/* Get the address for the gart region.
	 * This is a bus address even on the alpha, b/c it's
	 * used to program the agp master not the cpu.
	 */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

	/* Calculate the agp offset */
	for (i = 0; i < value->num_entries / 1024; i++)
		writel(virt_to_phys(serverworks_private.gatt_pages[i]->real)|1, page_dir.remapped+i);

	return 0;
}
static int serverworks_free_gatt_table(void)
{
	struct serverworks_page_map page_dir;

	page_dir.real = (unsigned long *)agp_bridge->gatt_table_real;
	page_dir.remapped = (unsigned long __iomem *)agp_bridge->gatt_table;

	serverworks_free_gatt_pages();
	serverworks_free_page_map(&page_dir);
	serverworks_free_page_map(&serverworks_private.scratch_dir);
	return 0;
}
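
/*
 * The aperture size is probed the usual BAR way: write the size mask
 * to the aperture base register, read back which bits stick, and match
 * the result against the size_value column of serverworks_sizes.
 */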
static int serverworks_fetch_size(void)
{
	int i;
	u32 temp;
	u32 temp2;
	struct aper_size_info_lvl2 *values;

	values = A_SIZE_LVL2(agp_bridge->driver->aperture_sizes);
	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp);
	pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs,
			       SVWRKS_SIZE_MASK);
	pci_read_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, &temp2);
	pci_write_config_dword(agp_bridge->dev, serverworks_private.gart_addr_ofs, temp);
	temp2 &= SVWRKS_SIZE_MASK;

	for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
		if (temp2 == values[i].size_value) {
			agp_bridge->previous_size =
			    agp_bridge->current_size = (void *) (values + i);

			agp_bridge->aperture_size_idx = i;
			return values[i].size;
		}
	}

	return 0;
}
/*
 * This routine could be implemented by taking the addresses
 * written to the GATT, and flushing them individually.  However
 * currently it just flushes the whole table, which is probably
 * more efficient, since agp_memory blocks can be a large number of
 * entries.
 */
static void serverworks_tlbflush(struct agp_memory *temp)
{
	OUTREG8(serverworks_private.registers, SVWRKS_POSTFLUSH, 1);
	while (INREG8(serverworks_private.registers, SVWRKS_POSTFLUSH) == 1)
		cpu_relax();

	OUTREG32(serverworks_private.registers, SVWRKS_DIRFLUSH, 1);
	while (INREG32(serverworks_private.registers, SVWRKS_DIRFLUSH) == 1)
		cpu_relax();
}
static int serverworks_configure(void)
{
	struct aper_size_info_lvl2 *current_size;
	u32 temp;
	u8 enable_reg;
	u16 cap_reg;

	current_size = A_SIZE_LVL2(agp_bridge->current_size);

	/* Get the memory mapped registers */
	pci_read_config_dword(agp_bridge->dev, serverworks_private.mm_addr_ofs, &temp);
	temp = (temp & PCI_BASE_ADDRESS_MEM_MASK);
	serverworks_private.registers = (volatile u8 __iomem *) ioremap(temp, 4096);
	if (!serverworks_private.registers) {
		printk(KERN_ERR PFX "Unable to ioremap() memory.\n");
		return -ENOMEM;
	}

	OUTREG8(serverworks_private.registers, SVWRKS_GART_CACHE, 0x0a);

	OUTREG32(serverworks_private.registers, SVWRKS_GATTBASE,
		 agp_bridge->gatt_bus_addr);

	cap_reg = INREG16(serverworks_private.registers, SVWRKS_COMMAND);
	cap_reg &= ~0x0007;
	cap_reg |= 0x4;
	OUTREG16(serverworks_private.registers, SVWRKS_COMMAND, cap_reg);

	pci_read_config_byte(serverworks_private.svrwrks_dev,
			     SVWRKS_AGP_ENABLE, &enable_reg);
	enable_reg |= 0x1; /* Agp Enable bit */
	pci_write_config_byte(serverworks_private.svrwrks_dev,
			      SVWRKS_AGP_ENABLE, enable_reg);
	serverworks_tlbflush(NULL);

	agp_bridge->capndx = pci_find_capability(serverworks_private.svrwrks_dev, PCI_CAP_ID_AGP);

	/* Fill in the mode register */
	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      agp_bridge->capndx+PCI_AGP_STATUS, &agp_bridge->mode);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_CACHING, &enable_reg);
	enable_reg &= ~0x3;
	pci_write_config_byte(agp_bridge->dev, SVWRKS_CACHING, enable_reg);

	pci_read_config_byte(agp_bridge->dev, SVWRKS_FEATURE, &enable_reg);
	enable_reg |= (1<<6);
	pci_write_config_byte(agp_bridge->dev, SVWRKS_FEATURE, enable_reg);

	return 0;
}
static void serverworks_cleanup(void)
{
	iounmap((void __iomem *) serverworks_private.registers);
}
static int serverworks_insert_memory(struct agp_memory *mem,
				     off_t pg_start, int type)
{
	int i, j, num_entries;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}
	if ((pg_start + mem->page_count) > num_entries) {
		return -EINVAL;
	}

	j = pg_start;
	while (j < (pg_start + mem->page_count)) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		if (!PGE_EMPTY(agp_bridge, readl(cur_gatt+GET_GATT_OFF(addr))))
			return -EBUSY;
		j++;
	}

	if (mem->is_flushed == FALSE) {
		global_cache_flush();
		mem->is_flushed = TRUE;
	}

	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
		addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->driver->mask_memory(mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
	}
	serverworks_tlbflush(mem);
	return 0;
}
static int serverworks_remove_memory(struct agp_memory *mem, off_t pg_start,
				     int type)
{
	int i;
	unsigned long __iomem *cur_gatt;
	unsigned long addr;

	if (type != 0 || mem->type != 0) {
		return -EINVAL;
	}

	global_cache_flush();
	serverworks_tlbflush(mem);

	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
		addr = (i * PAGE_SIZE) + agp_bridge->gart_bus_addr;
		cur_gatt = SVRWRKS_GET_GATT(addr);
		writel(agp_bridge->scratch_page, cur_gatt+GET_GATT_OFF(addr));
	}

	serverworks_tlbflush(mem);
	return 0;
}
static struct gatt_mask serverworks_masks[] =
{
	{.mask = 1, .type = 0}
};
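
/*
 * Supported aperture sizes.  Each entry is {aperture size in MB, number
 * of 4KB GATT entries, value read back from the masked APSIZE register}.
 */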
static struct aper_size_info_lvl2 serverworks_sizes[7] =
{
	{2048, 524288, 0x80000000},
	{1024, 262144, 0xc0000000},
	{512, 131072, 0xe0000000},
	{256, 65536, 0xf0000000},
	{128, 32768, 0xf8000000},
	{64, 16384, 0xfc000000},
	{32, 8192, 0xfe000000}
};
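
/*
 * Enable the AGP master: merge the requested mode with the bridge's
 * status register via agp_collect_device_status(), clear the fast
 * write bit, then program the bridge's AGP command register and the
 * devices behind it.
 */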
static void serverworks_agp_enable(u32 mode)
{
	u32 command;

	pci_read_config_dword(serverworks_private.svrwrks_dev,
			      agp_bridge->capndx + PCI_AGP_STATUS,
			      &command);

	command = agp_collect_device_status(mode, command);

	command &= ~0x10;	/* disable FW */
	command &= ~0x08;

	command |= 0x100;

	pci_write_config_dword(serverworks_private.svrwrks_dev,
			       agp_bridge->capndx + PCI_AGP_COMMAND,
			       command);

	agp_device_command(command, 0);
}
struct agp_bridge_driver sworks_driver = {
	.owner			= THIS_MODULE,
	.aperture_sizes		= serverworks_sizes,
	.size_type		= LVL2_APER_SIZE,
	.num_aperture_sizes	= 7,
	.configure		= serverworks_configure,
	.fetch_size		= serverworks_fetch_size,
	.cleanup		= serverworks_cleanup,
	.tlb_flush		= serverworks_tlbflush,
	.mask_memory		= agp_generic_mask_memory,
	.masks			= serverworks_masks,
	.agp_enable		= serverworks_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= serverworks_create_gatt_table,
	.free_gatt_table	= serverworks_free_gatt_table,
	.insert_memory		= serverworks_insert_memory,
	.remove_memory		= serverworks_remove_memory,
	.alloc_by_type		= agp_generic_alloc_by_type,
	.free_by_type		= agp_generic_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_destroy_page	= agp_generic_destroy_page,
};
static int __devinit agp_serverworks_probe(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct agp_bridge_data *bridge;
	struct pci_dev *bridge_dev;
	u32 temp, temp2;
	u8 cap_ptr = 0;

	/* Everything is on func 1 here so we are hardcoding function one */
	bridge_dev = pci_find_slot((unsigned int)pdev->bus->number,
				   PCI_DEVFN(0, 1));
	if (!bridge_dev) {
		printk(KERN_INFO PFX "Detected a Serverworks chipset "
		       "but could not find the secondary device.\n");
		return -ENODEV;
	}

	cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);

	switch (pdev->device) {
	case 0x0006:
		/* ServerWorks CNB20HE
		   Fail silently. */
		printk(KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n");
		return -ENODEV;

	case PCI_DEVICE_ID_SERVERWORKS_HE:
	case PCI_DEVICE_ID_SERVERWORKS_LE:
	case 0x0007:
		break;

	default:
		if (cap_ptr)
			printk(KERN_ERR PFX "Unsupported Serverworks chipset "
			       "(device id: %04x)\n", pdev->device);
		return -ENODEV;
	}

	serverworks_private.svrwrks_dev = bridge_dev;
	serverworks_private.gart_addr_ofs = 0x10;

	pci_read_config_dword(pdev, SVWRKS_APSIZE, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev, SVWRKS_APSIZE + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit aperture address, "
			       "but top bits are not zero. Disabling agp\n");
			return -ENODEV;
		}
		serverworks_private.mm_addr_ofs = 0x18;
	} else
		serverworks_private.mm_addr_ofs = 0x14;

	pci_read_config_dword(pdev, serverworks_private.mm_addr_ofs, &temp);
	if (temp & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		pci_read_config_dword(pdev,
				      serverworks_private.mm_addr_ofs + 4, &temp2);
		if (temp2 != 0) {
			printk(KERN_INFO PFX "Detected 64 bit MMIO address, "
			       "but top bits are not zero. Disabling agp\n");
			return -ENODEV;
		}
	}

	bridge = agp_alloc_bridge();
	if (!bridge)
		return -ENOMEM;

	bridge->driver = &sworks_driver;
	bridge->dev_private_data = &serverworks_private;
	bridge->dev = pdev;
	bridge->capndx = cap_ptr;

	pci_set_drvdata(pdev, bridge);
	return agp_add_bridge(bridge);
}
static void __devexit agp_serverworks_remove(struct pci_dev *pdev)
{
	struct agp_bridge_data *bridge = pci_get_drvdata(pdev);

	agp_remove_bridge(bridge);
	agp_put_bridge(bridge);
}
static struct pci_device_id agp_serverworks_pci_table[] = {
	{
	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
	.class_mask	= ~0,
	.vendor		= PCI_VENDOR_ID_SERVERWORKS,
	.device		= PCI_ANY_ID,
	.subvendor	= PCI_ANY_ID,
	.subdevice	= PCI_ANY_ID,
	},
	{ }
};

MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);
static struct pci_driver agp_serverworks_pci_driver = {
	.name		= "agpgart-serverworks",
	.id_table	= agp_serverworks_pci_table,
	.probe		= agp_serverworks_probe,
	.remove		= agp_serverworks_remove,
};
static int __init agp_serverworks_init(void)
{
	return pci_module_init(&agp_serverworks_pci_driver);
}

static void __exit agp_serverworks_cleanup(void)
{
	pci_unregister_driver(&agp_serverworks_pci_driver);
}

module_init(agp_serverworks_init);
module_exit(agp_serverworks_cleanup);

MODULE_LICENSE("GPL and additional rights");