/*
 * Copyright (C) 2004 Silicon Graphics, Inc.
 * Copyright (C) 2002-2005 Dave Jones.
 * Copyright (C) 1999 Jeff Hartmann.
 * Copyright (C) 1999 Precision Insight, Inc.
 * Copyright (C) 1999 Xi Graphics, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * TODO:
 * - Allocate more than order 0 pages to avoid too much linear map splitting.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/miscdevice.h>
#include <linux/agp_backend.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include "agp.h"
__u32 *agp_gatt_table;
int agp_memory_reserved;

/*
 * Needed by the Nforce GART driver for the time being. Would be
 * nice to do this some other way instead of needing this export.
 */
EXPORT_SYMBOL_GPL(agp_memory_reserved);
/*
 * Generic routines for handling agp_memory structures -
 * They use the basic page allocation routines to do the brunt of the work.
 */

void agp_free_key(int key)
{
        if (key < 0)
                return;

        clear_bit(key, agp_bridge->key_list);
}
EXPORT_SYMBOL(agp_free_key);
static int agp_get_key(void)
{
        int bit;

        bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
        if (bit < MAXKEY) {
                set_bit(bit, agp_bridge->key_list);
                return bit;
        }
        return -1;
}
void agp_flush_chipset(struct agp_bridge_data *bridge)
{
        if (bridge->driver->chipset_flush)
                bridge->driver->chipset_flush(bridge);
}
EXPORT_SYMBOL(agp_flush_chipset);
/*
 * Use kmalloc if possible for the page list. Otherwise fall back to
 * vmalloc. This speeds things up and also saves memory for small AGP
 * regions.
 */

void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
        mem->pages = NULL;
        mem->vmalloc_flag = false;

        if (size <= 2*PAGE_SIZE)
                mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
        if (mem->pages == NULL) {
                mem->pages = vmalloc(size);
                mem->vmalloc_flag = true;
        }
}
EXPORT_SYMBOL(agp_alloc_page_array);
void agp_free_page_array(struct agp_memory *mem)
{
        if (mem->vmalloc_flag) {
                vfree(mem->pages);
        } else {
                kfree(mem->pages);
        }
}
EXPORT_SYMBOL(agp_free_page_array);
static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
{
        struct agp_memory *new;
        unsigned long alloc_size = num_agp_pages*sizeof(struct page *);

        new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
        if (new == NULL)
                return NULL;

        new->key = agp_get_key();

        agp_alloc_page_array(alloc_size, new);

        if (new->pages == NULL) {
                agp_free_key(new->key);
                kfree(new);
                return NULL;
        }
        new->num_scratch_pages = 0;
        return new;
}
struct agp_memory *agp_create_memory(int scratch_pages)
{
        struct agp_memory *new;

        new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
        if (new == NULL)
                return NULL;

        new->key = agp_get_key();

        agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);

        if (new->pages == NULL) {
                agp_free_key(new->key);
                kfree(new);
                return NULL;
        }
        new->num_scratch_pages = scratch_pages;
        new->type = AGP_NORMAL_MEMORY;
        return new;
}
EXPORT_SYMBOL(agp_create_memory);
/**
 * agp_free_memory - free memory associated with an agp_memory pointer.
 *
 * @curr: agp_memory pointer to be freed.
 *
 * It is the only function that can be called when the backend is not owned
 * by the caller.  (So it can free memory on client death.)
 */
void agp_free_memory(struct agp_memory *curr)
{
        size_t i;

        if (curr == NULL)
                return;

        if (curr->is_bound)
                agp_unbind_memory(curr);

        if (curr->type >= AGP_USER_TYPES) {
                agp_generic_free_by_type(curr);
                return;
        }

        if (curr->type != 0) {
                curr->bridge->driver->free_by_type(curr);
                return;
        }
        if (curr->page_count != 0) {
                if (curr->bridge->driver->agp_destroy_pages) {
                        curr->bridge->driver->agp_destroy_pages(curr);
                } else {
                        for (i = 0; i < curr->page_count; i++) {
                                curr->bridge->driver->agp_destroy_page(
                                        curr->pages[i],
                                        AGP_PAGE_DESTROY_UNMAP);
                        }
                        for (i = 0; i < curr->page_count; i++) {
                                curr->bridge->driver->agp_destroy_page(
                                        curr->pages[i],
                                        AGP_PAGE_DESTROY_FREE);
                        }
                }
        }
        agp_free_key(curr->key);
        agp_free_page_array(curr);
        kfree(curr);
}
EXPORT_SYMBOL(agp_free_memory);
#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
/**
 * agp_allocate_memory - allocate a group of pages of a certain type.
 *
 * @page_count: size_t argument of the number of pages
 * @type: u32 argument of the type of memory to be allocated.
 *
 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
 * maps to physical ram.  Any other type is device dependent.
 *
 * It returns NULL whenever memory is unavailable.
 */
struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
                                       size_t page_count, u32 type)
{
        int scratch_pages;
        struct agp_memory *new;
        size_t i;

        if (!bridge)
                return NULL;

        if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
                return NULL;

        if (type >= AGP_USER_TYPES) {
                new = agp_generic_alloc_user(page_count, type);
                if (new)
                        new->bridge = bridge;
                return new;
        }

        if (type != 0) {
                new = bridge->driver->alloc_by_type(page_count, type);
                if (new)
                        new->bridge = bridge;
                return new;
        }

        scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;

        new = agp_create_memory(scratch_pages);
        if (new == NULL)
                return NULL;

        if (bridge->driver->agp_alloc_pages) {
                if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
                        agp_free_memory(new);
                        return NULL;
                }
                new->bridge = bridge;
                return new;
        }

        for (i = 0; i < page_count; i++) {
                struct page *page = bridge->driver->agp_alloc_page(bridge);

                if (page == NULL) {
                        agp_free_memory(new);
                        return NULL;
                }
                new->pages[i] = page;
                new->page_count++;
        }
        new->bridge = bridge;

        return new;
}
EXPORT_SYMBOL(agp_allocate_memory);
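/*
 * Illustrative sketch (not part of the original file): how a caller such as
 * the agpgart ioctl layer or a DRM driver might pair agp_allocate_memory()
 * with agp_free_memory().  The bridge pointer is assumed to come from
 * agp_backend_acquire() or agp_find_bridge(); the page count of 16 is an
 * arbitrary example value.
 *
 *	struct agp_memory *mem;
 *
 *	mem = agp_allocate_memory(bridge, 16, AGP_NORMAL_MEMORY);
 *	if (mem == NULL)
 *		return -ENOMEM;
 *	// ... use mem->pages[0..15], typically via agp_bind_memory() ...
 *	agp_free_memory(mem);
 */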
/* End - Generic routines for handling agp_memory structures */
static int agp_return_size(void)
{
        int current_size;
        void *temp;

        temp = agp_bridge->current_size;

        switch (agp_bridge->driver->size_type) {
        case U8_APER_SIZE:
                current_size = A_SIZE_8(temp)->size;
                break;
        case U16_APER_SIZE:
                current_size = A_SIZE_16(temp)->size;
                break;
        case U32_APER_SIZE:
                current_size = A_SIZE_32(temp)->size;
                break;
        case LVL2_APER_SIZE:
                current_size = A_SIZE_LVL2(temp)->size;
                break;
        case FIXED_APER_SIZE:
                current_size = A_SIZE_FIX(temp)->size;
                break;
        default:
                current_size = 0;
                break;
        }

        current_size -= (agp_memory_reserved / (1024*1024));
        return current_size;
}
int agp_num_entries(void)
{
        int num_entries;
        void *temp;

        temp = agp_bridge->current_size;

        switch (agp_bridge->driver->size_type) {
        case U8_APER_SIZE:
                num_entries = A_SIZE_8(temp)->num_entries;
                break;
        case U16_APER_SIZE:
                num_entries = A_SIZE_16(temp)->num_entries;
                break;
        case U32_APER_SIZE:
                num_entries = A_SIZE_32(temp)->num_entries;
                break;
        case LVL2_APER_SIZE:
                num_entries = A_SIZE_LVL2(temp)->num_entries;
                break;
        case FIXED_APER_SIZE:
                num_entries = A_SIZE_FIX(temp)->num_entries;
                break;
        default:
                num_entries = 0;
                break;
        }

        num_entries -= agp_memory_reserved>>PAGE_SHIFT;
        if (num_entries < 0)
                num_entries = 0;
        return num_entries;
}
EXPORT_SYMBOL_GPL(agp_num_entries);
/**
 * agp_copy_info - copy bridge state information
 *
 * @info: agp_kern_info pointer.  The caller should ensure that this pointer is valid.
 *
 * This function copies information about the agp bridge device and the state of
 * the agp backend into an agp_kern_info pointer.
 */
int agp_copy_info(struct agp_bridge_data *bridge, struct agp_kern_info *info)
{
        memset(info, 0, sizeof(struct agp_kern_info));
        if (!bridge) {
                info->chipset = NOT_SUPPORTED;
                return -EIO;
        }

        info->version.major = bridge->version->major;
        info->version.minor = bridge->version->minor;
        info->chipset = SUPPORTED;
        info->device = bridge->dev;
        if (bridge->mode & AGPSTAT_MODE_3_0)
                info->mode = bridge->mode & ~AGP3_RESERVED_MASK;
        else
                info->mode = bridge->mode & ~AGP2_RESERVED_MASK;
        info->aper_base = bridge->gart_bus_addr;
        info->aper_size = agp_return_size();
        info->max_memory = bridge->max_memory_agp;
        info->current_memory = atomic_read(&bridge->current_memory_agp);
        info->cant_use_aperture = bridge->driver->cant_use_aperture;
        info->vm_ops = bridge->vm_ops;
        info->page_mask = ~0UL;
        return 0;
}
EXPORT_SYMBOL(agp_copy_info);
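/*
 * Illustrative sketch (not part of the original file): a typical consumer,
 * e.g. the DRM core, fills an agp_kern_info on the stack and checks the
 * chipset field before trusting the aperture details.
 *
 *	struct agp_kern_info info;
 *
 *	if (agp_copy_info(bridge, &info) != 0 || info.chipset == NOT_SUPPORTED)
 *		return -ENODEV;
 *	// info.aper_base / info.aper_size (in MB) describe the aperture
 */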
/* End - Routine to copy over information structure */

/*
 * Routines for handling swapping of agp_memory into the GATT -
 * These routines take agp_memory and insert them into the GATT.
 * They call device specific routines to actually write to the GATT.
 */
/**
 * agp_bind_memory - Bind an agp_memory structure into the GATT.
 *
 * @curr: agp_memory pointer
 * @pg_start: an offset into the graphics aperture translation table
 *
 * It returns -EINVAL if the pointer == NULL.
 * It returns -EBUSY if the area of the table requested is already in use.
 */
int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
{
        int ret_val;

        if (curr == NULL)
                return -EINVAL;

        if (curr->is_bound) {
                printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
                return -EINVAL;
        }
        if (!curr->is_flushed) {
                curr->bridge->driver->cache_flush();
                curr->is_flushed = true;
        }

        ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
        if (ret_val != 0)
                return ret_val;

        curr->is_bound = true;
        curr->pg_start = pg_start;
        spin_lock(&agp_bridge->mapped_lock);
        list_add(&curr->mapped_list, &agp_bridge->mapped_list);
        spin_unlock(&agp_bridge->mapped_lock);

        return 0;
}
EXPORT_SYMBOL(agp_bind_memory);
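/*
 * Illustrative sketch (not part of the original file): binding previously
 * allocated memory at a page offset into the aperture and unbinding it
 * again.  pg_start is measured in GATT entries (pages), not bytes; the
 * err_free label is a hypothetical cleanup path in the caller.
 *
 *	if (agp_bind_memory(mem, pg_start) != 0)
 *		goto err_free;
 *	// the device may now access the bound aperture range
 *	agp_unbind_memory(mem);
 */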
/**
 * agp_unbind_memory - Removes an agp_memory structure from the GATT
 *
 * @curr: agp_memory pointer to be removed from the GATT.
 *
 * It returns -EINVAL if this piece of agp_memory is not currently bound to
 * the graphics aperture translation table or if the agp_memory pointer == NULL
 */
int agp_unbind_memory(struct agp_memory *curr)
{
        int ret_val;

        if (curr == NULL)
                return -EINVAL;

        if (!curr->is_bound) {
                printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
                return -EINVAL;
        }

        ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
        if (ret_val != 0)
                return ret_val;

        curr->is_bound = false;

        spin_lock(&curr->bridge->mapped_lock);
        list_del(&curr->mapped_list);
        spin_unlock(&curr->bridge->mapped_lock);

        return 0;
}
EXPORT_SYMBOL(agp_unbind_memory);
/**
 * agp_rebind_memory - Rewrite the entire GATT, useful on resume
 */
int agp_rebind_memory(void)
{
        struct agp_memory *curr;
        int ret_val = 0;

        spin_lock(&agp_bridge->mapped_lock);
        list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
                ret_val = curr->bridge->driver->insert_memory(curr,
                                                              curr->pg_start,
                                                              curr->type);
                if (ret_val != 0)
                        break;
        }
        spin_unlock(&agp_bridge->mapped_lock);
        return ret_val;
}
EXPORT_SYMBOL(agp_rebind_memory);
/* End - Routines for handling swapping of agp_memory into the GATT */


/* Generic Agp routines - Start */
static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
        u32 tmp;

        if (*requested_mode & AGP2_RESERVED_MASK) {
                printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
                        *requested_mode & AGP2_RESERVED_MASK, *requested_mode);
                *requested_mode &= ~AGP2_RESERVED_MASK;
        }

        /*
         * Some dumb bridges are programmed to disobey the AGP2 spec.
         * This is likely a BIOS misprogramming rather than poweron default, or
         * it would be a lot more common.
         * https://bugs.freedesktop.org/show_bug.cgi?id=8816
         * AGPv2 spec 6.1.9 states:
         *   The RATE field indicates the data transfer rates supported by this
         *   device. A.G.P. devices must report all that apply.
         * Fix them up as best we can.
         */
        switch (*bridge_agpstat & 7) {
        case 4:
                *bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
                printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
                        "Fixing up support for x2 & x1\n");
                break;
        case 2:
                *bridge_agpstat |= AGPSTAT2_1X;
                printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
                        "Fixing up support for x1\n");
                break;
        default:
                break;
        }

        /* Check the speed bits make sense. Only one should be set. */
        tmp = *requested_mode & 7;
        switch (tmp) {
        case 0:
                printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to x1 mode.\n", current->comm);
                *requested_mode |= AGPSTAT2_1X;
                break;
        case 1:
        case 2:
                break;
        case 3:
                *requested_mode &= ~(AGPSTAT2_1X);      /* rate=2 */
                break;
        case 4:
                break;
        case 5:
        case 6:
        case 7:
                *requested_mode &= ~(AGPSTAT2_1X|AGPSTAT2_2X); /* rate=4 */
                break;
        }

        /* disable SBA if it's not supported */
        if (!((*bridge_agpstat & AGPSTAT_SBA) && (*vga_agpstat & AGPSTAT_SBA) && (*requested_mode & AGPSTAT_SBA)))
                *bridge_agpstat &= ~AGPSTAT_SBA;

        if (!((*bridge_agpstat & AGPSTAT2_4X) && (*vga_agpstat & AGPSTAT2_4X) && (*requested_mode & AGPSTAT2_4X)))
                *bridge_agpstat &= ~AGPSTAT2_4X;

        if (!((*bridge_agpstat & AGPSTAT2_2X) && (*vga_agpstat & AGPSTAT2_2X) && (*requested_mode & AGPSTAT2_2X)))
                *bridge_agpstat &= ~AGPSTAT2_2X;

        if (!((*bridge_agpstat & AGPSTAT2_1X) && (*vga_agpstat & AGPSTAT2_1X) && (*requested_mode & AGPSTAT2_1X)))
                *bridge_agpstat &= ~AGPSTAT2_1X;

        /* Now we know what mode it should be, clear out the unwanted bits. */
        if (*bridge_agpstat & AGPSTAT2_4X)
                *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_2X);        /* 4X */

        if (*bridge_agpstat & AGPSTAT2_2X)
                *bridge_agpstat &= ~(AGPSTAT2_1X | AGPSTAT2_4X);        /* 2X */

        if (*bridge_agpstat & AGPSTAT2_1X)
                *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);        /* 1X */

        /* Apply any errata. */
        if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
                *bridge_agpstat &= ~AGPSTAT_FW;

        if (agp_bridge->flags & AGP_ERRATA_SBA)
                *bridge_agpstat &= ~AGPSTAT_SBA;

        if (agp_bridge->flags & AGP_ERRATA_1X) {
                *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
                *bridge_agpstat |= AGPSTAT2_1X;
        }

        /* If we've dropped down to 1X, disable fast writes. */
        if (*bridge_agpstat & AGPSTAT2_1X)
                *bridge_agpstat &= ~AGPSTAT_FW;
}
/*
 * requested_mode = Mode requested by (typically) X.
 * bridge_agpstat = PCI_AGP_STATUS from agp bridge.
 * vga_agpstat = PCI_AGP_STATUS from graphic card.
 */
static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_agpstat)
{
        u32 origbridge = *bridge_agpstat, origvga = *vga_agpstat;
        u32 tmp;

        if (*requested_mode & AGP3_RESERVED_MASK) {
                printk(KERN_INFO PFX "reserved bits set (%x) in mode 0x%x. Fixed.\n",
                        *requested_mode & AGP3_RESERVED_MASK, *requested_mode);
                *requested_mode &= ~AGP3_RESERVED_MASK;
        }

        /* Check the speed bits make sense. */
        tmp = *requested_mode & 7;
        if (tmp == 0) {
                printk(KERN_INFO PFX "%s tried to set rate=x0. Setting to AGP3 x4 mode.\n", current->comm);
                *requested_mode |= AGPSTAT3_4X;
        } else if (tmp >= 3) {
                printk(KERN_INFO PFX "%s tried to set rate=x%d. Setting to AGP3 x8 mode.\n", current->comm, tmp * 4);
                *requested_mode = (*requested_mode & ~7) | AGPSTAT3_8X;
        }

        /* ARQSZ - Set the value to the maximum one.
         * Don't allow the mode register to override values. */
        *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_ARQSZ) |
                max_t(u32, (*bridge_agpstat & AGPSTAT_ARQSZ), (*vga_agpstat & AGPSTAT_ARQSZ)));

        /* Calibration cycle.
         * Don't allow the mode register to override values. */
        *bridge_agpstat = ((*bridge_agpstat & ~AGPSTAT_CAL_MASK) |
                min_t(u32, (*bridge_agpstat & AGPSTAT_CAL_MASK), (*vga_agpstat & AGPSTAT_CAL_MASK)));

        /* SBA *must* be supported for AGP v3 */
        *bridge_agpstat |= AGPSTAT_SBA;

        /*
         * Check for invalid speeds. This can happen when applications
         * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
         */
        if (*requested_mode & AGPSTAT_MODE_3_0) {
                /*
                 * Caller hasn't a clue what it is doing. Bridge is in 3.0 mode,
                 * have been passed a 3.0 mode, but with 2.x speed bits set.
                 * AGP2.x 4x -> AGP3.0 4x.
                 */
                if (*requested_mode & AGPSTAT2_4X) {
                        printk(KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
                                current->comm, *requested_mode);
                        *requested_mode &= ~AGPSTAT2_4X;
                        *requested_mode |= AGPSTAT3_4X;
                }
        } else {
                /*
                 * The caller doesn't know what they are doing. We are in 3.0 mode,
                 * but have been passed an AGP 2.x mode.
                 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
                 */
                printk(KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
                        current->comm, *requested_mode);
                *requested_mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
                *requested_mode |= AGPSTAT3_4X;
        }

        if (*requested_mode & AGPSTAT3_8X) {
                if (!(*bridge_agpstat & AGPSTAT3_8X)) {
                        *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
                        *bridge_agpstat |= AGPSTAT3_4X;
                        printk(KERN_INFO PFX "%s requested AGPx8 but bridge not capable.\n", current->comm);
                        return;
                }
                if (!(*vga_agpstat & AGPSTAT3_8X)) {
                        *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
                        *bridge_agpstat |= AGPSTAT3_4X;
                        printk(KERN_INFO PFX "%s requested AGPx8 but graphic card not capable.\n", current->comm);
                        return;
                }
                /* All set, bridge & device can do AGP x8 */
                *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);

        } else if (*requested_mode & AGPSTAT3_4X) {
                *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
                *bridge_agpstat |= AGPSTAT3_4X;

        } else {
                /*
                 * If we didn't specify an AGP mode, we see if both
                 * the graphics card, and the bridge can do x8, and use if so.
                 * If not, we fall back to x4 mode.
                 */
                if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
                        printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
                               "supported by bridge & card (x8).\n");
                        *bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
                        *vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
                } else {
                        printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
                        if (!(*bridge_agpstat & AGPSTAT3_8X)) {
                                printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
                                        *bridge_agpstat, origbridge);
                                *bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
                                *bridge_agpstat |= AGPSTAT3_4X;
                        }
                        if (!(*vga_agpstat & AGPSTAT3_8X)) {
                                printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
                                        *vga_agpstat, origvga);
                                *vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
                                *vga_agpstat |= AGPSTAT3_4X;
                        }
                }
        }

        /* Apply any errata. */
        if (agp_bridge->flags & AGP_ERRATA_FASTWRITES)
                *bridge_agpstat &= ~AGPSTAT_FW;

        if (agp_bridge->flags & AGP_ERRATA_SBA)
                *bridge_agpstat &= ~AGPSTAT_SBA;

        if (agp_bridge->flags & AGP_ERRATA_1X) {
                *bridge_agpstat &= ~(AGPSTAT2_2X | AGPSTAT2_4X);
                *bridge_agpstat |= AGPSTAT2_1X;
        }
}
/**
 * agp_collect_device_status - determine correct agp_cmd from various agp_stat's
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @requested_mode: requested agp_stat from userspace (Typically from X)
 * @bridge_agpstat: current agp_stat from AGP bridge.
 *
 * This function will hunt for an AGP graphics card, and try to match
 * the requested mode to the capabilities of both the bridge and the card.
 */
u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode, u32 bridge_agpstat)
{
        struct pci_dev *device = NULL;
        u32 vga_agpstat;
        u8 cap_ptr;

        for (;;) {
                device = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, device);
                if (!device) {
                        printk(KERN_INFO PFX "Couldn't find an AGP VGA controller.\n");
                        return 0;
                }
                cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
                if (cap_ptr)
                        break;
        }

        /*
         * Ok, here we have an AGP device. Disable impossible
         * settings, and adjust the readqueue to the minimum.
         */
        pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &vga_agpstat);

        /* adjust RQ depth */
        bridge_agpstat = ((bridge_agpstat & ~AGPSTAT_RQ_DEPTH) |
                min_t(u32, (requested_mode & AGPSTAT_RQ_DEPTH),
                        min_t(u32, (bridge_agpstat & AGPSTAT_RQ_DEPTH), (vga_agpstat & AGPSTAT_RQ_DEPTH))));

        /* disable FW if it's not supported */
        if (!((bridge_agpstat & AGPSTAT_FW) &&
                (vga_agpstat & AGPSTAT_FW) &&
                (requested_mode & AGPSTAT_FW)))
                bridge_agpstat &= ~AGPSTAT_FW;

        /* Check to see if we are operating in 3.0 mode */
        if (agp_bridge->mode & AGPSTAT_MODE_3_0)
                agp_v3_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);
        else
                agp_v2_parse_one(&requested_mode, &bridge_agpstat, &vga_agpstat);

        pci_dev_put(device);
        return bridge_agpstat;
}
EXPORT_SYMBOL(agp_collect_device_status);
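/*
 * Illustrative sketch (not part of the original file): how a chipset
 * driver's ->agp_enable() hook typically combines this helper with
 * agp_device_command(), mirroring agp_generic_enable() below.  The
 * requested_mode and bridge_agpstat locals are assumed to exist in the
 * caller.
 *
 *	pci_read_config_dword(bridge->dev,
 *			      bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
 *	bridge_agpstat = agp_collect_device_status(bridge, requested_mode,
 *						   bridge_agpstat);
 *	bridge_agpstat |= AGPSTAT_AGP_ENABLE;
 *	agp_device_command(bridge_agpstat, false);
 */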
void agp_device_command(u32 bridge_agpstat, bool agp_v3)
{
        struct pci_dev *device = NULL;
        int mode;

        mode = bridge_agpstat & 0x7;
        if (agp_v3)
                mode *= 4;

        for_each_pci_dev(device) {
                u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
                if (!agp)
                        continue;

                dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
                         agp_v3 ? 3 : 2, mode);
                pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
        }
}
EXPORT_SYMBOL(agp_device_command);
void get_agp_version(struct agp_bridge_data *bridge)
{
        u32 ncapid;

        /* Exit early if already set by errata workarounds. */
        if (bridge->major_version != 0)
                return;

        pci_read_config_dword(bridge->dev, bridge->capndx, &ncapid);
        bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
        bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
}
EXPORT_SYMBOL(get_agp_version);
void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
{
        u32 bridge_agpstat, temp;

        get_agp_version(agp_bridge);

        dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
                 agp_bridge->major_version, agp_bridge->minor_version);

        pci_read_config_dword(agp_bridge->dev,
                              agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);

        bridge_agpstat = agp_collect_device_status(agp_bridge, requested_mode, bridge_agpstat);
        if (bridge_agpstat == 0)
                /* Something bad happened. FIXME: Return error code? */
                return;

        bridge_agpstat |= AGPSTAT_AGP_ENABLE;

        /* Do AGP version specific frobbing. */
        if (bridge->major_version >= 3) {
                if (bridge->mode & AGPSTAT_MODE_3_0) {
                        /* If we have 3.5, we can do the isoch stuff. */
                        if (bridge->minor_version >= 5)
                                agp_3_5_enable(bridge);
                        agp_device_command(bridge_agpstat, true);
                        return;
                } else {
                        /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
                        bridge_agpstat &= ~(7<<10);
                        pci_read_config_dword(bridge->dev,
                                              bridge->capndx+AGPCTRL, &temp);
                        temp |= (1<<9);
                        pci_write_config_dword(bridge->dev,
                                               bridge->capndx+AGPCTRL, temp);

                        dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
                }
        }

        agp_device_command(bridge_agpstat, false);
}
EXPORT_SYMBOL(agp_generic_enable);
int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
{
        char *table;
        char *table_end;
        int size;
        int page_order;
        int num_entries;
        int i;
        void *temp;
        struct page *page;

        /* The generic routines can't handle 2 level gatt's */
        if (bridge->driver->size_type == LVL2_APER_SIZE)
                return -EINVAL;

        table = NULL;
        i = bridge->aperture_size_idx;
        temp = bridge->current_size;
        size = page_order = num_entries = 0;

        if (bridge->driver->size_type != FIXED_APER_SIZE) {
                do {
                        switch (bridge->driver->size_type) {
                        case U8_APER_SIZE:
                                size = A_SIZE_8(temp)->size;
                                page_order = A_SIZE_8(temp)->page_order;
                                num_entries = A_SIZE_8(temp)->num_entries;
                                break;
                        case U16_APER_SIZE:
                                size = A_SIZE_16(temp)->size;
                                page_order = A_SIZE_16(temp)->page_order;
                                num_entries = A_SIZE_16(temp)->num_entries;
                                break;
                        case U32_APER_SIZE:
                                size = A_SIZE_32(temp)->size;
                                page_order = A_SIZE_32(temp)->page_order;
                                num_entries = A_SIZE_32(temp)->num_entries;
                                break;
                        /* This case will never really happen. */
                        case FIXED_APER_SIZE:
                        case LVL2_APER_SIZE:
                        default:
                                size = page_order = num_entries = 0;
                                break;
                        }

                        table = alloc_gatt_pages(page_order);

                        if (table == NULL) {
                                i++;
                                switch (bridge->driver->size_type) {
                                case U8_APER_SIZE:
                                        bridge->current_size = A_IDX8(bridge);
                                        break;
                                case U16_APER_SIZE:
                                        bridge->current_size = A_IDX16(bridge);
                                        break;
                                case U32_APER_SIZE:
                                        bridge->current_size = A_IDX32(bridge);
                                        break;
                                /* These cases will never really happen. */
                                case FIXED_APER_SIZE:
                                case LVL2_APER_SIZE:
                                default:
                                        break;
                                }
                                temp = bridge->current_size;
                        } else {
                                bridge->aperture_size_idx = i;
                        }
                } while (!table && (i < bridge->driver->num_aperture_sizes));
        } else {
                size = ((struct aper_size_info_fixed *) temp)->size;
                page_order = ((struct aper_size_info_fixed *) temp)->page_order;
                num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
                table = alloc_gatt_pages(page_order);
        }

        if (table == NULL)
                return -ENOMEM;

        table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

        for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
                SetPageReserved(page);

        bridge->gatt_table_real = (u32 *) table;
        agp_gatt_table = (void *)table;

        bridge->driver->cache_flush();
#ifdef CONFIG_X86
        set_memory_uc((unsigned long)table, 1 << page_order);
        bridge->gatt_table = (void *)table;
#else
        bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
                                             (PAGE_SIZE * (1 << page_order)));
        bridge->driver->cache_flush();
#endif

        if (bridge->gatt_table == NULL) {
                for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
                        ClearPageReserved(page);

                free_gatt_pages(table, page_order);

                return -ENOMEM;
        }
        bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);

        /* AK: bogus, should encode addresses > 4GB */
        for (i = 0; i < num_entries; i++) {
                writel(bridge->scratch_page, bridge->gatt_table+i);
                readl(bridge->gatt_table+i);    /* PCI Posting. */
        }

        return 0;
}
EXPORT_SYMBOL(agp_generic_create_gatt_table);
int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
{
        int page_order;
        char *table, *table_end;
        void *temp;
        struct page *page;

        temp = bridge->current_size;

        switch (bridge->driver->size_type) {
        case U8_APER_SIZE:
                page_order = A_SIZE_8(temp)->page_order;
                break;
        case U16_APER_SIZE:
                page_order = A_SIZE_16(temp)->page_order;
                break;
        case U32_APER_SIZE:
                page_order = A_SIZE_32(temp)->page_order;
                break;
        case FIXED_APER_SIZE:
                page_order = A_SIZE_FIX(temp)->page_order;
                break;
        case LVL2_APER_SIZE:
                /* The generic routines can't deal with 2 level gatt's */
                return -EINVAL;
        default:
                page_order = 0;
                break;
        }

        /* Do not worry about freeing memory, because if this is
         * called, then all agp memory is deallocated and removed
         * from the table. */

#ifdef CONFIG_X86
        set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
#else
        iounmap(bridge->gatt_table);
#endif

        table = (char *) bridge->gatt_table_real;
        table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);

        for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
                ClearPageReserved(page);

        free_gatt_pages(bridge->gatt_table_real, page_order);

        agp_gatt_table = NULL;
        bridge->gatt_table = NULL;
        bridge->gatt_table_real = NULL;
        bridge->gatt_bus_addr = 0;

        return 0;
}
EXPORT_SYMBOL(agp_generic_free_gatt_table);
int agp_generic_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
{
        int num_entries;
        size_t i;
        off_t j;
        void *temp;
        struct agp_bridge_data *bridge;
        int mask_type;

        bridge = mem->bridge;
        if (!bridge)
                return -EINVAL;

        if (mem->page_count == 0)
                return 0;

        temp = bridge->current_size;

        switch (bridge->driver->size_type) {
        case U8_APER_SIZE:
                num_entries = A_SIZE_8(temp)->num_entries;
                break;
        case U16_APER_SIZE:
                num_entries = A_SIZE_16(temp)->num_entries;
                break;
        case U32_APER_SIZE:
                num_entries = A_SIZE_32(temp)->num_entries;
                break;
        case FIXED_APER_SIZE:
                num_entries = A_SIZE_FIX(temp)->num_entries;
                break;
        case LVL2_APER_SIZE:
                /* The generic routines can't deal with 2 level gatt's */
                return -EINVAL;
        default:
                num_entries = 0;
                break;
        }

        num_entries -= agp_memory_reserved/PAGE_SIZE;
        if (num_entries < 0)
                num_entries = 0;

        if (type != mem->type)
                return -EINVAL;

        mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
        if (mask_type != 0) {
                /* The generic routines know nothing of memory types */
                return -EINVAL;
        }

        /* AK: could wrap */
        if ((pg_start + mem->page_count) > num_entries)
                return -EINVAL;

        j = pg_start;

        while (j < (pg_start + mem->page_count)) {
                if (!PGE_EMPTY(bridge, readl(bridge->gatt_table+j)))
                        return -EBUSY;
                j++;
        }

        if (!mem->is_flushed) {
                bridge->driver->cache_flush();
                mem->is_flushed = true;
        }

        for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
                writel(bridge->driver->mask_memory(bridge, mem->pages[i], mask_type),
                       bridge->gatt_table+j);
        }
        readl(bridge->gatt_table+j-1);  /* PCI Posting. */

        bridge->driver->tlb_flush(mem);
        return 0;
}
EXPORT_SYMBOL(agp_generic_insert_memory);
int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
{
        size_t i;
        struct agp_bridge_data *bridge;
        int mask_type;

        bridge = mem->bridge;
        if (!bridge)
                return -EINVAL;

        if (mem->page_count == 0)
                return 0;

        if (type != mem->type)
                return -EINVAL;

        mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
        if (mask_type != 0) {
                /* The generic routines know nothing of memory types */
                return -EINVAL;
        }

        /* AK: bogus, should encode addresses > 4GB */
        for (i = pg_start; i < (mem->page_count + pg_start); i++) {
                writel(bridge->scratch_page, bridge->gatt_table+i);
        }
        readl(bridge->gatt_table+i-1);  /* PCI Posting. */

        bridge->driver->tlb_flush(mem);
        return 0;
}
EXPORT_SYMBOL(agp_generic_remove_memory);
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
{
        return NULL;
}
EXPORT_SYMBOL(agp_generic_alloc_by_type);
void agp_generic_free_by_type(struct agp_memory *curr)
{
        agp_free_page_array(curr);
        agp_free_key(curr->key);
        kfree(curr);
}
EXPORT_SYMBOL(agp_generic_free_by_type);
struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
{
        struct agp_memory *new;
        int i;
        int pages;

        pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
        new = agp_create_user_memory(page_count);
        if (new == NULL)
                return NULL;

        for (i = 0; i < page_count; i++)
                new->pages[i] = NULL;
        new->page_count = 0;
        new->type = type;
        new->num_scratch_pages = pages;

        return new;
}
EXPORT_SYMBOL(agp_generic_alloc_user);
/*
 * Basic Page Allocation Routines -
 * These routines handle page allocation and by default they reserve the allocated
 * memory.  They also handle incrementing the current_memory_agp value, which is checked
 * against a maximum value.
 */
int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
{
        struct page *page;
        int i, ret = -ENOMEM;

        for (i = 0; i < num_pages; i++) {
                page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
                /* agp_free_memory() needs gart address */
                if (page == NULL)
                        goto out;

                map_page_into_agp(page);

                atomic_inc(&agp_bridge->current_memory_agp);

                mem->pages[i] = page;
                mem->page_count++;
        }

        set_pages_array_uc(mem->pages, num_pages);
        ret = 0;
out:
        return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);
struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
        struct page *page;

        page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
        if (page == NULL)
                return NULL;

        map_page_into_agp(page);

        atomic_inc(&agp_bridge->current_memory_agp);
        return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);
void agp_generic_destroy_pages(struct agp_memory *mem)
{
        int i;
        struct page *page;

        if (!mem)
                return;

        set_pages_array_wb(mem->pages, mem->page_count);

        for (i = 0; i < mem->page_count; i++) {
                page = mem->pages[i];

                unmap_page_from_agp(page);
                __free_page(page);
                atomic_dec(&agp_bridge->current_memory_agp);
                mem->pages[i] = NULL;
        }
}
EXPORT_SYMBOL(agp_generic_destroy_pages);
void agp_generic_destroy_page(struct page *page, int flags)
{
        if (page == NULL)
                return;

        if (flags & AGP_PAGE_DESTROY_UNMAP)
                unmap_page_from_agp(page);

        if (flags & AGP_PAGE_DESTROY_FREE) {
                __free_page(page);
                atomic_dec(&agp_bridge->current_memory_agp);
        }
}
EXPORT_SYMBOL(agp_generic_destroy_page);
/* End Basic Page Allocation Routines */
/**
 * agp_enable - initialise the agp point-to-point connection.
 *
 * @bridge: an agp_bridge_data struct allocated for the AGP host bridge.
 * @mode: agp mode register value to configure with.
 */
void agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
        if (!bridge)
                return;
        bridge->driver->agp_enable(bridge, mode);
}
EXPORT_SYMBOL(agp_enable);
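/*
 * Illustrative sketch (not part of the original file): a DRM driver enabling
 * AGP usually acquires the backend first; the mode value normally comes from
 * userspace (e.g. the X server) or from the card's capabilities.  pdev and
 * mode are assumed to be supplied by the caller.
 *
 *	struct agp_bridge_data *bridge = agp_backend_acquire(pdev);
 *
 *	if (bridge) {
 *		agp_enable(bridge, mode);
 *		// ... allocate and bind agp_memory as needed ...
 *		agp_backend_release(bridge);
 *	}
 */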
/* When we remove the global variable agp_bridge from all drivers
 * then agp_alloc_bridge and agp_generic_find_bridge need to be updated
 */
struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev)
{
        if (list_empty(&agp_bridges))
                return NULL;

        return agp_bridge;
}
static void ipi_handler(void *null)
{
        flush_agp_cache();
}

void global_cache_flush(void)
{
        if (on_each_cpu(ipi_handler, NULL, 1) != 0)
                panic(PFX "timed out waiting for the other CPUs!\n");
}
EXPORT_SYMBOL(global_cache_flush);
unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
                                      struct page *page, int type)
{
        unsigned long addr = phys_to_gart(page_to_phys(page));
        /* memory type is ignored in the generic routine */
        if (bridge->driver->masks)
                return addr | bridge->driver->masks[0].mask;
        else
                return addr;
}
EXPORT_SYMBOL(agp_generic_mask_memory);
int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
                                  int type)
{
        if (type >= AGP_USER_TYPES)
                return 0;
        else
                return type;
}
EXPORT_SYMBOL(agp_generic_type_to_mask_type);
/*
 * These functions are implemented according to the AGPv3 spec,
 * which covers implementation details that had previously been
 * left open.
 */

int agp3_generic_fetch_size(void)
{
        u16 temp_size;
        int i;
        struct aper_size_info_16 *values;

        pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
        values = A_SIZE_16(agp_bridge->driver->aperture_sizes);

        for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
                if (temp_size == values[i].size_value) {
                        agp_bridge->previous_size =
                                agp_bridge->current_size = (void *) (values + i);

                        agp_bridge->aperture_size_idx = i;
                        return values[i].size;
                }
        }
        return 0;
}
EXPORT_SYMBOL(agp3_generic_fetch_size);
void agp3_generic_tlbflush(struct agp_memory *mem)
{
        u32 ctrl;

        pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
        pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
        pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
}
EXPORT_SYMBOL(agp3_generic_tlbflush);
int agp3_generic_configure(void)
{
        u32 temp;
        struct aper_size_info_16 *current_size;

        current_size = A_SIZE_16(agp_bridge->current_size);

        pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
        agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);

        /* set aperture size */
        pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
        /* set gart pointer */
        pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
        /* enable aperture and GTLB */
        pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
        pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);

        return 0;
}
EXPORT_SYMBOL(agp3_generic_configure);
void agp3_generic_cleanup(void)
{
        u32 ctrl;

        pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
        pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
}
EXPORT_SYMBOL(agp3_generic_cleanup);
const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
{
        {4096, 1048576, 10, 0x000},
        {2048,  524288,  9, 0x800},
        {1024,  262144,  8, 0xc00},
        { 512,  131072,  7, 0xe00},
        { 256,   65536,  6, 0xf00},
        { 128,   32768,  5, 0xf20},
        {  64,   16384,  4, 0xf30},
        {  32,    8192,  3, 0xf38},
        {  16,    4096,  2, 0xf3c},
        {   8,    2048,  1, 0xf3e},
        {   4,    1024,  0, 0xf3f}
};
EXPORT_SYMBOL(agp3_generic_sizes);