/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
 */
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/tioca_provider.h>
u32 tioca_gart_found;
EXPORT_SYMBOL(tioca_gart_found);	/* used by agp-sgi */

LIST_HEAD(tioca_list);
EXPORT_SYMBOL(tioca_list);	/* used by agp-sgi */
static int tioca_gart_init(struct tioca_kernel *);
/**
 * tioca_gart_init - Initialize SGI TIOCA GART
 * @tioca_kern: ptr to the kernel struct (wrapping the common prom/kernel
 *	struct) identifying the CA
 *
 * If the indicated tioca has devices present, initialize its associated
 * GART MMR's and kernel memory.
 */
static int
tioca_gart_init(struct tioca_kernel *tioca_kern)
{
	u64 ap_reg, offset;
	struct page *tmp;
	struct tioca_common *tioca_common;
	struct tioca __iomem *ca_base;

	tioca_common = tioca_kern->ca_common;
	ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;

	if (list_empty(tioca_kern->ca_devices))
		return 0;

	ap_reg = 0;
	/*
	 * Validate aperture size
	 */

	switch (CA_APERATURE_SIZE >> 20) {
	case 4:
		ap_reg |= (0x3ff << CA_GART_AP_SIZE_SHFT);	/* 4 MB */
		break;
	case 8:
		ap_reg |= (0x3fe << CA_GART_AP_SIZE_SHFT);	/* 8 MB */
		break;
	case 16:
		ap_reg |= (0x3fc << CA_GART_AP_SIZE_SHFT);	/* 16 MB */
		break;
	case 32:
		ap_reg |= (0x3f8 << CA_GART_AP_SIZE_SHFT);	/* 32 MB */
		break;
	case 64:
		ap_reg |= (0x3f0 << CA_GART_AP_SIZE_SHFT);	/* 64 MB */
		break;
	case 128:
		ap_reg |= (0x3e0 << CA_GART_AP_SIZE_SHFT);	/* 128 MB */
		break;
	case 256:
		ap_reg |= (0x3c0 << CA_GART_AP_SIZE_SHFT);	/* 256 MB */
		break;
	case 512:
		ap_reg |= (0x380 << CA_GART_AP_SIZE_SHFT);	/* 512 MB */
		break;
	case 1024:
		ap_reg |= (0x300 << CA_GART_AP_SIZE_SHFT);	/* 1 GB */
		break;
	case 2048:
		ap_reg |= (0x200 << CA_GART_AP_SIZE_SHFT);	/* 2 GB */
		break;
	case 4096:
		ap_reg |= (0x000 << CA_GART_AP_SIZE_SHFT);	/* 4 GB */
		break;
	default:
		printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE 0x%lx\n",
		       __func__, (ulong) CA_APERATURE_SIZE);
		return -1;
	}
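	/*
	 * The size-field values above follow a simple pattern: each is
	 * 0x400 minus the aperture size in 4 MB units (e.g. 64 MB ->
	 * 0x400 - 16 = 0x3f0), so the field is effectively a mask of the
	 * aperture size expressed in 4 MB granules.
	 */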
	/*
	 * Set up other aperture parameters
	 */

	if (PAGE_SIZE >= 16384) {
		tioca_kern->ca_ap_pagesize = 16384;
		ap_reg |= CA_GART_PAGE_SIZE;
	} else {
		tioca_kern->ca_ap_pagesize = 4096;
	}

	tioca_kern->ca_ap_size = CA_APERATURE_SIZE;
	tioca_kern->ca_ap_bus_base = CA_APERATURE_BASE;
	tioca_kern->ca_gart_entries =
	    tioca_kern->ca_ap_size / tioca_kern->ca_ap_pagesize;

	ap_reg |= (CA_GART_AP_ENB_AGP | CA_GART_AP_ENB_PCI);
	ap_reg |= tioca_kern->ca_ap_bus_base;
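	/*
	 * Illustrative sizing (example numbers, not from this file): with a
	 * 1 GB aperture and 4 KB GART pages, ca_gart_entries is
	 * 0x40000000 / 0x1000 = 262144, so the GART table allocated below
	 * needs 262144 * sizeof(u64) = 2 MB of contiguous kernel memory.
	 * The actual values depend on CA_APERATURE_SIZE and PAGE_SIZE.
	 */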
	/*
	 * Allocate and set up the GART
	 */

	tioca_kern->ca_gart_size = tioca_kern->ca_gart_entries * sizeof(u64);
	tmp =
	    alloc_pages_node(tioca_kern->ca_closest_node,
			     GFP_KERNEL | __GFP_ZERO,
			     get_order(tioca_kern->ca_gart_size));
	if (!tmp) {
		printk(KERN_ERR "%s:  Could not allocate "
		       "%llu bytes (order %d) for GART\n",
		       __func__,
		       tioca_kern->ca_gart_size,
		       get_order(tioca_kern->ca_gart_size));
		return -ENOMEM;
	}

	tioca_kern->ca_gart = page_address(tmp);
	tioca_kern->ca_gart_coretalk_addr =
	    PHYS_TO_TIODMA(virt_to_phys(tioca_kern->ca_gart));
	/*
	 * Compute PCI/AGP convenience fields
	 */

	offset = CA_PCI32_MAPPED_BASE - CA_APERATURE_BASE;
	tioca_kern->ca_pciap_base = CA_PCI32_MAPPED_BASE;
	tioca_kern->ca_pciap_size = CA_PCI32_MAPPED_SIZE;
	tioca_kern->ca_pcigart_start = offset / tioca_kern->ca_ap_pagesize;
	tioca_kern->ca_pcigart_base =
	    tioca_kern->ca_gart_coretalk_addr + offset;
	tioca_kern->ca_pcigart =
	    &tioca_kern->ca_gart[tioca_kern->ca_pcigart_start];
	tioca_kern->ca_pcigart_entries =
	    tioca_kern->ca_pciap_size / tioca_kern->ca_ap_pagesize;
	tioca_kern->ca_pcigart_pagemap =
	    kzalloc(tioca_kern->ca_pcigart_entries / 8, GFP_KERNEL);
	if (!tioca_kern->ca_pcigart_pagemap) {
		free_pages((unsigned long)tioca_kern->ca_gart,
			   get_order(tioca_kern->ca_gart_size));
		return -1;
	}
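	/*
	 * ca_pcigart_pagemap is a bitmap with one bit per PCI GART entry,
	 * which is why only ca_pcigart_entries / 8 bytes are allocated
	 * above.  tioca_dma_mapped() below uses it for first-fit
	 * allocation of GART entries.
	 */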
	offset = CA_AGP_MAPPED_BASE - CA_APERATURE_BASE;
	tioca_kern->ca_gfxap_base = CA_AGP_MAPPED_BASE;
	tioca_kern->ca_gfxap_size = CA_AGP_MAPPED_SIZE;
	tioca_kern->ca_gfxgart_start = offset / tioca_kern->ca_ap_pagesize;
	tioca_kern->ca_gfxgart_base =
	    tioca_kern->ca_gart_coretalk_addr + offset;
	tioca_kern->ca_gfxgart =
	    &tioca_kern->ca_gart[tioca_kern->ca_gfxgart_start];
	tioca_kern->ca_gfxgart_entries =
	    tioca_kern->ca_gfxap_size / tioca_kern->ca_ap_pagesize;
	/*
	 * various control settings:
	 *      use agp op-combining
	 *      use GET semantics to fetch memory
	 *      participate in coherency domain
	 *      DISABLE GART PREFETCHING due to hw bug tracked in SGI PV930029
	 */

	__sn_setq_relaxed(&ca_base->ca_control1,
			  CA_AGPDMA_OP_ENB_COMBDELAY);	/* PV895469 ? */
	__sn_clrq_relaxed(&ca_base->ca_control2, CA_GART_MEM_PARAM);
	__sn_setq_relaxed(&ca_base->ca_control2,
			  (0x2ull << CA_GART_MEM_PARAM_SHFT));
	tioca_kern->ca_gart_iscoherent = 1;
	__sn_clrq_relaxed(&ca_base->ca_control2,
			  (CA_GART_WR_PREFETCH_ENB | CA_GART_RD_PREFETCH_ENB));
	/*
	 * Unmask GART fetch error interrupts.  Clear residual errors first.
	 */

	writeq(CA_GART_FETCH_ERR, &ca_base->ca_int_status_alias);
	writeq(CA_GART_FETCH_ERR, &ca_base->ca_mult_error_alias);
	__sn_clrq_relaxed(&ca_base->ca_int_mask, CA_GART_FETCH_ERR);
	/*
	 * Program the aperture and gart registers in TIOCA
	 */

	writeq(ap_reg, &ca_base->ca_gart_aperature);
	writeq(tioca_kern->ca_gart_coretalk_addr | 1,
	       &ca_base->ca_gart_ptr_table);

	return 0;
}
/**
 * tioca_fastwrite_enable - enable AGP FW for a tioca and its functions
 * @tioca_kern: structure representing the CA
 *
 * Given a CA, scan all attached functions making sure they all support
 * FastWrite.  If so, enable FastWrite for all functions and the CA itself.
 */
void
tioca_fastwrite_enable(struct tioca_kernel *tioca_kern)
{
	int cap_ptr;
	u32 reg;
	struct tioca __iomem *tioca_base;
	struct pci_dev *pdev;
	struct tioca_common *common;

	common = tioca_kern->ca_common;
	/*
	 * Scan all vga controllers on this bus making sure they all
	 * support FW.  If not, return.
	 */

	list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
		if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
			continue;

		cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
		if (!cap_ptr)
			return;	/* no AGP CAP means no FW */

		pci_read_config_dword(pdev, cap_ptr + PCI_AGP_STATUS, &reg);
		if (!(reg & PCI_AGP_STATUS_FW))
			return;	/* function doesn't support FW */
	}
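	/*
	 * The loop above only verifies FW support; enabling happens in the
	 * second pass below, so FastWrite is either turned on for every
	 * VGA function and the CA, or for none of them.
	 */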
	/*
	 * Set fw for all vga fn's
	 */

	list_for_each_entry(pdev, tioca_kern->ca_devices, bus_list) {
		if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8))
			continue;

		cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
		pci_read_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, &reg);
		reg |= PCI_AGP_COMMAND_FW;
		pci_write_config_dword(pdev, cap_ptr + PCI_AGP_COMMAND, reg);
	}
	/*
	 * Set ca's fw to match
	 */

	tioca_base = (struct tioca __iomem *)common->ca_common.bs_base;
	__sn_setq_relaxed(&tioca_base->ca_control1, CA_AGP_FW_ENABLE);
}

EXPORT_SYMBOL(tioca_fastwrite_enable);	/* used by agp-sgi */
/**
 * tioca_dma_d64 - create a DMA mapping using 64-bit direct mode
 * @paddr: system physical address
 *
 * Map @paddr into 64-bit CA bus space.  No device context is necessary.
 * Bits 53:0 come from the coretalk address.  We just need to mask in the
 * following optional bits of the 64-bit pci address:
 *
 * 63:60 - Coretalk Packet Type -  0x1 for Mem Get/Put (coherent)
 *                                 0x2 for PIO (non-coherent)
 *                                 We will always use 0x1
 * 55:55 - Swap bytes              Currently unused
 */
static u64
tioca_dma_d64(unsigned long paddr)
{
	dma_addr_t bus_addr;

	bus_addr = PHYS_TO_TIODMA(paddr);

	BUG_ON(bus_addr >> 54);

	/* Set upper nibble to Cache Coherent Memory op */

	bus_addr |= (1UL << 60);

	return bus_addr;
}
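/*
 * Illustrative example (sample numbers): if PHYS_TO_TIODMA() yields a
 * coretalk address of 0x0000030000001000, the d64 bus address returned
 * above is 0x1000030000001000 -- the same address with bits 63:60 set
 * to 0x1 (cache-coherent memory op).
 */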
/**
 * tioca_dma_d48 - create a DMA mapping using 48-bit direct mode
 * @pdev: linux pci_dev representing the function
 * @paddr: system physical address
 *
 * Map @paddr into the 64-bit bus space of the CA associated with @pdev.
 *
 * The CA AGP 48-bit direct address falls out as follows.  When direct
 * mapping AGP addresses, the 48-bit AGP address is constructed as:
 *
 * [47:40] - Low 8 bits of the page Node ID extracted from coretalk
 *              address [47:40].  The upper 8 node bits are fixed
 *              and come from the xxx register bits [5:0]
 * [39:38] - Chiplet ID extracted from coretalk address [39:38]
 * [37:00] - node offset extracted from coretalk address [37:00]
 *
 * Since the node id in general will be non-zero, and the chiplet id
 * will always be non-zero, it follows that the device must support
 * a dma mask of at least 0xffffffffff (40 bits) to target node 0
 * and in general should be 0xffffffffffff (48 bits) to target nodes
 * up to 255.  Nodes above 255 need the support of the xxx register,
 * and so a given CA can only directly target nodes in the 256-node
 * window selected by that register.
 */
static u64
tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
{
	struct tioca_common *tioca_common;
	struct tioca __iomem *ca_base;
	u64 ct_addr;
	dma_addr_t bus_addr;
	u32 node_upper;
	u64 agp_dma_extn;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
	ca_base = (struct tioca __iomem *)tioca_common->ca_common.bs_base;

	ct_addr = PHYS_TO_TIODMA(paddr);
	if (!ct_addr)
		return 0;

	bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL);
	node_upper = ct_addr >> 48;

	if (node_upper > 64) {
		printk(KERN_ERR "%s:  coretalk addr 0x%p node id out "
		       "of range\n", __func__, (void *)ct_addr);
		return 0;
	}

	agp_dma_extn = __sn_readq_relaxed(&ca_base->ca_agp_dma_addr_extn);
	if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) {
		printk(KERN_ERR "%s:  coretalk upper node (%u) "
		       "mismatch with ca_agp_dma_addr_extn (%llu)\n",
		       __func__,
		       node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT));
		return 0;
	}

	return bus_addr;
}
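/*
 * Illustrative example (sample numbers): for a coretalk address of
 * 0x0003800000002000, node_upper is 0x3 and the returned 48-bit bus
 * address is 0x800000002000; the mapping is only valid when the
 * node-id field of ca_agp_dma_addr_extn is also 0x3.
 */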
/**
 * tioca_dma_mapped - create a DMA mapping using a CA GART
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @req_size: len (bytes) to map
 *
 * Map @paddr into CA address space using the GART mechanism.  The mapped
 * dma_addr_t is guaranteed to be contiguous in CA bus space.
 */
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
	int i, ps, ps_shift, entry, entries, mapsize, last_entry;
	u64 xio_addr, end_xio_addr;
	struct tioca_common *tioca_common;
	struct tioca_kernel *tioca_kern;
	dma_addr_t bus_addr = 0;
	struct tioca_dmamap *ca_dmamap;
	void *map;
	unsigned long flags;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);

	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
	tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;

	xio_addr = PHYS_TO_TIODMA(paddr);
	if (!xio_addr)
		return 0;

	spin_lock_irqsave(&tioca_kern->ca_lock, flags);
	/*
	 * allocate a map struct
	 */

	ca_dmamap = kzalloc(sizeof(struct tioca_dmamap), GFP_ATOMIC);
	if (!ca_dmamap)
		goto map_return;
	/*
	 * Locate free entries that can hold req_size.  Account for
	 * unaligned start/length when allocating.
	 */

	ps = tioca_kern->ca_ap_pagesize;	/* will be power of 2 */
	ps_shift = ffs(ps) - 1;
	end_xio_addr = xio_addr + req_size - 1;

	entries = (end_xio_addr >> ps_shift) - (xio_addr >> ps_shift) + 1;

	map = tioca_kern->ca_pcigart_pagemap;
	mapsize = tioca_kern->ca_pcigart_entries;

	entry = find_first_zero_bit(map, mapsize);
	while (entry < mapsize) {
		last_entry = find_next_bit(map, mapsize, entry);

		if (last_entry - entry >= entries)
			break;

		entry = find_next_zero_bit(map, mapsize, last_entry);
	}

	if (entry >= mapsize) {		/* no free range large enough */
		kfree(ca_dmamap);
		goto map_return;
	}
	for (i = 0; i < entries; i++)
		set_bit(entry + i, map);

	bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
	ca_dmamap->cad_dma_addr = bus_addr;
	ca_dmamap->cad_gart_size = entries;
	ca_dmamap->cad_gart_entry = entry;
	list_add(&ca_dmamap->cad_list, &tioca_kern->ca_dmamaps);
	tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
	bus_addr += xio_addr & (ps - 1);
	xio_addr &= ~(ps - 1);
	xio_addr += ps;
	entry++;

	while (xio_addr < end_xio_addr) {
		tioca_kern->ca_pcigart[entry] = tioca_paddr_to_gart(xio_addr);
		xio_addr += ps;
		entry++;
	}

	tioca_tlbflush(tioca_kern);

map_return:
	spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
	return bus_addr;
}
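/*
 * Illustrative example (sample numbers): with 4 KB GART pages, a request
 * of 10240 bytes whose coretalk address starts 0x200 into a page spans
 * offsets 0x200 through 0x29ff, i.e. three pages, so
 * entries = (0x29ff >> 12) - (0x200 >> 12) + 1 = 3 GART entries are
 * consumed and the returned bus_addr preserves the 0x200 sub-page offset.
 */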
/**
 * tioca_dma_unmap - release CA mapping resources
 * @pdev: linux pci_dev representing the function
 * @bus_addr: bus address returned by an earlier tioca_dma_map
 * @dir: mapping direction (unused)
 *
 * Locate mapping resources associated with @bus_addr and release them.
 * For mappings created using the direct modes (64 or 48) there are no
 * resources to release.
 */
static void
tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
{
	int i, entry;
	unsigned long flags;
	struct tioca_common *tioca_common;
	struct tioca_kernel *tioca_kern;
	struct tioca_dmamap *map;
	struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(pdev);
	tioca_common = (struct tioca_common *)pcidev_info->pdi_pcibus_info;
	tioca_kern = (struct tioca_kernel *)tioca_common->ca_kernel_private;

	/* return straight away if this isn't a mapped address */

	if (bus_addr < tioca_kern->ca_pciap_base ||
	    bus_addr >= (tioca_kern->ca_pciap_base + tioca_kern->ca_pciap_size))
		return;

	spin_lock_irqsave(&tioca_kern->ca_lock, flags);

	list_for_each_entry(map, &tioca_kern->ca_dmamaps, cad_list)
		if (map->cad_dma_addr == bus_addr)
			break;

	entry = map->cad_gart_entry;

	for (i = 0; i < map->cad_gart_size; i++, entry++) {
		clear_bit(entry, tioca_kern->ca_pcigart_pagemap);
		tioca_kern->ca_pcigart[entry] = 0;
	}
	tioca_tlbflush(tioca_kern);

	list_del(&map->cad_list);
	spin_unlock_irqrestore(&tioca_kern->ca_lock, flags);
	kfree(map);
}
/**
 * tioca_dma_map - map pages for PCI DMA
 * @pdev: linux pci_dev representing the function
 * @paddr: host physical address to map
 * @byte_count: bytes to map
 * @dma_flags: SN DMA flags (SN_DMA_MSI is not supported)
 *
 * This is the main wrapper for mapping host physical pages to CA PCI space.
 * The mapping mode used is based on the device's dma_mask.  As a last resort
 * use the GART mapped mode.
 */
static u64
tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{
	u64 mapaddr;

	/*
	 * Not supported for now ...
	 */
	if (dma_flags & SN_DMA_MSI)
		return 0;

	/*
	 * If card is 64 or 48 bit addressable, use a direct mapping.  32
	 * bit direct is so restrictive w.r.t. where the memory resides that
	 * we don't use it even though CA has some support.
	 */

	if (pdev->dma_mask == ~0UL)
		mapaddr = tioca_dma_d64(paddr);
	else if (pdev->dma_mask == 0xffffffffffffUL)
		mapaddr = tioca_dma_d48(pdev, paddr);
	else
		mapaddr = 0;

	/* Last resort ... use PCI portion of CA GART */

	if (mapaddr == 0)
		mapaddr = tioca_dma_mapped(pdev, paddr, byte_count);

	return mapaddr;
}
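/*
 * Note: a device advertising only a 32-bit dma_mask matches neither
 * direct-mapping case above, so its mappings always fall through to the
 * GART path via tioca_dma_mapped().
 */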
/**
 * tioca_error_intr_handler - SGI TIO CA error interrupt handler
 * @irq: unused
 * @arg: pointer to tioca_common struct for the given CA
 *
 * Handle a CA error interrupt.  Simply a wrapper around a SAL call which
 * defers processing to the SGI prom.
 */
static irqreturn_t
tioca_error_intr_handler(int irq, void *arg)
{
	struct tioca_common *soft = arg;
	struct ia64_sal_retval ret_stuff;
	u64 segment;
	u64 busnum;

	ret_stuff.status = 0;

	segment = soft->ca_common.bs_persist_segment;
	busnum = soft->ca_common.bs_persist_busnum;

	SAL_CALL_NOLOCK(ret_stuff,
			(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
			segment, busnum, 0, 0, 0, 0, 0);

	return IRQ_HANDLED;
}
/**
 * tioca_bus_fixup - perform final PCI fixup for a TIO CA bus
 * @prom_bussoft: Common prom/kernel struct representing the bus
 * @controller: pci_controller for the bus
 *
 * Replicates the tioca_common pointed to by @prom_bussoft in kernel
 * space.  Allocates and initializes a kernel-only area for a given CA,
 * and sets up an irq for handling CA error interrupts.
 *
 * On successful setup, returns the kernel version of tioca_common back to
 * the caller.
 */
static void *
tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
	struct tioca_common *tioca_common;
	struct tioca_kernel *tioca_kern;
	struct pci_bus *bus;

	/* sanity check prom rev */

	if (is_shub1() && sn_sal_rev() < 0x0406) {
		printk(KERN_ERR "%s:  SGI prom rev 4.06 or greater required "
		       "for tioca support\n", __func__);
		return NULL;
	}
	/*
	 * Allocate kernel bus soft and copy from prom.
	 */

	tioca_common = kzalloc(sizeof(struct tioca_common), GFP_KERNEL);
	if (!tioca_common)
		return NULL;

	memcpy(tioca_common, prom_bussoft, sizeof(struct tioca_common));
	tioca_common->ca_common.bs_base = (unsigned long)
		ioremap(REGION_OFFSET(tioca_common->ca_common.bs_base),
			sizeof(struct tioca_common));
	/* init kernel-private area */

	tioca_kern = kzalloc(sizeof(struct tioca_kernel), GFP_KERNEL);
	if (!tioca_kern) {
		kfree(tioca_common);
		return NULL;
	}

	tioca_kern->ca_common = tioca_common;
	spin_lock_init(&tioca_kern->ca_lock);
	INIT_LIST_HEAD(&tioca_kern->ca_dmamaps);
	tioca_kern->ca_closest_node =
	    nasid_to_cnodeid(tioca_common->ca_closest_nasid);
	tioca_common->ca_kernel_private = (u64) tioca_kern;
	bus = pci_find_bus(tioca_common->ca_common.bs_persist_segment,
			   tioca_common->ca_common.bs_persist_busnum);
	tioca_kern->ca_devices = &bus->devices;

	if (tioca_gart_init(tioca_kern) < 0) {
		kfree(tioca_kern);
		kfree(tioca_common);
		return NULL;
	}

	tioca_gart_found++;
	list_add(&tioca_kern->ca_list, &tioca_list);

	if (request_irq(SGI_TIOCA_ERROR,
			tioca_error_intr_handler,
			IRQF_SHARED, "TIOCA error", (void *)tioca_common))
		printk(KERN_WARNING
		       "%s:  Unable to get irq %d.  "
		       "Error interrupts won't be routed for TIOCA bus %d\n",
		       __func__, SGI_TIOCA_ERROR,
		       (int)tioca_common->ca_common.bs_persist_busnum);

	sn_set_err_irq_affinity(SGI_TIOCA_ERROR);

	/* Setup locality information */
	controller->node = tioca_kern->ca_closest_node;
	return tioca_common;
}
static struct sn_pcibus_provider tioca_pci_interfaces = {
	.dma_map = tioca_dma_map,
	.dma_map_consistent = tioca_dma_map,
	.dma_unmap = tioca_dma_unmap,
	.bus_fixup = tioca_bus_fixup,
	.force_interrupt = NULL,
	.target_interrupt = NULL
};

/**
 * tioca_init_provider - init SN PCI provider ops for TIO CA
 */
int
tioca_init_provider(void)
{
	sn_pci_provider[PCIIO_ASIC_TYPE_TIOCA] = &tioca_pci_interfaces;
	return 0;
}