/*
** System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org>
** (c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com>
** (c) Copyright 2000-2004 Hewlett-Packard Company
**
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
** This module initializes the IOC (I/O Controller) found on B1000/C3000/
** J5000/J7000/N-class/L-class machines and their successors.
**
** FIXME: add DMA hint support programming in both sba and lba modules.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */

#include <asm/hardware.h>	/* for register_parisc_driver() stuff */

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include <asm/ropes.h>
#include <asm/mckinley.h>	/* for proc_mckinley_root */
#include <asm/runway.h>		/* for proc_runway_root */
#include <asm/page.h>		/* for PAGE0 */
#include <asm/pdc.h>		/* for PDC_MODEL_* */
#include <asm/pdcpat.h>		/* for is_pdc_pat() */
#include <asm/parisc-device.h>
#define MODULE_NAME "SBA"

/*
** The number of debug flags is a clue - this code is fragile.
** Don't even think about messing with it unless you have
** plenty of 710's to sacrifice to the computer gods. :^)
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif
#define SBA_INLINE	__inline__

#define DEFAULT_DMA_HINT_REG	0

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

static unsigned long ioc_needs_fdc = 0;

/* global count of IOMMUs in the system */
static unsigned int global_ioc_cnt = 0;

/* PA8700 (Piranha 2.2) bug workaround */
static unsigned long piranha_bad_128k = 0;
/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif /*CONFIG_AGP_PARISC*/

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif
/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
*/
#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif
#ifdef DEBUG_SBA_INIT

/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */

/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the sba
 *
 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
 * IO Adapter (aka Bus Converter).
 */
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif	/* DEBUG_SBA_INIT */
#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
			rcnt, ptr, (unsigned long long) *ptr);
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}
/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32) (((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}
/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
				nents,
				(unsigned long) sg_dma_address(startsg),
				sg_dma_len(startsg),
				sg_virt(startsg), startsg->length);
		startsg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */
/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */

#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors. Saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
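
/*
** Illustrative example, not from the original source: assuming
** BITS_PER_LONG == 64 and IOVP_SHIFT == 12, an allocation of four
** pages uses RESMAP_MASK(4) == 0xf000000000000000UL (four contiguous
** MSbs, later shifted right to the search position), and an IOVP of
** 0x5000 selects pdir entry PDIR_INDEX(0x5000) == 5.
*/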
static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}
/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      1ULL << IOVP_SHIFT) >> IOVP_SHIFT;

#if defined(ZX1_SUPPORT)
	BUG_ON(ioc->ibase & ~IOVP_MASK);
	shift = ioc->ibase >> IOVP_SHIFT;
#else
	shift = 0;
#endif

	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for(; res_ptr < res_end; ++res_ptr) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift, boundary_size);
			if ((*res_ptr == 0) && !ret) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = tpide;
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		while (res_ptr < res_end)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			WARN_ON(mask == 0);
			tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift, boundary_size);
			if ((((*res_ptr) & mask) == 0) && !ret) {
				*res_ptr |= mask;     /* mark resources busy! */
				pide = tpide;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (mask == 0) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt = 0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}
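
/*
** Worked example of the alignment rule above (illustrative only):
** with PAGE_SHIFT == 12, a request for bits_wanted == 3 entries gives
** o = 1 << get_order(3 << 12) == 4, so the bitmap is probed at bit
** offsets 0, 4, 8, ... and the winning IOVA is sufficiently aligned
** for a single power-of-2 sized PCOM purge in sba_mark_invalid().
*/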
/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked and then mark those bits in the
 * resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	pide = sba_search_bitmap(ioc, dev, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		pide = sba_search_bitmap(ioc, dev, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic("%s: I/O MMU @ %p is out of mapping resources\n",
			      __FILE__, ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift);

#ifdef SBA_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ?  -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}
/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__func__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
	ioc->used_pages -= bits_not_wanted;
#endif

	*res_ptr &= ~m;
}
/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0
/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @sid: process Space ID - currently only support KERNEL_SPACE
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint set to use for this mapping
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
 * pdir_ptr (arg0).
 * Using the bass-ackwards HP bit numbering, Each IO Pdir entry
 * for Astro/Ike looks like:
 *
 *  0                    19                                 51   55       63
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[43:12]            | U  |   VI   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 * Pluto is basically identical, supports fewer physical address bits:
 *
 *  0                       23                              51   55       63
 * +-+------------------------+-------------------------------+----+--------+
 * |V|        U               |         PPN[39:12]            | U  |   VI   |
 * +-+------------------------+-------------------------------+----+--------+
 *
 *  V  == Valid Bit  (Most Significant Bit is bit 0)
 *  U  == Unused
 * PPN == Physical Page Number
 *  VI == Virtual Index (aka Coherent Index)
 *
 * LPA instruction output is put into PPN field.
 * LCI (Load Coherence Index) instruction provides the "VI" bits.
 *
 * We pre-swap the bytes since PCX-W is Big Endian and the
 * IOMMU uses little endian for the pdir.
 */
static void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa; /* physical address */
	register unsigned ci; /* coherent index */

	pa = virt_to_phys(vba);
	pa &= IOVP_MASK;

	mtsp(sid,1);
	asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> PAGE_SHIFT) & 0xff;  /* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
}
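
/*
** Worked example (illustrative values, not from the original source):
** mapping a kernel page at physical 0x12345000 whose "lci" result
** contributes coherence-index byte 0x5a produces
**
**	pa = 0x12345000 | 0x5a | SBA_PDIR_VALID_BIT
**
** i.e. valid bit in the MSB, PPN in the middle, VI in the low byte.
** After cpu_to_le64(), the "valid" byte is the one at offset +7,
** which is exactly what the sanity checks elsewhere test for.
*/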
/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Marking the IO PDIR entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minium
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);
	u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set.
	**
	** Even though this is a big-endian machine, the entries
	** in the iopdir are little endian. That's why we look at
	** the byte at +7 instead of at +0.
	*/
	if (0x80 != (((u8 *) pdir_ptr)[7])) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt > IOVP_SIZE)
	{
#if 0
		unsigned long entries_per_cacheline = ioc_needs_fdc ?
				L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
					- (unsigned long) pdir_ptr;
#endif

		/* set "size" field for PCOM */
		iovp |= get_order(byte_cnt) + PAGE_SHIFT;

		do {
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *) pdir_ptr)[7] = 0;
			if (ioc_needs_fdc) {
				asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
#if 0
				entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
			}
			pdir_ptr++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > IOVP_SIZE);
	} else
		iovp |= IOVP_SHIFT;     /* set "size" field for PCOM */

	/*
	** clear I/O PDIR entry "valid" bit.
	** We have to R/M/W the cacheline regardless how much of the
	** pdir entry that we clobber.
	** The rest of the entry would be useful for debugging if we
	** could dump core on HPMC.
	*/
	((u8 *) pdir_ptr)[7] = 0;
	if (ioc_needs_fdc)
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));

	WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}
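
/*
** PCOM encoding example (illustrative): purging a 16k mapping at
** IOVP 0x8000 writes 0x8000 | (get_order(16384) + PAGE_SHIFT), i.e.
** the base address in the upper bits and log2(16384) == 14 in the
** low bits -- which is why the base must be aligned to the power-of-2
** size being purged.
*/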
/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask:  number of address bits this PCI device can handle
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static int sba_dma_supported( struct device *dev, u64 mask)
{
	struct ioc *ioc;

	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	/* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit
	 * first, then fall back to 32-bit if that fails.
	 * We are just "encouraging" 32-bit DMA masks here since we can
	 * never allow IOMMU bypass unless we add special support for ZX1.
	 */
	if (mask > ~0U)
		return 0;

	ioc = GET_IOC(dev);

	/*
	 * check if mask is >= than the current max IO Virt Address
	 * The max IO Virt address will *always* be < 30 bits.
	 */
	return((int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
}
/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;

	ioc = GET_IOC(dev);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}
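
/*
** Example of the offset/rounding math above (illustrative): for
** addr == 0xc0001234 and size == 0x2000, offset == 0x234 and the
** size rounds up to 0x3000, so three pdir entries are claimed and
** the returned IOVA is (pide << IOVP_SHIFT) | 0x234.
*/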
/**
 * sba_unmap_single - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void
sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
		 enum dma_data_direction direction)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	ioc = GET_IOC(dev);

	offset = iova & ~IOVP_MASK;
	iova ^= offset;        /* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
	/* Delaying when we re-use a IO Pdir entry reduces the number
	 * of MMIO reads needed to flush writes to the PCOM register.
	 */
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;

		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);

	/* If fdc's were issued, force fdc's to be visible now */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
	** For Astro based systems this isn't a big deal WRT performance.
	** As long as 2.4 kernels copyin/copyout data from/to userspace,
	** we don't need the syncdma. The issue here is I/O MMU cachelines
	** are *not* coherent in all cases.  May be hwrev dependent.
	** Need to investigate more.
	asm volatile("syncdma");
	*/
}
/**
 * sba_alloc_consistent - allocate/map shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void *sba_alloc_consistent(struct device *hwdev, size_t size,
				  dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return NULL;
	}

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}


/**
 * sba_free_consistent - free/unmap shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address of the "consistent" buffer.
 * @dma_handle:  IO virtual address of the "consistent" buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void
sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
		    dma_addr_t dma_handle)
{
	sba_unmap_single(hwdev, dma_handle, size, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}
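
/*
** Illustrative usage (not part of this file): drivers reach the two
** helpers above through the generic DMA API, e.g.
**
**	vaddr = dma_alloc_coherent(&pdev->dev, size, &handle, GFP_KERNEL);
**	...
**	dma_free_coherent(&pdev->dev, size, vaddr, handle);
*/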
/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef SBA_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"
#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	ioc = GET_IOC(dev);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
						sglist->length, direction);
		sg_dma_len(sglist)     = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	**   o dma_address will contain the pdir index
	**   o dma_len will contain the number of bytes to map
	**   o address contains the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	if (ioc_needs_fdc)
		asm volatile("sync" : : );

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}
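
/*
** Illustrative effect of the two passes above (not from the original
** source): three virtually contiguous 4k buffers can be coalesced
** into one 12k sba_alloc_range() allocation, after which
** iommu_fill_pdir() writes three consecutive pdir entries, one per
** page, each carrying its own coherence index.
*/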
/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
		__func__, nents, sg_virt(sglist), sglist->length);

	ioc = GET_IOC(dev);

#ifdef SBA_COLLECT_STATS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (sg_dma_len(sglist) && nents--) {

		sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
#ifdef SBA_COLLECT_STATS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
#endif
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__,  nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
}
static struct hppa_dma_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc_consistent =	sba_alloc_consistent,
	.alloc_noncoherent =	sba_alloc_consistent,
	.free_consistent =	sba_free_consistent,
	.map_single =		sba_map_single,
	.unmap_single =		sba_unmap_single,
	.map_sg =		sba_map_sg,
	.unmap_sg =		sba_unmap_sg,
	.dma_sync_single_for_cpu =	NULL,
	.dma_sync_single_for_device =	NULL,
	.dma_sync_sg_for_cpu =		NULL,
	.dma_sync_sg_for_device =	NULL,
};
/**************************************************************************
**
**   SBA PAT PDC support
**
**   o call pdc_pat_cell_module()
**   o store ranges in PCI "resource" structures
**
**************************************************************************/

static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
/*
** TODO/REVISIT/FIXME: support for directed ranges requires calls to
**      PAT PDC to program the SBA/LBA directed range registers...this
**      burden may fall on the LBA code since it directly supports the
**      PCI subsystem. It's not clear yet. - ggg
*/
PAT_MOD(mod)->mod_info.mod_pages   = PAT_GET_MOD_PAGES(temp);
	FIXME : ???
PAT_MOD(mod)->mod_info.dvi         = PAT_GET_DVI(temp);
	Tells where the dvi bits are located in the address.
PAT_MOD(mod)->mod_info.ioc         = PAT_GET_IOC(temp);
	Tells which IOC number given PCI bus is attached to.
#endif
}
/**************************************************************
*
*   Initialization and claim
*
***************************************************************/
#define PIRANHA_ADDR_MASK	0x00160000UL /* bit 17,18,20 */
#define PIRANHA_ADDR_VAL	0x00060000UL /* bit 17,18 on */
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
	unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base)	{
		panic("%s() could not allocate I/O Page Table\n",
			__func__);
	}

	/* If this is not PA8700 (PCX-W2)
	**	OR newer than ver 2.2
	**	OR in a system that doesn't need VINDEX bits from SBA,
	**
	** then we aren't exposed to the HW bug.
	*/
	if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L) )
		return (void *) pdir_base;

	/*
	 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
	 *
	 * An interaction between PA8700 CPU (Ver 2.2 or older) and
	 * Ike/Astro can cause silent data corruption. This is only
	 * a problem if the I/O PDIR is located in memory such that
	 * (little-endian) bits 17 and 18 are on and bit 20 is off.
	 *
	 * Since the max IO Pdir size is 2MB, by cleverly allocating the
	 * right physical address, we can either avoid (IOPDIR <= 1MB)
	 * or minimize (2MB IO Pdir) the problem if we restrict the
	 * IO Pdir to a maximum size of 2MB-128K (1920K).
	 *
	 * Because we always allocate 2^N sized IO pdirs, either of the
	 * "bad" regions will be the last 128K if at all. That's easy
	 * to test for.
	 */
	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* allocate a new one on 512k alignment */
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
			/* release original */
			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release excess */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order += 1;
				pdir_size <<= 1;
			}
		}
	} else {
		/*
		** 1MB or 2MB Pdir
		** Needs to be aligned on an "odd" 1MB boundary.
		*/
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */

		/* release original */
		free_pages( pdir_base, pdir_order);

		/* release first 1MB */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			** 2MB Pdir.
			**
			** Flag tells init_bitmap() to mark bad 128k as used
			** and to reduce the size by 128k.
			*/
			piranha_bad_128k = 1;

			new_pdir += 3*1024*1024;
			/* release last 1MB */
			free_pages(new_pdir, 20-12);

			/* release unusable 128KB */
			free_pages(new_pdir - 128*1024 , 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}
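
/*
** Example of the check above (illustrative): a 512k pdir ending at
** physical 0xa6ffff has (0xa6ffff & PIRANHA_ADDR_MASK) == 0x060000
** == PIRANHA_ADDR_VAL (bits 17 and 18 set, bit 20 clear), so that
** allocation would be thrown back and redone on 512k alignment.
*/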
struct ibase_data_struct {
	struct ioc *ioc;
	int ioc_num;
};

static int setup_ibase_imask_callback(struct device *dev, void *data)
{
	/* lba_set_iregs() is in drivers/parisc/lba_pci.c */
	extern void lba_set_iregs(struct parisc_device *, u32, u32);
	struct parisc_device *lba = to_parisc_device(dev);
	struct ibase_data_struct *ibd = data;
	int rope_num = (lba->hpa.start >> 13) & 0xf;

	if (rope_num >> 3 == ibd->ioc_num)
		lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
	return 0;
}

/* setup Mercury or Elroy IBASE/IMASK registers. */
static void
setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	struct ibase_data_struct ibase_data = {
		.ioc		= ioc,
		.ioc_num	= ioc_num,
	};

	device_for_each_child(&sba->dev, &ibase_data,
			      setup_ibase_imask_callback);
}
#ifdef SBA_AGP_SUPPORT
static int
sba_ioc_find_quicksilver(struct device *dev, void *data)
{
	int *agp_found = data;
	struct parisc_device *lba = to_parisc_device(dev);

	if (IS_QUICKSILVER(lba))
		*agp_found = 1;
	return 0;
}
#endif /* SBA_AGP_SUPPORT */
static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_mask;
	u32 iova_space_size;
	int iov_order, tcnfg;
#ifdef SBA_AGP_SUPPORT
	int agp_found = 0;
#endif
	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
		printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
		iova_space_size /= 2;
	}

	/*
	** iov_order is always based on a 1GB IOVA space since we want to
	** turn on the other half for AGP GART.
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
		__func__, ioc->ioc_hpa, iova_space_size >> 20,
		iov_order + PAGE_SHIFT);

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic("Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, ioc->pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
		ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
	sba_dump_tlb(ioc->ioc_hpa);

	setup_ibase_imask(sba, ioc, ioc_num);

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

#ifdef CONFIG_64BIT
	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
		case 12: tcnfg = 0; break;	/*  4K */
		case 13: tcnfg = 1; break;	/*  8K */
		case 14: tcnfg = 2; break;	/* 16K */
		case 16: tcnfg = 3; break;	/* 64K */
		default:
			panic(__FILE__ "Unsupported system page size %d",
				1 << PAGE_SHIFT);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** Bit zero == enable bit.
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

#ifdef SBA_AGP_SUPPORT

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on any AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);

	if (agp_found && sba_reserve_agpgart) {
		printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
		       __func__, (iova_space_size/2) >> 20);
		ioc->pdir_size /= 2;
		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
	}
#endif /*SBA_AGP_SUPPORT*/
}
static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_size, iova_space_mask;
	unsigned int pdir_size, iov_order, tcnfg;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	**
	** While we have 32-bits "IOVA" space, top two bits are used
	** for DMA hints - ergo only 30 bits max.
	*/

	iova_space_size = (u32) (totalram_pages/global_ioc_cnt);

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
	}
	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
	}

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
			__func__,
			ioc->ioc_hpa,
			(unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, pdir_size);

#ifdef SBA_HINT_SUPPORT
	/* FIXME : DMA HINTs not used */
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
			ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	/*
	** On C3000 w/512MB mem, HP-UX 10.20 reports:
	** ibase=0, imask=0xFE000000, size=0x2000000.
	*/
	ioc->ibase = 0;
	ioc->imask = iova_space_mask;	/* save it */
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif

	DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
		__func__, ioc->ibase, ioc->imask);

	/*
	** FIXME: Hint registers are programmed with default hint
	** values during boot, so hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/

	setup_ibase_imask(sba, ioc, ioc_num);

	/*
	** Program the IOC's ibase and enable IOVA translation
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
		case 12: tcnfg = 0; break;	/*  4K */
		case 13: tcnfg = 1; break;	/*  8K */
		case 14: tcnfg = 2; break;	/* 16K */
		case 16: tcnfg = 3; break;	/* 64K */
		default:
			panic(__FILE__ "Unsupported system page size %d",
				1 << PAGE_SHIFT);
			break;
	}
	/* Set I/O PDIR Page size to PAGE_SIZE (4k/16k/...) */
	WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	ioc->ibase = 0; /* used by SBA_IOVA and related macros */

	DBG_INIT("%s() DONE\n", __func__);
}
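
/*
** Sizing example (illustrative): with the 1GB cap above and 4k IOVP
** pages, the largest IO Pdir is (1GB / 4k) * 8 bytes == 2MB -- which
** is exactly the case the PIRANHA BUG comment and sba_alloc_pdir()
** have to accommodate.
*/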
/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o initialize SBA chip modes (HardFail)
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
	return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}
static void sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u64 ioc_ctl;

	if (!is_pdc_pat()) {
		/* Shutdown the USB controller on Astro-based workstations.
		** Once we reprogram the IOMMU, the next DMA performed by
		** USB will HPMC the box. USB is only enabled if a
		** keyboard is present and found.
		**
		** With serial console, j6k v5.0 firmware says:
		**   mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7
		**
		** FIXME: Using GFX+USB console at power up but direct
		**	linux to serial console is still broken.
		**	USB could generate DMA so we must reset USB.
		**	The proper sequence would be:
		**	o block console output
		**	o reset USB device
		**	o reprogram serial port
		**	o unblock console output
		*/
		if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
			pdc_io_reset_devices();
		}

	}


#if 0
printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
	PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);

	/*
	** Need to deal with DMA from LAN.
	**	Maybe use page zero boot device as a handle to talk
	**	to PDC about which device to shutdown.
	**
	** Netbooting, j6k v5.0 firmware says:
	**	mem_boot hpa 0xf4008000 sba 0x0 pad 0x0 cl_class 0x1002
	** ARGH! invalid class.
	*/
	if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
		&& (PAGE0->mem_boot.cl_class != CL_SEQU)) {
			pdc_io_reset();
	}
#endif

	if (!IS_PLUTO(sba_dev->dev)) {
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
			__func__, sba_dev->sba_hpa, ioc_ctl);
		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
			/* j6700 v1.6 firmware sets 0x294f */
			/* A500 firmware sets 0x4d */

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
	} /* if !PLUTO */

	if (IS_ASTRO(sba_dev->dev)) {
		int err;
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		BUG_ON(err < 0);

	} else if (IS_PLUTO(sba_dev->dev)) {
		int err;

		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		WARN_ON(err < 0);

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end   = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
		WARN_ON(err < 0);
	} else {
		/* IKE, REO */
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
		num_ioc = 2;

		/* TODO - LOOKUP Ike/Stretch chipset mem map */
	}
	/* XXX: What about Reo Grande? */

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
		unsigned int j;

		for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {

			/*
			 * Clear ROPE(N)_CONFIG AO bit.
			 * Disables "NT Ordering" (~= !"Relaxed Ordering")
			 * Overrides bit 1 in DMA Hint Sets.
			 * Improves netperf UDP_STREAM by ~10% for bcm5701.
			 */
			if (IS_PLUTO(sba_dev->dev)) {
				void __iomem *rope_cfg;
				unsigned long cfg_val;

				rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
				cfg_val = READ_REG(rope_cfg);
				cfg_val &= ~IOC_ROPE_AO;
				WRITE_REG(cfg_val, rope_cfg);
			}

			/*
			** Make sure the box crashes on rope errors.
			*/
			WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
		}

		/* flush out the last writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		DBG_INIT("	ioc[%d] ROPE_CFG 0x%Lx  ROPE_DBG 0x%Lx\n",
				i,
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
			);
		DBG_INIT("	STATUS_CONTROL 0x%Lx  FLUSH_CTRL 0x%Lx\n",
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
				READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
			);

		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
	}
}
static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This will be useful for debugging - especially if we get coredumps
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for(i=0; i< sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long , unsigned long ,
					  void (*)(pte_t * , unsigned long),
					  unsigned long );
		void set_data_memory_break(pte_t * , unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */

		/* Second part of PIRANHA BUG */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;  /* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n",
			__func__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic("%s:%s() could not allocate resource map\n",
			      __FILE__, __func__);
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - circular search */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* Mark first bit busy - ie no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif

		/* Third (and last) part of PIRANHA BUG */
		if (piranha_bad_128k) {
			/* region from +1408K to +1536K is un-usable. */

			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end   = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end   = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			/* mark that part of the io pdir busy */
			while (p_start < p_end)
				*p_start++ = -1;

		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			__func__, i, res_size, sba_dev->ioc[i].res_map);
	}

	spin_lock_init(&sba_dev->sba_lock);
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}
#ifdef CONFIG_PROC_FS
static int sba_proc_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
#ifdef SBA_COLLECT_STATS
	unsigned long avg = 0, min, max;
#endif
	int i;

	seq_printf(m, "%s rev %d.%d\n",
		   sba_dev->name,
		   (sba_dev->hw_rev & 0x7) + 1,
		   (sba_dev->hw_rev & 0x18) >> 3);
	seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
		   (int)((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
		   total_pages);

	seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
		   ioc->res_size, ioc->res_size << 3);   /* 8 bits per byte */

	seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
		   READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
		   READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
		   READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));

	for (i=0; i<4; i++)
		seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n",
			   i,
			   READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE  + i*0x18),
			   READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK  + i*0x18),
			   READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));

#ifdef SBA_COLLECT_STATS
	seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
		   total_pages - ioc->used_pages, ioc->used_pages,
		   (int)(ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		   min, avg, max);

	seq_printf(m, "pci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
		   ioc->msingle_calls, ioc->msingle_pages,
		   (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	seq_printf(m, "pci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
		   min, max, (int)((max * 1000)/min));

	seq_printf(m, "pci_map_sg()    : %12ld calls  %12ld pages (avg %d/1000)\n",
		   ioc->msg_calls, ioc->msg_pages,
		   (int)((ioc->msg_pages * 1000)/ioc->msg_calls));

	seq_printf(m, "pci_unmap_sg()  : %12ld calls  %12ld pages (avg %d/1000)\n",
		   ioc->usg_calls, ioc->usg_pages,
		   (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif

	return 0;
}

static int
sba_proc_open(struct inode *i, struct file *f)
{
	return single_open(f, &sba_proc_info, NULL);
}

static const struct file_operations sba_proc_fops = {
	.owner = THIS_MODULE,
	.open = sba_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int
sba_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	unsigned int *res_ptr = (unsigned int *)ioc->res_map;
	int i;

	for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			seq_puts(m, "\n   ");
		seq_printf(m, " %08x", *res_ptr);
	}
	seq_putc(m, '\n');

	return 0;
}

static int
sba_proc_bitmap_open(struct inode *i, struct file *f)
{
	return single_open(f, &sba_proc_bitmap_info, NULL);
}

static const struct file_operations sba_proc_bitmap_fops = {
	.owner = THIS_MODULE,
	.open = sba_proc_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif /* CONFIG_PROC_FS */
static struct parisc_device_id sba_tbl[] = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
	{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
	{ 0, }
};

static int sba_driver_callback(struct parisc_device *);

static struct parisc_driver sba_driver = {
	.name =		MODULE_NAME,
	.id_table =	sba_tbl,
	.probe =	sba_driver_callback,
};
/*
** Determine if sba should claim this chip (return 0) or not (return 1).
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
static int sba_driver_callback(struct parisc_device *dev)
{
	struct sba_device *sba_dev;
	u32 func_class;
	int i;
	char *version;
	void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *root;
#endif

	sba_dump_ranges(sba_addr);

	/* Read HW Rev First */
	func_class = READ_REG(sba_addr + SBA_FCLASS);

	if (IS_ASTRO(dev)) {
		unsigned long fclass;
		static char astro_rev[]="Astro ?.?";

		/* Astro is broken...Read HW Rev First */
		fclass = READ_REG(sba_addr);

		astro_rev[6] = '1' + (char) (fclass & 0x7);
		astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
		version = astro_rev;

	} else if (IS_IKE(dev)) {
		static char ike_rev[] = "Ike rev ?";
		ike_rev[8] = '0' + (char) (func_class & 0xff);
		version = ike_rev;
	} else if (IS_PLUTO(dev)) {
		static char pluto_rev[]="Pluto ?.?";
		pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
		pluto_rev[8] = '0' + (char) (func_class & 0x0f);
		version = pluto_rev;
	} else {
		static char reo_rev[] = "REO rev ?";
		reo_rev[8] = '0' + (char) (func_class & 0xff);
		version = reo_rev;
	}

	if (!global_ioc_cnt) {
		global_ioc_cnt = count_parisc_driver(&sba_driver);

		/* Astro and Pluto have one IOC per SBA */
		if ((!IS_ASTRO(dev)) && (!IS_PLUTO(dev)))
			global_ioc_cnt *= 2;
	}

	printk(KERN_INFO "%s found %s at 0x%llx\n",
		MODULE_NAME, version, (unsigned long long)dev->hpa.start);

	sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
	if (!sba_dev) {
		printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
		return -ENOMEM;
	}

	parisc_set_drvdata(dev, sba_dev);

	for(i=0; i<MAX_IOC; i++)
		spin_lock_init(&(sba_dev->ioc[i].res_lock));

	sba_dev->dev = dev;
	sba_dev->hw_rev = func_class;
	sba_dev->name = dev->name;
	sba_dev->sba_hpa = sba_addr;

	sba_get_pat_resources(sba_dev);
	sba_hw_init(sba_dev);
	sba_common_init(sba_dev);

	hppa_dma_ops = &sba_ops;

#ifdef CONFIG_PROC_FS
	switch (dev->id.hversion) {
	case PLUTO_MCKINLEY_PORT:
		root = proc_mckinley_root;
		break;
	case ASTRO_RUNWAY_PORT:
	case IKE_MERCED_PORT:
	default:
		root = proc_runway_root;
		break;
	}

	proc_create("sba_iommu", 0, root, &sba_proc_fops);
	proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
#endif

	return 0;
}
/*
** One time initialization to let the world know the SBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
*/
void __init sba_init(void)
{
	register_parisc_driver(&sba_driver);
}
/**
 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.
 * @pci_hba: The parisc device.
 *
 * Returns the appropriate IOMMU data for the given parisc PCI controller.
 * This is cached and used later for PCI DMA Mapping.
 */
void * sba_get_iommu(struct parisc_device *pci_hba)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int iocnum = (pci_hba->hw_path >> 3);	/* rope # */

	WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	return &(sba->ioc[iocnum]);
}
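
/*
** Illustrative numbering (follows from the shift above): ropes 0-7
** resolve to ioc[0] and ropes 8-15 to ioc[1], so a PCI host bus at
** hw_path 0xa would get &sba->ioc[1].
*/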
/**
 * sba_directed_lmmio - return first directed LMMIO range routed to rope
 * @pci_hba: The parisc device.
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, determine if any direct ranges
 * are routed down the corresponding rope.
 */
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int i;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */

	BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));

	r->start = r->end = 0;

	/* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */
	for (i=0; i<4; i++) {
		int base, size;
		void __iomem *reg = sba->sba_hpa + i*0x18;

		base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
		if ((base & 1) == 0)
			continue;	/* not enabled */

		size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);

		if ((size & (ROPES_PER_IOC-1)) != rope)
			continue;	/* directed down different rope */

		r->start = (base & ~1UL) | PCI_F_EXTEND;
		size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
		r->end = r->start + size;
		r->flags = IORESOURCE_MEM;
	}
}
/**
 * sba_distributed_lmmio - return portion of distributed LMMIO range
 * @pci_hba: The parisc device.
 * @r: resource PCI host controller wants start/end fields assigned.
 *
 * For the given parisc PCI controller, return portion of distributed LMMIO
 * range. The distributed LMMIO is always present and it's just a question
 * of the base address and size of the range.
 */
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int base, size;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));  /* rope # */

	BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));

	r->start = r->end = 0;

	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
	if ((base & 1) == 0) {
		BUG();	/* Gah! Distr Range wasn't enabled! */
		return;
	}

	r->start = (base & ~1UL) | PCI_F_EXTEND;

	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* adjust base for this rope */
	r->end = r->start + size;
	r->flags = IORESOURCE_MEM;
}