/*
** IA64 System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002-2005 Hewlett-Packard Company
**
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 David S. Miller (from sparc64 I/O MMU code)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>		/* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>

#include <asm/delay.h>		/* ia64_get_itc() */

#include <asm/page.h>		/* PAGE_OFFSET */

#include <asm/acpi-ext.h>

extern int swiotlb_late_init_with_default_size (size_t size);
** Enabling timing search of the pdir resource map.  Output in /proc.
** Disabled by default to optimize performance.
#undef PDIR_SEARCH_TIMING

** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now.  NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
#define ALLOW_IOV_BYPASS

** This option specifically allows/disallows bypassing scatterlists with
** multiple entries.  Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU.  Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
#undef ALLOW_IOV_BYPASS_SG

** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA.  Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues.  If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address.  This could severely impact
** error containment, and is therefore off by default.  The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
#undef FULL_VALID_PDIR

#define ENABLE_MARK_CLEAN
** The number of debug flags is a clue - this code is fragile.  NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync.  The sanity checking code isn't going to

#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES

#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive

#define SBA_INLINE	__inline__
/* #define SBA_INLINE */

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#define DBG_INIT(x...)

#define DBG_RUN(x...)	printk(x)
#define DBG_RUN(x...)

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#define DBG_RUN_SG(x...)

#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#define DBG_RES(x...)

#define DBG_BYPASS(x...)	printk(x)
#define DBG_BYPASS(x...)

#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
		printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
#define DELAYED_RESOURCE_CNT	64
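/*
 * Illustrative sketch only (not part of the driver): the delayed-free
 * scheme queues unmapped ranges in ioc->saved[] and, once
 * DELAYED_RESOURCE_CNT of them have piled up, invalidates and frees them
 * in one batch followed by a single PCOM read to flush the posted purges.
 * See sba_unmap_page() below for the real code; roughly:
 *
 *	d = &(ioc->saved[ioc->saved_cnt]);
 *	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
 *		while (cnt--) {
 *			sba_mark_invalid(ioc, d->iova, d->size);
 *			sba_free_range(ioc, d->iova, d->size);
 *		}
 *		READ_REG(ioc->ioc_hpa + IOC_PCOM);	// flush purges
 *	}
 */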
#define PCI_DEVICE_ID_HP_SX2000_IOC	0x12ec

#define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID	((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID	((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID	((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)

#define ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */

#define IOC_FUNC_ID	0x000
#define IOC_FCLASS	0x008	/* function class, bist, header, rev... */
#define IOC_IBASE	0x300	/* IO TLB */
#define IOC_IMASK	0x308
#define IOC_PCOM	0x310
#define IOC_TCNFG	0x318
#define IOC_PDIR_BASE	0x320

#define IOC_ROPE0_CFG	0x500
#define IOC_ROPE_AO	  0x10	/* Allow "Relaxed Ordering" */

/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;

	void __iomem	*ioc_hpa;	/* I/O MMU base address */
	char		*res_map;	/* resource map, bit == pdir entry */
	u64		*pdir_base;	/* physical base address */
	unsigned long	ibase;		/* pdir IOV Space base */
	unsigned long	imask;		/* pdir IOV Space mask */

	unsigned long	*res_hint;	/* next avail IOVP - circular search */
	unsigned long	dma_mask;
	spinlock_t	res_lock;	/* protects the resource bitmap, but must be held when */
					/* clearing pdir to prevent races with allocations. */
	unsigned int	res_bitshift;	/* from the RIGHT! */
	unsigned int	res_size;	/* size of resource map in bytes */
	unsigned int	node;		/* node where this IOC lives */
#if DELAYED_RESOURCE_CNT > 0
	spinlock_t	saved_lock;	/* may want to try to get this on a separate cacheline */
					/* than res_lock for bigger systems. */
	struct sba_dma_pair {
	} saved[DELAYED_RESOURCE_CNT];
#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE	0x100
	unsigned long avg_search[SBA_SEARCH_SAMPLE];
	unsigned long avg_idx;	/* current index into avg_search */

	/* Stuff we don't need in performance path */
	struct ioc	*next;		/* list of IOC's in system */
	acpi_handle	handle;		/* for multiple IOC's */
	unsigned int	func_id;
	unsigned int	rev;		/* HW revision of chip */
	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
	struct pci_dev	*sac_only_dev;

static struct ioc *ioc_list, *ioc_found;
static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);

#define sba_sg_address(sg)	sg_virt((sg))

#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;

# define GET_IOC(dev)	((dev_is_pci(dev)) \
			 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
# define GET_IOC(dev)	NULL
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW.  I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
#define DMA_CHUNK_SIZE	(BITS_PER_LONG*iovp_size)

#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
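/*
 * Example (illustration only): with iovp_size == 4K, ROUNDUP(0x1234, iovp_size)
 * yields 0x2000, i.e. sizes are padded out to whole IOVA pages before the
 * resource bitmap is consulted.
 */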
/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
************************************/
#define READ_REG(addr)       __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)
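/*
 * Minimal sketch (assumption: this helper is not in the driver, which
 * open-codes the sequence): because SBA register writes are posted, a write
 * that must reach the hardware before execution continues is followed by a
 * read from the IOC, e.g. when purging the IO TLB:
 */
static inline void sba_flush_pcom_writes(struct ioc *ioc, u64 purge_val)
{
	WRITE_REG(purge_val, ioc->ioc_hpa + IOC_PCOM);	/* posted write */
	READ_REG(ioc->ioc_hpa + IOC_PCOM);		/* forces it to complete */
}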
#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
sba_dump_tlb(char *hpa)
	DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
	DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
	/* start printing from lowest pde in rval */
	u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
	unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
	       msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
		       (rcnt == (pide & (BITS_PER_LONG - 1)))
			rcnt, ptr, (unsigned long long) *ptr);
	printk(KERN_DEBUG "%s", msg);
/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
sba_check_pdir(struct ioc *ioc, char *msg)
	u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
	u64 *rptr = (u64 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */

	while (rptr < rptr_end) {
		int rcnt;	/* number of bits we might check */

			/* Get last byte and highest bit from that */
			u32 pde = ((u32)((*pptr >> (63)) & 0x1));
			if ((rval & 0x1) ^ pde)
				** BUMMER! -- res_map != pdir --
				** Dump rval and matching pdir entries
				sba_dump_pdir_entry(ioc, msg, pide);
			rval >>= 1;	/* try the next bit */
		rptr++;	/* look at next word of res_map */
	/* It'd be nice if we always got here :^) */
/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
		       startsg->dma_address, startsg->dma_length,
		       sba_sg_address(startsg));
		startsg = sg_next(startsg);

sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
	struct scatterlist *the_sg = startsg;
	int the_nents = nents;

	while (the_nents-- > 0) {
		if (sba_sg_address(the_sg) == 0x0UL)
			sba_dump_sg(NULL, startsg, nents);
		the_sg = sg_next(the_sg);

#endif /* ASSERT_PDIR_SANITY */
/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset)	((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova)		((iova) & ~(ioc->ibase))

#define PDIR_ENTRY_SIZE	sizeof(u64)

#define PDIR_INDEX(iovp)   ((iovp)>>iovp_shift)

#define RESMAP_MASK(n)    ~(~0UL << (n))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)
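/*
 * Illustrative helper (assumption: not part of the driver).  Shows how the
 * macros above turn an IOVA back into the resource-map word holding its
 * allocation bit, mirroring the arithmetic in sba_free_range() below.
 */
static unsigned long *example_iova_to_res_word(struct ioc *ioc, dma_addr_t iova)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);	/* strip the IOVA base bits */
	unsigned int pide  = PDIR_INDEX(iovp);		/* one bitmap bit per pdir entry */
	unsigned int ridx  = pide >> 3;			/* bit index -> byte index in res_map */

	/* round the byte index down to an unsigned long boundary */
	return (unsigned long *) &(ioc->res_map[ridx & ~RESMAP_IDX_MASK]);
}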
/**
 * For most cases the normal get_order is sufficient, however it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
	long double d = size - 1;

	order = ia64_getf_exp(d);
	order = order - iovp_shift - 0xffff + 1;
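/*
 * Worked example (illustration only): with iovp_shift == 12, a 20KB request
 * is 5 IOVA pages; get_iovp_order(5 << iovp_shift) == 3, so the allocator
 * below rounds it up to 8 pages.  That keeps every allocation a power-of-two
 * size, which is what the PCOM purge in sba_mark_invalid() requires (see the
 * AR2305 note in sba_search_bitmap()).
 */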
static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
473 * sba_search_bitmap - find free space in IO PDIR resource bitmap
474 * @ioc: IO MMU structure which owns the pdir we are interested in.
475 * @bits_wanted: number of entries we need.
476 * @use_hint: use res_hint to indicate where to start looking
478 * Find consecutive free bits in resource bitmap.
479 * Each bit represents one entry in the IO Pdir.
480 * Cool perf optimization: search for log2(size) bits at a time.
482 static SBA_INLINE
unsigned long
483 sba_search_bitmap(struct ioc
*ioc
, struct device
*dev
,
484 unsigned long bits_wanted
, int use_hint
)
486 unsigned long *res_ptr
;
487 unsigned long *res_end
= (unsigned long *) &(ioc
->res_map
[ioc
->res_size
]);
488 unsigned long flags
, pide
= ~0UL, tpide
;
489 unsigned long boundary_size
;
493 ASSERT(((unsigned long) ioc
->res_hint
& (sizeof(unsigned long) - 1UL)) == 0);
494 ASSERT(res_ptr
< res_end
);
496 boundary_size
= (unsigned long long)dma_get_seg_boundary(dev
) + 1;
497 boundary_size
= ALIGN(boundary_size
, 1ULL << iovp_shift
) >> iovp_shift
;
499 BUG_ON(ioc
->ibase
& ~iovp_mask
);
500 shift
= ioc
->ibase
>> iovp_shift
;
502 spin_lock_irqsave(&ioc
->res_lock
, flags
);
504 /* Allow caller to force a search through the entire resource space */
505 if (likely(use_hint
)) {
506 res_ptr
= ioc
->res_hint
;
508 res_ptr
= (ulong
*)ioc
->res_map
;
509 ioc
->res_bitshift
= 0;
513 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
514 * if a TLB entry is purged while in use. sba_mark_invalid()
515 * purges IOTLB entries in power-of-two sizes, so we also
516 * allocate IOVA space in power-of-two sizes.
518 bits_wanted
= 1UL << get_iovp_order(bits_wanted
<< iovp_shift
);
520 if (likely(bits_wanted
== 1)) {
521 unsigned int bitshiftcnt
;
522 for(; res_ptr
< res_end
; res_ptr
++) {
523 if (likely(*res_ptr
!= ~0UL)) {
524 bitshiftcnt
= ffz(*res_ptr
);
525 *res_ptr
|= (1UL << bitshiftcnt
);
526 pide
= ptr_to_pide(ioc
, res_ptr
, bitshiftcnt
);
527 ioc
->res_bitshift
= bitshiftcnt
+ bits_wanted
;
535 if (likely(bits_wanted
<= BITS_PER_LONG
/2)) {
537 ** Search the resource bit map on well-aligned values.
538 ** "o" is the alignment.
539 ** We need the alignment to invalidate I/O TLB using
540 ** SBA HW features in the unmap path.
542 unsigned long o
= 1 << get_iovp_order(bits_wanted
<< iovp_shift
);
543 uint bitshiftcnt
= ROUNDUP(ioc
->res_bitshift
, o
);
544 unsigned long mask
, base_mask
;
546 base_mask
= RESMAP_MASK(bits_wanted
);
547 mask
= base_mask
<< bitshiftcnt
;
549 DBG_RES("%s() o %ld %p", __func__
, o
, res_ptr
);
550 for(; res_ptr
< res_end
; res_ptr
++)
552 DBG_RES(" %p %lx %lx\n", res_ptr
, mask
, *res_ptr
);
554 for (; mask
; mask
<<= o
, bitshiftcnt
+= o
) {
555 tpide
= ptr_to_pide(ioc
, res_ptr
, bitshiftcnt
);
556 ret
= iommu_is_span_boundary(tpide
, bits_wanted
,
559 if ((0 == ((*res_ptr
) & mask
)) && !ret
) {
560 *res_ptr
|= mask
; /* mark resources busy! */
562 ioc
->res_bitshift
= bitshiftcnt
+ bits_wanted
;
576 qwords
= bits_wanted
>> 6; /* /64 */
577 bits
= bits_wanted
- (qwords
* BITS_PER_LONG
);
579 end
= res_end
- qwords
;
581 for (; res_ptr
< end
; res_ptr
++) {
582 tpide
= ptr_to_pide(ioc
, res_ptr
, 0);
583 ret
= iommu_is_span_boundary(tpide
, bits_wanted
,
584 shift
, boundary_size
);
587 for (i
= 0 ; i
< qwords
; i
++) {
591 if (bits
&& res_ptr
[i
] && (__ffs(res_ptr
[i
]) < bits
))
594 /* Found it, mark it */
595 for (i
= 0 ; i
< qwords
; i
++)
597 res_ptr
[i
] |= RESMAP_MASK(bits
);
601 ioc
->res_bitshift
= bits
;
609 prefetch(ioc
->res_map
);
610 ioc
->res_hint
= (unsigned long *) ioc
->res_map
;
611 ioc
->res_bitshift
= 0;
612 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
616 ioc
->res_hint
= res_ptr
;
617 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
623 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
624 * @ioc: IO MMU structure which owns the pdir we are interested in.
625 * @size: number of bytes to create a mapping for
627 * Given a size, find consecutive unmarked and then mark those bits in the
631 sba_alloc_range(struct ioc
*ioc
, struct device
*dev
, size_t size
)
633 unsigned int pages_needed
= size
>> iovp_shift
;
634 #ifdef PDIR_SEARCH_TIMING
635 unsigned long itc_start
;
639 ASSERT(pages_needed
);
640 ASSERT(0 == (size
& ~iovp_mask
));
642 #ifdef PDIR_SEARCH_TIMING
643 itc_start
= ia64_get_itc();
646 ** "seek and ye shall find"...praying never hurts either...
648 pide
= sba_search_bitmap(ioc
, dev
, pages_needed
, 1);
649 if (unlikely(pide
>= (ioc
->res_size
<< 3))) {
650 pide
= sba_search_bitmap(ioc
, dev
, pages_needed
, 0);
651 if (unlikely(pide
>= (ioc
->res_size
<< 3))) {
652 #if DELAYED_RESOURCE_CNT > 0
656 ** With delayed resource freeing, we can give this one more shot. We're
657 ** getting close to being in trouble here, so do what we can to make this
660 spin_lock_irqsave(&ioc
->saved_lock
, flags
);
661 if (ioc
->saved_cnt
> 0) {
662 struct sba_dma_pair
*d
;
663 int cnt
= ioc
->saved_cnt
;
665 d
= &(ioc
->saved
[ioc
->saved_cnt
- 1]);
667 spin_lock(&ioc
->res_lock
);
669 sba_mark_invalid(ioc
, d
->iova
, d
->size
);
670 sba_free_range(ioc
, d
->iova
, d
->size
);
674 READ_REG(ioc
->ioc_hpa
+IOC_PCOM
); /* flush purges */
675 spin_unlock(&ioc
->res_lock
);
677 spin_unlock_irqrestore(&ioc
->saved_lock
, flags
);
679 pide
= sba_search_bitmap(ioc
, dev
, pages_needed
, 0);
680 if (unlikely(pide
>= (ioc
->res_size
<< 3))) {
681 printk(KERN_WARNING
"%s: I/O MMU @ %p is"
682 "out of mapping resources, %u %u %lx\n",
683 __func__
, ioc
->ioc_hpa
, ioc
->res_size
,
684 pages_needed
, dma_get_seg_boundary(dev
));
688 printk(KERN_WARNING
"%s: I/O MMU @ %p is"
689 "out of mapping resources, %u %u %lx\n",
690 __func__
, ioc
->ioc_hpa
, ioc
->res_size
,
691 pages_needed
, dma_get_seg_boundary(dev
));
697 #ifdef PDIR_SEARCH_TIMING
698 ioc
->avg_search
[ioc
->avg_idx
++] = (ia64_get_itc() - itc_start
) / pages_needed
;
699 ioc
->avg_idx
&= SBA_SEARCH_SAMPLE
- 1;
702 prefetchw(&(ioc
->pdir_base
[pide
]));
704 #ifdef ASSERT_PDIR_SANITY
705 /* verify the first enable bit is clear */
706 if(0x00 != ((u8
*) ioc
->pdir_base
)[pide
*PDIR_ENTRY_SIZE
+ 7]) {
707 sba_dump_pdir_entry(ioc
, "sba_search_bitmap() botched it?", pide
);
711 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
712 __func__
, size
, pages_needed
, pide
,
713 (uint
) ((unsigned long) ioc
->res_hint
- (unsigned long) ioc
->res_map
),
721 * sba_free_range - unmark bits in IO PDIR resource bitmap
722 * @ioc: IO MMU structure which owns the pdir we are interested in.
723 * @iova: IO virtual address which was previously allocated.
724 * @size: number of bytes to create a mapping for
726 * clear bits in the ioc's resource map
728 static SBA_INLINE
void
729 sba_free_range(struct ioc
*ioc
, dma_addr_t iova
, size_t size
)
731 unsigned long iovp
= SBA_IOVP(ioc
, iova
);
732 unsigned int pide
= PDIR_INDEX(iovp
);
733 unsigned int ridx
= pide
>> 3; /* convert bit to byte address */
734 unsigned long *res_ptr
= (unsigned long *) &((ioc
)->res_map
[ridx
& ~RESMAP_IDX_MASK
]);
735 int bits_not_wanted
= size
>> iovp_shift
;
738 /* Round up to power-of-two size: see AR2305 note above */
739 bits_not_wanted
= 1UL << get_iovp_order(bits_not_wanted
<< iovp_shift
);
740 for (; bits_not_wanted
> 0 ; res_ptr
++) {
742 if (unlikely(bits_not_wanted
> BITS_PER_LONG
)) {
744 /* these mappings start 64bit aligned */
746 bits_not_wanted
-= BITS_PER_LONG
;
747 pide
+= BITS_PER_LONG
;
751 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
752 m
= RESMAP_MASK(bits_not_wanted
) << (pide
& (BITS_PER_LONG
- 1));
755 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__
, (uint
) iova
, size
,
756 bits_not_wanted
, m
, pide
, res_ptr
, *res_ptr
);
759 ASSERT(bits_not_wanted
);
760 ASSERT((*res_ptr
& m
) == m
); /* verify same bits are set */
767 /**************************************************************
769 * "Dynamic DMA Mapping" support (aka "Coherent I/O")
771 ***************************************************************/
/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 *
 * +-+---------------------+----------------------------------+----+--------+
 * |V|          U          |            PPN[39:12]            | U  |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 * PPN == Physical Page Number
 *
 * The physical address fields are filled with the results of virt_to_phys()
 */
#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)	\
						      | 0x8000000000000000ULL)

sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
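/*
 * Worked example (illustration only): for a cacheable kernel address such
 * as 0xe000000001234abc the function form above produces
 *
 *	(0xe000000001234abc & ~0xE000000000000FFFULL) | 0x80000000000000FFULL
 *	  == 0x80000000012340FF
 *
 * i.e. the valid bit (bit 63) set, the page frame bits taken straight from
 * the identity-mapped virtual address, and the low attribute bits set.
 */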
809 #ifdef ENABLE_MARK_CLEAN
811 * Since DMA is i-cache coherent, any (complete) pages that were written via
812 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
813 * flush them when they get mapped into an executable vm-area.
816 mark_clean (void *addr
, size_t size
)
818 unsigned long pg_addr
, end
;
820 pg_addr
= PAGE_ALIGN((unsigned long) addr
);
821 end
= (unsigned long) addr
+ size
;
822 while (pg_addr
+ PAGE_SIZE
<= end
) {
823 struct page
*page
= virt_to_page((void *)pg_addr
);
824 set_bit(PG_arch_1
, &page
->flags
);
825 pg_addr
+= PAGE_SIZE
;
/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO Virtual Address mapped earlier
 * @byte_cnt: number of bytes this mapping covers.
 *
 * Marking the IO PDIR entry(ies) as Invalid and invalidate
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
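/*
 * Worked example (illustration only): purging a 64KB range with 4KB IOVA
 * pages writes the range's base address with the low bits set to
 * get_iovp_order(64KB) + iovp_shift == 4 + 12 == 16 into IOC_PCOM,
 * telling the hardware to purge a 2^16 byte (64KB) aligned range.
 */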
846 static SBA_INLINE
void
847 sba_mark_invalid(struct ioc
*ioc
, dma_addr_t iova
, size_t byte_cnt
)
849 u32 iovp
= (u32
) SBA_IOVP(ioc
,iova
);
851 int off
= PDIR_INDEX(iovp
);
853 /* Must be non-zero and rounded up */
854 ASSERT(byte_cnt
> 0);
855 ASSERT(0 == (byte_cnt
& ~iovp_mask
));
857 #ifdef ASSERT_PDIR_SANITY
858 /* Assert first pdir entry is set */
859 if (!(ioc
->pdir_base
[off
] >> 60)) {
860 sba_dump_pdir_entry(ioc
,"sba_mark_invalid()", PDIR_INDEX(iovp
));
864 if (byte_cnt
<= iovp_size
)
866 ASSERT(off
< ioc
->pdir_size
);
868 iovp
|= iovp_shift
; /* set "size" field for PCOM */
870 #ifndef FULL_VALID_PDIR
872 ** clear I/O PDIR entry "valid" bit
873 ** Do NOT clear the rest - save it for debugging.
874 ** We should only clear bits that have previously
877 ioc
->pdir_base
[off
] &= ~(0x80000000000000FFULL
);
880 ** If we want to maintain the PDIR as valid, put in
881 ** the spill page so devices prefetching won't
882 ** cause a hard fail.
884 ioc
->pdir_base
[off
] = (0x80000000000000FFULL
| prefetch_spill_page
);
887 u32 t
= get_iovp_order(byte_cnt
) + iovp_shift
;
890 ASSERT(t
<= 31); /* 2GB! Max value of "size" field */
893 /* verify this pdir entry is enabled */
894 ASSERT(ioc
->pdir_base
[off
] >> 63);
895 #ifndef FULL_VALID_PDIR
896 /* clear I/O Pdir entry "valid" bit first */
897 ioc
->pdir_base
[off
] &= ~(0x80000000000000FFULL
);
899 ioc
->pdir_base
[off
] = (0x80000000000000FFULL
| prefetch_spill_page
);
902 byte_cnt
-= iovp_size
;
903 } while (byte_cnt
> 0);
906 WRITE_REG(iovp
| ioc
->ibase
, ioc
->ioc_hpa
+IOC_PCOM
);
910 * sba_map_page - map one buffer and return IOVA for DMA
911 * @dev: instance of PCI owned by the driver that's asking.
913 * @poff: offset into page
914 * @size: number of bytes to map
915 * @dir: dma direction
916 * @attrs: optional dma attributes
918 * See Documentation/DMA-API-HOWTO.txt
920 static dma_addr_t
sba_map_page(struct device
*dev
, struct page
*page
,
921 unsigned long poff
, size_t size
,
922 enum dma_data_direction dir
,
926 void *addr
= page_address(page
) + poff
;
931 #ifdef ASSERT_PDIR_SANITY
934 #ifdef ALLOW_IOV_BYPASS
935 unsigned long pci_addr
= virt_to_phys(addr
);
938 #ifdef ALLOW_IOV_BYPASS
939 ASSERT(to_pci_dev(dev
)->dma_mask
);
941 ** Check if the PCI device can DMA to ptr... if so, just return ptr
943 if (likely((pci_addr
& ~to_pci_dev(dev
)->dma_mask
) == 0)) {
945 ** Device is bit capable of DMA'ing to the buffer...
946 ** just return the PCI address of ptr
948 DBG_BYPASS("sba_map_page() bypass mask/addr: "
950 to_pci_dev(dev
)->dma_mask
, pci_addr
);
957 prefetch(ioc
->res_hint
);
960 ASSERT(size
<= DMA_CHUNK_SIZE
);
962 /* save offset bits */
963 offset
= ((dma_addr_t
) (long) addr
) & ~iovp_mask
;
965 /* round up to nearest iovp_size */
966 size
= (size
+ offset
+ ~iovp_mask
) & iovp_mask
;
968 #ifdef ASSERT_PDIR_SANITY
969 spin_lock_irqsave(&ioc
->res_lock
, flags
);
970 if (sba_check_pdir(ioc
,"Check before sba_map_page()"))
971 panic("Sanity check failed");
972 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
975 pide
= sba_alloc_range(ioc
, dev
, size
);
977 return DMA_MAPPING_ERROR
;
979 iovp
= (dma_addr_t
) pide
<< iovp_shift
;
981 DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__
, addr
, (long) iovp
| offset
);
983 pdir_start
= &(ioc
->pdir_base
[pide
]);
986 ASSERT(((u8
*)pdir_start
)[7] == 0); /* verify availability */
987 sba_io_pdir_entry(pdir_start
, (unsigned long) addr
);
989 DBG_RUN(" pdir 0x%p %lx\n", pdir_start
, *pdir_start
);
995 /* force pdir update */
998 /* form complete address */
999 #ifdef ASSERT_PDIR_SANITY
1000 spin_lock_irqsave(&ioc
->res_lock
, flags
);
1001 sba_check_pdir(ioc
,"Check after sba_map_page()");
1002 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
1004 return SBA_IOVA(ioc
, iovp
, offset
);
1007 #ifdef ENABLE_MARK_CLEAN
1008 static SBA_INLINE
void
1009 sba_mark_clean(struct ioc
*ioc
, dma_addr_t iova
, size_t size
)
1011 u32 iovp
= (u32
) SBA_IOVP(ioc
,iova
);
1012 int off
= PDIR_INDEX(iovp
);
1015 if (size
<= iovp_size
) {
1016 addr
= phys_to_virt(ioc
->pdir_base
[off
] &
1017 ~0xE000000000000FFFULL
);
1018 mark_clean(addr
, size
);
1021 addr
= phys_to_virt(ioc
->pdir_base
[off
] &
1022 ~0xE000000000000FFFULL
);
1023 mark_clean(addr
, min(size
, iovp_size
));
1032 * sba_unmap_page - unmap one IOVA and free resources
1033 * @dev: instance of PCI owned by the driver that's asking.
1034 * @iova: IOVA of driver buffer previously mapped.
1035 * @size: number of bytes mapped in driver buffer.
1036 * @dir: R/W or both.
1037 * @attrs: optional dma attributes
1039 * See Documentation/DMA-API-HOWTO.txt
1041 static void sba_unmap_page(struct device
*dev
, dma_addr_t iova
, size_t size
,
1042 enum dma_data_direction dir
, unsigned long attrs
)
1045 #if DELAYED_RESOURCE_CNT > 0
1046 struct sba_dma_pair
*d
;
1048 unsigned long flags
;
1054 #ifdef ALLOW_IOV_BYPASS
1055 if (likely((iova
& ioc
->imask
) != ioc
->ibase
)) {
1057 ** Address does not fall w/in IOVA, must be bypassing
1059 DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n",
1062 #ifdef ENABLE_MARK_CLEAN
1063 if (dir
== DMA_FROM_DEVICE
) {
1064 mark_clean(phys_to_virt(iova
), size
);
1070 offset
= iova
& ~iovp_mask
;
1072 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__
, (long) iova
, size
);
1074 iova
^= offset
; /* clear offset bits */
1076 size
= ROUNDUP(size
, iovp_size
);
1078 #ifdef ENABLE_MARK_CLEAN
1079 if (dir
== DMA_FROM_DEVICE
)
1080 sba_mark_clean(ioc
, iova
, size
);
1083 #if DELAYED_RESOURCE_CNT > 0
1084 spin_lock_irqsave(&ioc
->saved_lock
, flags
);
1085 d
= &(ioc
->saved
[ioc
->saved_cnt
]);
1088 if (unlikely(++(ioc
->saved_cnt
) >= DELAYED_RESOURCE_CNT
)) {
1089 int cnt
= ioc
->saved_cnt
;
1090 spin_lock(&ioc
->res_lock
);
1092 sba_mark_invalid(ioc
, d
->iova
, d
->size
);
1093 sba_free_range(ioc
, d
->iova
, d
->size
);
1097 READ_REG(ioc
->ioc_hpa
+IOC_PCOM
); /* flush purges */
1098 spin_unlock(&ioc
->res_lock
);
1100 spin_unlock_irqrestore(&ioc
->saved_lock
, flags
);
1101 #else /* DELAYED_RESOURCE_CNT == 0 */
1102 spin_lock_irqsave(&ioc
->res_lock
, flags
);
1103 sba_mark_invalid(ioc
, iova
, size
);
1104 sba_free_range(ioc
, iova
, size
);
1105 READ_REG(ioc
->ioc_hpa
+IOC_PCOM
); /* flush purges */
1106 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
1107 #endif /* DELAYED_RESOURCE_CNT == 0 */
1111 * sba_alloc_coherent - allocate/map shared mem for DMA
1112 * @dev: instance of PCI owned by the driver that's asking.
1113 * @size: number of bytes mapped in driver buffer.
1114 * @dma_handle: IOVA of new buffer.
1116 * See Documentation/DMA-API-HOWTO.txt
1119 sba_alloc_coherent(struct device
*dev
, size_t size
, dma_addr_t
*dma_handle
,
1120 gfp_t flags
, unsigned long attrs
)
1133 page
= alloc_pages_node(node
, flags
, get_order(size
));
1134 if (unlikely(!page
))
1137 addr
= page_address(page
);
1138 memset(addr
, 0, size
);
1139 *dma_handle
= page_to_phys(page
);
1141 #ifdef ALLOW_IOV_BYPASS
1142 ASSERT(dev
->coherent_dma_mask
);
1144 ** Check if the PCI device can DMA to ptr... if so, just return ptr
1146 if (likely((*dma_handle
& ~dev
->coherent_dma_mask
) == 0)) {
1147 DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
1148 dev
->coherent_dma_mask
, *dma_handle
);
1155 * If device can't bypass or bypass is disabled, pass the 32bit fake
1156 * device to map single to get an iova mapping.
1158 *dma_handle
= sba_map_page(&ioc
->sac_only_dev
->dev
, page
, 0, size
,
1159 DMA_BIDIRECTIONAL
, 0);
1160 if (dma_mapping_error(dev
, *dma_handle
))
1167 * sba_free_coherent - free/unmap shared mem for DMA
1168 * @dev: instance of PCI owned by the driver that's asking.
1169 * @size: number of bytes mapped in driver buffer.
1170 * @vaddr: virtual address IOVA of "consistent" buffer.
1171 * @dma_handler: IO virtual address of "consistent" buffer.
1173 * See Documentation/DMA-API-HOWTO.txt
1175 static void sba_free_coherent(struct device
*dev
, size_t size
, void *vaddr
,
1176 dma_addr_t dma_handle
, unsigned long attrs
)
1178 sba_unmap_page(dev
, dma_handle
, size
, 0, 0);
1179 free_pages((unsigned long) vaddr
, get_order(size
));
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not.  Use a flag to indicate
** the SG list entry contains a valid pdir index.
#define PIDE_FLAG 0x1UL
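/*
 * Example (illustration only): sba_coalesce_chunks() stores a freshly
 * allocated pdir index in the head scatterlist entry as
 *
 *	dma_sg->dma_address = PIDE_FLAG | (idx << iovp_shift) | dma_offset;
 *
 * and sba_fill_pdir() later recognizes such an entry by testing PIDE_FLAG
 * and masking it back off to recover the index.
 */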
1190 #ifdef DEBUG_LARGE_SG_ENTRIES
1191 int dump_run_sg
= 0;
1196 * sba_fill_pdir - write allocated SG entries into IO PDIR
1197 * @ioc: IO MMU structure which owns the pdir we are interested in.
1198 * @startsg: list of IOVA/size pairs
1199 * @nents: number of entries in startsg list
1201 * Take preprocessed SG list and write corresponding entries
1205 static SBA_INLINE
int
1208 struct scatterlist
*startsg
,
1211 struct scatterlist
*dma_sg
= startsg
; /* pointer to current DMA */
1214 unsigned long dma_offset
= 0;
1216 while (nents
-- > 0) {
1217 int cnt
= startsg
->dma_length
;
1218 startsg
->dma_length
= 0;
1220 #ifdef DEBUG_LARGE_SG_ENTRIES
1222 printk(" %2d : %08lx/%05x %p\n",
1223 nents
, startsg
->dma_address
, cnt
,
1224 sba_sg_address(startsg
));
1226 DBG_RUN_SG(" %d : %08lx/%05x %p\n",
1227 nents
, startsg
->dma_address
, cnt
,
1228 sba_sg_address(startsg
));
1231 ** Look for the start of a new DMA stream
1233 if (startsg
->dma_address
& PIDE_FLAG
) {
1234 u32 pide
= startsg
->dma_address
& ~PIDE_FLAG
;
1235 dma_offset
= (unsigned long) pide
& ~iovp_mask
;
1236 startsg
->dma_address
= 0;
1238 dma_sg
= sg_next(dma_sg
);
1239 dma_sg
->dma_address
= pide
| ioc
->ibase
;
1240 pdirp
= &(ioc
->pdir_base
[pide
>> iovp_shift
]);
1245 ** Look for a VCONTIG chunk
1248 unsigned long vaddr
= (unsigned long) sba_sg_address(startsg
);
1251 /* Since multiple Vcontig blocks could make up
1252 ** one DMA stream, *add* cnt to dma_len.
1254 dma_sg
->dma_length
+= cnt
;
1256 dma_offset
=0; /* only want offset on first chunk */
1257 cnt
= ROUNDUP(cnt
, iovp_size
);
1259 sba_io_pdir_entry(pdirp
, vaddr
);
1265 startsg
= sg_next(startsg
);
1267 /* force pdir update */
1270 #ifdef DEBUG_LARGE_SG_ENTRIES
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
#define DMA_CONTIG(__X, __Y) \
	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
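/*
 * Worked example (illustration only), iovp_shift == 12: an end-of-prev of
 * 0x...7000 and a start-of-next of 0x...8000 both have zero low 12 bits,
 * so DMA_CONTIG() is true and the chunks can share one DMA stream; an
 * end-of-prev of 0x...6abc fails the test because the previous chunk does
 * not end on an IOVA page boundary.
 */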
1288 * sba_coalesce_chunks - preprocess the SG list
1289 * @ioc: IO MMU structure which owns the pdir we are interested in.
1290 * @startsg: list of IOVA/size pairs
1291 * @nents: number of entries in startsg list
1293 * First pass is to walk the SG list and determine where the breaks are
1294 * in the DMA stream. Allocates PDIR entries but does not fill them.
1295 * Returns the number of DMA chunks.
1297 * Doing the fill separate from the coalescing/allocation keeps the
1298 * code simpler. Future enhancement could make one pass through
1299 * the sglist do both.
1301 static SBA_INLINE
int
1302 sba_coalesce_chunks(struct ioc
*ioc
, struct device
*dev
,
1303 struct scatterlist
*startsg
,
1306 struct scatterlist
*vcontig_sg
; /* VCONTIG chunk head */
1307 unsigned long vcontig_len
; /* len of VCONTIG chunk */
1308 unsigned long vcontig_end
;
1309 struct scatterlist
*dma_sg
; /* next DMA stream head */
1310 unsigned long dma_offset
, dma_len
; /* start/len of DMA stream */
1312 unsigned int max_seg_size
= dma_get_max_seg_size(dev
);
1316 unsigned long vaddr
= (unsigned long) sba_sg_address(startsg
);
1319 ** Prepare for first/next DMA stream
1321 dma_sg
= vcontig_sg
= startsg
;
1322 dma_len
= vcontig_len
= vcontig_end
= startsg
->length
;
1323 vcontig_end
+= vaddr
;
1324 dma_offset
= vaddr
& ~iovp_mask
;
1326 /* PARANOID: clear entries */
1327 startsg
->dma_address
= startsg
->dma_length
= 0;
1330 ** This loop terminates one iteration "early" since
1331 ** it's always looking one "ahead".
1333 while (--nents
> 0) {
1334 unsigned long vaddr
; /* tmp */
1336 startsg
= sg_next(startsg
);
1339 startsg
->dma_address
= startsg
->dma_length
= 0;
1341 /* catch brokenness in SCSI layer */
1342 ASSERT(startsg
->length
<= DMA_CHUNK_SIZE
);
1345 ** First make sure current dma stream won't
1346 ** exceed DMA_CHUNK_SIZE if we coalesce the
1349 if (((dma_len
+ dma_offset
+ startsg
->length
+ ~iovp_mask
) & iovp_mask
)
1353 if (dma_len
+ startsg
->length
> max_seg_size
)
1357 ** Then look for virtually contiguous blocks.
1359 ** append the next transaction?
1361 vaddr
= (unsigned long) sba_sg_address(startsg
);
1362 if (vcontig_end
== vaddr
)
1364 vcontig_len
+= startsg
->length
;
1365 vcontig_end
+= startsg
->length
;
1366 dma_len
+= startsg
->length
;
1370 #ifdef DEBUG_LARGE_SG_ENTRIES
1371 dump_run_sg
= (vcontig_len
> iovp_size
);
1375 ** Not virtually contiguous.
1376 ** Terminate prev chunk.
1377 ** Start a new chunk.
1379 ** Once we start a new VCONTIG chunk, dma_offset
1380 ** can't change. And we need the offset from the first
1381 ** chunk - not the last one. Ergo Successive chunks
1382 ** must start on page boundaries and dovetail
1383 ** with its predecessor.
1385 vcontig_sg
->dma_length
= vcontig_len
;
1387 vcontig_sg
= startsg
;
1388 vcontig_len
= startsg
->length
;
1391 ** 3) do the entries end/start on page boundaries?
1392 ** Don't update vcontig_end until we've checked.
1394 if (DMA_CONTIG(vcontig_end
, vaddr
))
1396 vcontig_end
= vcontig_len
+ vaddr
;
1397 dma_len
+= vcontig_len
;
1405 ** End of DMA Stream
1406 ** Terminate last VCONTIG block.
1407 ** Allocate space for DMA stream.
1409 vcontig_sg
->dma_length
= vcontig_len
;
1410 dma_len
= (dma_len
+ dma_offset
+ ~iovp_mask
) & iovp_mask
;
1411 ASSERT(dma_len
<= DMA_CHUNK_SIZE
);
1412 idx
= sba_alloc_range(ioc
, dev
, dma_len
);
1414 dma_sg
->dma_length
= 0;
1417 dma_sg
->dma_address
= (dma_addr_t
)(PIDE_FLAG
| (idx
<< iovp_shift
)
1425 static void sba_unmap_sg_attrs(struct device
*dev
, struct scatterlist
*sglist
,
1426 int nents
, enum dma_data_direction dir
,
1427 unsigned long attrs
);
1429 * sba_map_sg - map Scatter/Gather list
1430 * @dev: instance of PCI owned by the driver that's asking.
1431 * @sglist: array of buffer/length pairs
1432 * @nents: number of entries in list
1433 * @dir: R/W or both.
1434 * @attrs: optional dma attributes
1436 * See Documentation/DMA-API-HOWTO.txt
1438 static int sba_map_sg_attrs(struct device
*dev
, struct scatterlist
*sglist
,
1439 int nents
, enum dma_data_direction dir
,
1440 unsigned long attrs
)
1443 int coalesced
, filled
= 0;
1444 #ifdef ASSERT_PDIR_SANITY
1445 unsigned long flags
;
1447 #ifdef ALLOW_IOV_BYPASS_SG
1448 struct scatterlist
*sg
;
1451 DBG_RUN_SG("%s() START %d entries\n", __func__
, nents
);
1455 #ifdef ALLOW_IOV_BYPASS_SG
1456 ASSERT(to_pci_dev(dev
)->dma_mask
);
1457 if (likely((ioc
->dma_mask
& ~to_pci_dev(dev
)->dma_mask
) == 0)) {
1458 for_each_sg(sglist
, sg
, nents
, filled
) {
1459 sg
->dma_length
= sg
->length
;
1460 sg
->dma_address
= virt_to_phys(sba_sg_address(sg
));
1465 /* Fast path single entry scatterlists. */
1467 sglist
->dma_length
= sglist
->length
;
1468 sglist
->dma_address
= sba_map_page(dev
, sg_page(sglist
),
1469 sglist
->offset
, sglist
->length
, dir
, attrs
);
1470 if (dma_mapping_error(dev
, sglist
->dma_address
))
1475 #ifdef ASSERT_PDIR_SANITY
1476 spin_lock_irqsave(&ioc
->res_lock
, flags
);
1477 if (sba_check_pdir(ioc
,"Check before sba_map_sg_attrs()"))
1479 sba_dump_sg(ioc
, sglist
, nents
);
1480 panic("Check before sba_map_sg_attrs()");
1482 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
1485 prefetch(ioc
->res_hint
);
1488 ** First coalesce the chunks and allocate I/O pdir space
1490 ** If this is one DMA stream, we can properly map using the
1491 ** correct virtual address associated with each DMA page.
1492 ** w/o this association, we wouldn't have coherent DMA!
1493 ** Access to the virtual address is what forces a two pass algorithm.
1495 coalesced
= sba_coalesce_chunks(ioc
, dev
, sglist
, nents
);
1496 if (coalesced
< 0) {
1497 sba_unmap_sg_attrs(dev
, sglist
, nents
, dir
, attrs
);
1502 ** Program the I/O Pdir
1504 ** map the virtual addresses to the I/O Pdir
1505 ** o dma_address will contain the pdir index
1506 ** o dma_len will contain the number of bytes to map
1507 ** o address contains the virtual address.
1509 filled
= sba_fill_pdir(ioc
, sglist
, nents
);
1511 #ifdef ASSERT_PDIR_SANITY
1512 spin_lock_irqsave(&ioc
->res_lock
, flags
);
1513 if (sba_check_pdir(ioc
,"Check after sba_map_sg_attrs()"))
1515 sba_dump_sg(ioc
, sglist
, nents
);
1516 panic("Check after sba_map_sg_attrs()\n");
1518 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
1521 ASSERT(coalesced
== filled
);
1522 DBG_RUN_SG("%s() DONE %d mappings\n", __func__
, filled
);
1528 * sba_unmap_sg_attrs - unmap Scatter/Gather list
1529 * @dev: instance of PCI owned by the driver that's asking.
1530 * @sglist: array of buffer/length pairs
1531 * @nents: number of entries in list
1532 * @dir: R/W or both.
1533 * @attrs: optional dma attributes
1535 * See Documentation/DMA-API-HOWTO.txt
1537 static void sba_unmap_sg_attrs(struct device
*dev
, struct scatterlist
*sglist
,
1538 int nents
, enum dma_data_direction dir
,
1539 unsigned long attrs
)
1541 #ifdef ASSERT_PDIR_SANITY
1543 unsigned long flags
;
1546 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1547 __func__
, nents
, sba_sg_address(sglist
), sglist
->length
);
1549 #ifdef ASSERT_PDIR_SANITY
1553 spin_lock_irqsave(&ioc
->res_lock
, flags
);
1554 sba_check_pdir(ioc
,"Check before sba_unmap_sg_attrs()");
1555 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
1558 while (nents
&& sglist
->dma_length
) {
1560 sba_unmap_page(dev
, sglist
->dma_address
, sglist
->dma_length
,
1562 sglist
= sg_next(sglist
);
1566 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__
, nents
);
1568 #ifdef ASSERT_PDIR_SANITY
1569 spin_lock_irqsave(&ioc
->res_lock
, flags
);
1570 sba_check_pdir(ioc
,"Check after sba_unmap_sg_attrs()");
1571 spin_unlock_irqrestore(&ioc
->res_lock
, flags
);
1576 /**************************************************************
1578 * Initialization and claim
1580 ***************************************************************/
1583 ioc_iova_init(struct ioc
*ioc
)
1587 struct pci_dev
*device
= NULL
;
1588 #ifdef FULL_VALID_PDIR
1589 unsigned long index
;
1593 ** Firmware programs the base and size of a "safe IOVA space"
1594 ** (one that doesn't overlap memory or LMMIO space) in the
1595 ** IBASE and IMASK registers.
1597 ioc
->ibase
= READ_REG(ioc
->ioc_hpa
+ IOC_IBASE
) & ~0x1UL
;
1598 ioc
->imask
= READ_REG(ioc
->ioc_hpa
+ IOC_IMASK
) | 0xFFFFFFFF00000000UL
;
1600 ioc
->iov_size
= ~ioc
->imask
+ 1;
1602 DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
1603 __func__
, ioc
->ioc_hpa
, ioc
->ibase
, ioc
->imask
,
1604 ioc
->iov_size
>> 20);
1606 switch (iovp_size
) {
1607 case 4*1024: tcnfg
= 0; break;
1608 case 8*1024: tcnfg
= 1; break;
1609 case 16*1024: tcnfg
= 2; break;
1610 case 64*1024: tcnfg
= 3; break;
1612 panic(PFX
"Unsupported IOTLB page size %ldK",
1616 WRITE_REG(tcnfg
, ioc
->ioc_hpa
+ IOC_TCNFG
);
1618 ioc
->pdir_size
= (ioc
->iov_size
/ iovp_size
) * PDIR_ENTRY_SIZE
;
1619 ioc
->pdir_base
= (void *) __get_free_pages(GFP_KERNEL
,
1620 get_order(ioc
->pdir_size
));
1621 if (!ioc
->pdir_base
)
1622 panic(PFX
"Couldn't allocate I/O Page Table\n");
1624 memset(ioc
->pdir_base
, 0, ioc
->pdir_size
);
1626 DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__
,
1627 iovp_size
>> 10, ioc
->pdir_base
, ioc
->pdir_size
);
1629 ASSERT(ALIGN((unsigned long) ioc
->pdir_base
, 4*1024) == (unsigned long) ioc
->pdir_base
);
1630 WRITE_REG(virt_to_phys(ioc
->pdir_base
), ioc
->ioc_hpa
+ IOC_PDIR_BASE
);
1633 ** If an AGP device is present, only use half of the IOV space
1634 ** for PCI DMA. Unfortunately we can't know ahead of time
1635 ** whether GART support will actually be used, for now we
1636 ** can just key on an AGP device found in the system.
1637 ** We program the next pdir index after we stop w/ a key for
1638 ** the GART code to handshake on.
1640 for_each_pci_dev(device
)
1641 agp_found
|= pci_find_capability(device
, PCI_CAP_ID_AGP
);
1643 if (agp_found
&& reserve_sba_gart
) {
1644 printk(KERN_INFO PFX
"reserving %dMb of IOVA space at 0x%lx for agpgart\n",
1645 ioc
->iov_size
/2 >> 20, ioc
->ibase
+ ioc
->iov_size
/2);
1646 ioc
->pdir_size
/= 2;
1647 ((u64
*)ioc
->pdir_base
)[PDIR_INDEX(ioc
->iov_size
/2)] = ZX1_SBA_IOMMU_COOKIE
;
1649 #ifdef FULL_VALID_PDIR
1651 ** Check to see if the spill page has been allocated, we don't need more than
1652 ** one across multiple SBAs.
1654 if (!prefetch_spill_page
) {
1655 char *spill_poison
= "SBAIOMMU POISON";
1656 int poison_size
= 16;
1657 void *poison_addr
, *addr
;
1659 addr
= (void *)__get_free_pages(GFP_KERNEL
, get_order(iovp_size
));
1661 panic(PFX
"Couldn't allocate PDIR spill page\n");
1664 for ( ; (u64
) poison_addr
< addr
+ iovp_size
; poison_addr
+= poison_size
)
1665 memcpy(poison_addr
, spill_poison
, poison_size
);
1667 prefetch_spill_page
= virt_to_phys(addr
);
1669 DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__
, prefetch_spill_page
);
1672 ** Set all the PDIR entries valid w/ the spill page as the target
1674 for (index
= 0 ; index
< (ioc
->pdir_size
/ PDIR_ENTRY_SIZE
) ; index
++)
1675 ((u64
*)ioc
->pdir_base
)[index
] = (0x80000000000000FF | prefetch_spill_page
);
1678 /* Clear I/O TLB of any possible entries */
1679 WRITE_REG(ioc
->ibase
| (get_iovp_order(ioc
->iov_size
) + iovp_shift
), ioc
->ioc_hpa
+ IOC_PCOM
);
1680 READ_REG(ioc
->ioc_hpa
+ IOC_PCOM
);
1682 /* Enable IOVA translation */
1683 WRITE_REG(ioc
->ibase
| 1, ioc
->ioc_hpa
+ IOC_IBASE
);
1684 READ_REG(ioc
->ioc_hpa
+ IOC_IBASE
);
1688 ioc_resource_init(struct ioc
*ioc
)
1690 spin_lock_init(&ioc
->res_lock
);
1691 #if DELAYED_RESOURCE_CNT > 0
1692 spin_lock_init(&ioc
->saved_lock
);
1695 /* resource map size dictated by pdir_size */
1696 ioc
->res_size
= ioc
->pdir_size
/ PDIR_ENTRY_SIZE
; /* entries */
1697 ioc
->res_size
>>= 3; /* convert bit count to byte count */
1698 DBG_INIT("%s() res_size 0x%x\n", __func__
, ioc
->res_size
);
1700 ioc
->res_map
= (char *) __get_free_pages(GFP_KERNEL
,
1701 get_order(ioc
->res_size
));
1703 panic(PFX
"Couldn't allocate resource map\n");
1705 memset(ioc
->res_map
, 0, ioc
->res_size
);
1706 /* next available IOVP - circular search */
1707 ioc
->res_hint
= (unsigned long *) ioc
->res_map
;
1709 #ifdef ASSERT_PDIR_SANITY
1710 /* Mark first bit busy - ie no IOVA 0 */
1711 ioc
->res_map
[0] = 0x1;
1712 ioc
->pdir_base
[0] = 0x8000000000000000ULL
| ZX1_SBA_IOMMU_COOKIE
;
1714 #ifdef FULL_VALID_PDIR
1715 /* Mark the last resource used so we don't prefetch beyond IOVA space */
1716 ioc
->res_map
[ioc
->res_size
- 1] |= 0x80UL
; /* res_map is chars */
1717 ioc
->pdir_base
[(ioc
->pdir_size
/ PDIR_ENTRY_SIZE
) - 1] = (0x80000000000000FF
1718 | prefetch_spill_page
);
1721 DBG_INIT("%s() res_map %x %p\n", __func__
,
1722 ioc
->res_size
, (void *) ioc
->res_map
);
1726 ioc_sac_init(struct ioc
*ioc
)
1728 struct pci_dev
*sac
= NULL
;
1729 struct pci_controller
*controller
= NULL
;
1732 * pci_alloc_coherent() must return a DMA address which is
1733 * SAC (single address cycle) addressable, so allocate a
1734 * pseudo-device to enforce that.
1736 sac
= kzalloc(sizeof(*sac
), GFP_KERNEL
);
1738 panic(PFX
"Couldn't allocate struct pci_dev");
1740 controller
= kzalloc(sizeof(*controller
), GFP_KERNEL
);
1742 panic(PFX
"Couldn't allocate struct pci_controller");
1744 controller
->iommu
= ioc
;
1745 sac
->sysdata
= controller
;
1746 sac
->dma_mask
= 0xFFFFFFFFUL
;
1748 sac
->dev
.bus
= &pci_bus_type
;
1750 ioc
->sac_only_dev
= sac
;
1754 ioc_zx1_init(struct ioc
*ioc
)
1756 unsigned long rope_config
;
1759 if (ioc
->rev
< 0x20)
1760 panic(PFX
"IOC 2.0 or later required for IOMMU support\n");
1762 /* 38 bit memory controller + extra bit for range displaced by MMIO */
1763 ioc
->dma_mask
= (0x1UL
<< 39) - 1;
1766 ** Clear ROPE(N)_CONFIG AO bit.
1767 ** Disables "NT Ordering" (~= !"Relaxed Ordering")
1768 ** Overrides bit 1 in DMA Hint Sets.
1769 ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
1771 for (i
=0; i
<(8*8); i
+=8) {
1772 rope_config
= READ_REG(ioc
->ioc_hpa
+ IOC_ROPE0_CFG
+ i
);
1773 rope_config
&= ~IOC_ROPE_AO
;
1774 WRITE_REG(rope_config
, ioc
->ioc_hpa
+ IOC_ROPE0_CFG
+ i
);
1778 typedef void (initfunc
)(struct ioc
*);
1786 static struct ioc_iommu ioc_iommu_info
[] __initdata
= {
1787 { ZX1_IOC_ID
, "zx1", ioc_zx1_init
},
1788 { ZX2_IOC_ID
, "zx2", NULL
},
1789 { SX1000_IOC_ID
, "sx1000", NULL
},
1790 { SX2000_IOC_ID
, "sx2000", NULL
},
1793 static void __init
ioc_init(unsigned long hpa
, struct ioc
*ioc
)
1795 struct ioc_iommu
*info
;
1797 ioc
->next
= ioc_list
;
1800 ioc
->ioc_hpa
= ioremap(hpa
, 0x1000);
1802 ioc
->func_id
= READ_REG(ioc
->ioc_hpa
+ IOC_FUNC_ID
);
1803 ioc
->rev
= READ_REG(ioc
->ioc_hpa
+ IOC_FCLASS
) & 0xFFUL
;
1804 ioc
->dma_mask
= 0xFFFFFFFFFFFFFFFFUL
; /* conservative */
1806 for (info
= ioc_iommu_info
; info
< ioc_iommu_info
+ ARRAY_SIZE(ioc_iommu_info
); info
++) {
1807 if (ioc
->func_id
== info
->func_id
) {
1808 ioc
->name
= info
->name
;
1814 iovp_size
= (1 << iovp_shift
);
1815 iovp_mask
= ~(iovp_size
- 1);
1817 DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__
,
1818 PAGE_SIZE
>> 10, iovp_size
>> 10);
1821 ioc
->name
= kmalloc(24, GFP_KERNEL
);
1823 sprintf((char *) ioc
->name
, "Unknown (%04x:%04x)",
1824 ioc
->func_id
& 0xFFFF, (ioc
->func_id
>> 16) & 0xFFFF);
1826 ioc
->name
= "Unknown";
1830 ioc_resource_init(ioc
);
1833 printk(KERN_INFO PFX
1834 "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
1835 ioc
->name
, (ioc
->rev
>> 4) & 0xF, ioc
->rev
& 0xF,
1836 hpa
, ioc
->iov_size
>> 20, ioc
->ibase
);
1841 /**************************************************************************
1843 ** SBA initialization code (HW and SW)
1845 ** o identify SBA chip itself
1846 ** o FIXME: initialize DMA hints for reasonable defaults
1848 **************************************************************************/
1850 #ifdef CONFIG_PROC_FS
1852 ioc_start(struct seq_file
*s
, loff_t
*pos
)
1857 for (ioc
= ioc_list
; ioc
; ioc
= ioc
->next
)
1865 ioc_next(struct seq_file
*s
, void *v
, loff_t
*pos
)
1867 struct ioc
*ioc
= v
;
1874 ioc_stop(struct seq_file
*s
, void *v
)
1879 ioc_show(struct seq_file
*s
, void *v
)
1881 struct ioc
*ioc
= v
;
1882 unsigned long *res_ptr
= (unsigned long *)ioc
->res_map
;
1885 seq_printf(s
, "Hewlett Packard %s IOC rev %d.%d\n",
1886 ioc
->name
, ((ioc
->rev
>> 4) & 0xF), (ioc
->rev
& 0xF));
1888 if (ioc
->node
!= NUMA_NO_NODE
)
1889 seq_printf(s
, "NUMA node : %d\n", ioc
->node
);
1891 seq_printf(s
, "IOVA size : %ld MB\n", ((ioc
->pdir_size
>> 3) * iovp_size
)/(1024*1024));
1892 seq_printf(s
, "IOVA page size : %ld kb\n", iovp_size
/1024);
1894 for (i
= 0; i
< (ioc
->res_size
/ sizeof(unsigned long)); ++i
, ++res_ptr
)
1895 used
+= hweight64(*res_ptr
);
1897 seq_printf(s
, "PDIR size : %d entries\n", ioc
->pdir_size
>> 3);
1898 seq_printf(s
, "PDIR used : %d entries\n", used
);
1900 #ifdef PDIR_SEARCH_TIMING
1902 unsigned long i
= 0, avg
= 0, min
, max
;
1903 min
= max
= ioc
->avg_search
[0];
1904 for (i
= 0; i
< SBA_SEARCH_SAMPLE
; i
++) {
1905 avg
+= ioc
->avg_search
[i
];
1906 if (ioc
->avg_search
[i
] > max
) max
= ioc
->avg_search
[i
];
1907 if (ioc
->avg_search
[i
] < min
) min
= ioc
->avg_search
[i
];
1909 avg
/= SBA_SEARCH_SAMPLE
;
1910 seq_printf(s
, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
1914 #ifndef ALLOW_IOV_BYPASS
1915 seq_printf(s
, "IOVA bypass disabled\n");
1920 static const struct seq_operations ioc_seq_ops
= {
1930 struct proc_dir_entry
*dir
;
1932 dir
= proc_mkdir("bus/mckinley", NULL
);
1936 proc_create_seq(ioc_list
->name
, 0, dir
, &ioc_seq_ops
);
1941 sba_connect_bus(struct pci_bus
*bus
)
1943 acpi_handle handle
, parent
;
1947 if (!PCI_CONTROLLER(bus
))
1948 panic(PFX
"no sysdata on bus %d!\n", bus
->number
);
1950 if (PCI_CONTROLLER(bus
)->iommu
)
1953 handle
= acpi_device_handle(PCI_CONTROLLER(bus
)->companion
);
1958 * The IOC scope encloses PCI root bridges in the ACPI
1959 * namespace, so work our way out until we find an IOC we
1960 * claimed previously.
1963 for (ioc
= ioc_list
; ioc
; ioc
= ioc
->next
)
1964 if (ioc
->handle
== handle
) {
1965 PCI_CONTROLLER(bus
)->iommu
= ioc
;
1969 status
= acpi_get_parent(handle
, &parent
);
1971 } while (ACPI_SUCCESS(status
));
1973 printk(KERN_WARNING
"No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus
), bus
->number
);
1977 sba_map_ioc_to_node(struct ioc
*ioc
, acpi_handle handle
)
1982 node
= acpi_get_node(handle
);
1983 if (node
!= NUMA_NO_NODE
&& !node_online(node
))
1984 node
= NUMA_NO_NODE
;
1990 static void __init
acpi_sba_ioc_add(struct ioc
*ioc
)
1992 acpi_handle handle
= ioc
->handle
;
1995 struct acpi_device_info
*adi
;
1997 ioc_found
= ioc
->next
;
1998 status
= hp_acpi_csr_space(handle
, &hpa
, &length
);
1999 if (ACPI_FAILURE(status
))
2002 status
= acpi_get_object_info(handle
, &adi
);
2003 if (ACPI_FAILURE(status
))
2007 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
2008 * root bridges, and its CSR space includes the IOC function.
2010 if (strncmp("HWP0001", adi
->hardware_id
.string
, 7) == 0) {
2011 hpa
+= ZX1_IOC_OFFSET
;
2012 /* zx1 based systems default to kernel page size iommu pages */
2014 iovp_shift
= min(PAGE_SHIFT
, 16);
2019 * default anything not caught above or specified on cmdline to 4k
2026 /* setup NUMA node association */
2027 sba_map_ioc_to_node(ioc
, handle
);
2034 static const struct acpi_device_id hp_ioc_iommu_device_ids
[] = {
2040 static int acpi_sba_ioc_attach(struct acpi_device
*device
,
2041 const struct acpi_device_id
*not_used
)
2045 ioc
= kzalloc(sizeof(*ioc
), GFP_KERNEL
);
2049 ioc
->next
= ioc_found
;
2051 ioc
->handle
= device
->handle
;
2056 static struct acpi_scan_handler acpi_sba_ioc_handler
= {
2057 .ids
= hp_ioc_iommu_device_ids
,
2058 .attach
= acpi_sba_ioc_attach
,
2061 static int __init
acpi_sba_ioc_init_acpi(void)
2063 return acpi_scan_add_handler(&acpi_sba_ioc_handler
);
2065 /* This has to run before acpi_scan_init(). */
2066 arch_initcall(acpi_sba_ioc_init_acpi
);
2071 if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
2074 #if defined(CONFIG_IA64_GENERIC)
2075 /* If we are booting a kdump kernel, the sba_iommu will
2076 * cause devices that were not shutdown properly to MCA
2077 * as soon as they are turned back on. Our only option for
2078 * a successful kdump kernel boot is to use the swiotlb.
2080 if (is_kdump_kernel()) {
2082 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2083 panic("Unable to initialize software I/O TLB:"
2084 " Try machvec=dig boot option");
2085 machvec_init("dig");
2091 * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
2092 * routine, but that only happens if acpi_scan_init() has already run.
2095 acpi_sba_ioc_add(ioc_found
);
2098 #ifdef CONFIG_IA64_GENERIC
2100 * If we didn't find something sba_iommu can claim, we
2101 * need to setup the swiotlb and switch to the dig machvec.
2104 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2105 panic("Unable to find SBA IOMMU or initialize "
2106 "software I/O TLB: Try machvec=dig boot option");
2107 machvec_init("dig");
2109 panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
2114 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
2116 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
2117 * buffer setup to support devices with smaller DMA masks than
2118 * sba_iommu can handle.
2120 if (ia64_platform_is("hpzx1_swiotlb")) {
2121 extern void hwsw_init(void);
2129 struct pci_bus
*b
= NULL
;
2130 while ((b
= pci_find_next_bus(b
)) != NULL
)
2135 #ifdef CONFIG_PROC_FS
2141 subsys_initcall(sba_init
); /* must be initialized after ACPI etc., but before any drivers... */
2144 nosbagart(char *str
)
2146 reserve_sba_gart
= 0;
2150 static int sba_dma_supported (struct device
*dev
, u64 mask
)
2152 /* make sure it's at least 32bit capable */
2153 return ((mask
& 0xFFFFFFFFUL
) == 0xFFFFFFFFUL
);
2156 __setup("nosbagart", nosbagart
);
2159 sba_page_override(char *str
)
2161 unsigned long page_size
;
2163 page_size
= memparse(str
, &str
);
2164 switch (page_size
) {
2169 iovp_shift
= ffs(page_size
) - 1;
2172 printk("%s: unknown/unsupported iommu page size %ld\n",
2173 __func__
, page_size
);
2179 __setup("sbapagesize=",sba_page_override
);
2181 const struct dma_map_ops sba_dma_ops
= {
2182 .alloc
= sba_alloc_coherent
,
2183 .free
= sba_free_coherent
,
2184 .map_page
= sba_map_page
,
2185 .unmap_page
= sba_unmap_page
,
2186 .map_sg
= sba_map_sg_attrs
,
2187 .unmap_sg
= sba_unmap_sg_attrs
,
2188 .dma_supported
= sba_dma_supported
,
2191 void sba_dma_init(void)
2193 dma_ops
= &sba_dma_ops
;