/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Portions Copyright (C) Cisco Systems, Inc.
 */
#ifndef __ASM_MACH_POWERTV_IOREMAP_H
#define __ASM_MACH_POWERTV_IOREMAP_H

#include <linux/types.h>
#include <linux/log2.h>
#include <linux/compiler.h>

#include <asm/pgtable-bits.h>
#include <asm/addrspace.h>
/* We're going to mess with bits, so get sizes */
#define IOR_BPC		8	/* Bits per char */
#define IOR_PHYS_BITS	(IOR_BPC * sizeof(phys_addr_t))
#define IOR_DMA_BITS	(IOR_BPC * sizeof(dma_addr_t))
/*
 * Define the granularity of physical/DMA mapping in terms of the number
 * of bits that defines the offset within a grain. These will be the
 * least significant bits of the address. The rest of a physical or DMA
 * address will be used to index into an appropriate table to find the
 * offset to add to the address to yield the corresponding DMA or physical
 * address, respectively.
 */
#define IOR_LSBITS	22	/* Bits in a grain */
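
/*
 * Worked example (illustrative only): with IOR_LSBITS = 22, a grain
 * covers 1 << 22 bytes = 4 MiB, so aliases are tracked at 4 MiB
 * granularity.
 */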
/*
 * Compute the number of most significant address bits after removing those
 * used for the offset within a grain and then compute the number of table
 * entries for the conversion.
 */
#define IOR_PHYS_MSBITS		(IOR_PHYS_BITS - IOR_LSBITS)
#define IOR_NUM_PHYS_TO_DMA	((phys_addr_t) 1 << IOR_PHYS_MSBITS)

#define IOR_DMA_MSBITS		(IOR_DMA_BITS - IOR_LSBITS)
#define IOR_NUM_DMA_TO_PHYS	((dma_addr_t) 1 << IOR_DMA_MSBITS)
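
/*
 * Worked example, assuming a 32-bit phys_addr_t (the sizes above adapt
 * automatically to larger types): IOR_PHYS_BITS = 32, so
 * IOR_PHYS_MSBITS = 32 - 22 = 10 and IOR_NUM_PHYS_TO_DMA = 1 << 10 =
 * 1024 conversion-table entries.
 */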
/*
 * Define data structures used as elements in the arrays for the conversion
 * between physical and DMA addresses. We do some slightly fancy math to
 * compute the width of the offset element of the conversion tables so
 * that we can have the smallest conversion tables. Next, round up the
 * sizes to the next higher power of two, i.e. the offset element will have
 * 8, 16, 32, 64, etc. bits. This eliminates the need to mask off any
 * bits. Finally, we compute a shift value that puts the most significant
 * bits of the offset into the most significant bits of the offset element.
 * This makes it more efficient on processors without barrel shifters and
 * easier to see the values if the conversion table is dumped in binary.
 */
#define _IOR_OFFSET_WIDTH(n)	(1 << order_base_2(n))
#define IOR_OFFSET_WIDTH(n) \
	(_IOR_OFFSET_WIDTH(n) < 8 ? 8 : _IOR_OFFSET_WIDTH(n))

#define IOR_PHYS_OFFSET_BITS	IOR_OFFSET_WIDTH(IOR_PHYS_MSBITS)
#define IOR_PHYS_SHIFT		(IOR_PHYS_BITS - IOR_PHYS_OFFSET_BITS)

#define IOR_DMA_OFFSET_BITS	IOR_OFFSET_WIDTH(IOR_DMA_MSBITS)
#define IOR_DMA_SHIFT		(IOR_DMA_BITS - IOR_DMA_OFFSET_BITS)
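
/*
 * Continuing the 32-bit example: IOR_PHYS_MSBITS = 10, order_base_2(10)
 * = 4, so _IOR_OFFSET_WIDTH is 1 << 4 = 16 bits (already >= 8). Thus
 * IOR_PHYS_OFFSET_BITS = 16 and IOR_PHYS_SHIFT = 32 - 16 = 16, i.e. a
 * stored offset is shifted left 16 bits to recover the real offset.
 */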
struct ior_phys_to_dma {
	dma_addr_t offset:IOR_DMA_OFFSET_BITS __packed
		__aligned((IOR_DMA_OFFSET_BITS / IOR_BPC));
};

struct ior_dma_to_phys {
	dma_addr_t offset:IOR_PHYS_OFFSET_BITS __packed
		__aligned((IOR_PHYS_OFFSET_BITS / IOR_BPC));
};

extern struct ior_phys_to_dma _ior_phys_to_dma[IOR_NUM_PHYS_TO_DMA];
extern struct ior_dma_to_phys _ior_dma_to_phys[IOR_NUM_DMA_TO_PHYS];
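
/*
 * Size sketch for the 32-bit example above: a 16-bit offset field packed
 * and aligned to 16 / 8 = 2 bytes gives 2-byte table entries, so each
 * 1024-entry conversion table occupies about 2 KiB.
 */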
static inline dma_addr_t _phys_to_dma_offset_raw(phys_addr_t phys)
{
	return (dma_addr_t)_ior_phys_to_dma[phys >> IOR_LSBITS].offset;
}

static inline dma_addr_t _dma_to_phys_offset_raw(dma_addr_t dma)
{
	return (dma_addr_t)_ior_dma_to_phys[dma >> IOR_LSBITS].offset;
}
/* These are not portable and should not be used in drivers. Drivers should
 * be using ioremap() and friends to map physical addresses to virtual
 * addresses and dma_map*() and friends to map virtual addresses into DMA
 * addresses and back.
 */
static inline dma_addr_t phys_to_dma(phys_addr_t phys)
{
	return phys + (_phys_to_dma_offset_raw(phys) << IOR_PHYS_SHIFT);
}

static inline phys_addr_t dma_to_phys(dma_addr_t dma)
{
	return dma + (_dma_to_phys_offset_raw(dma) << IOR_DMA_SHIFT);
}
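
/*
 * Illustration with made-up table contents: if
 * _ior_phys_to_dma[phys >> IOR_LSBITS].offset held 0x0040 and
 * IOR_PHYS_SHIFT were 16, phys_to_dma() would add 0x0040 << 16 =
 * 0x00400000 (4 MiB) to the physical address to form the DMA address;
 * dma_to_phys() undoes the translation using the reverse table.
 */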
extern void ioremap_add_map(dma_addr_t phys, phys_addr_t alias,
	dma_addr_t size);
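
/*
 * Hypothetical usage sketch -- every value below is invented for
 * illustration and is not a real PowerTV mapping: platform setup code
 * might register a 4 MiB aliased region with a call shaped like
 *
 *	ioremap_add_map(0x20000000, 0x60000000, 0x00400000);
 *
 * which presumably populates the conversion tables declared above.
 */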
/*
 * Allow physical addresses to be fixed up to help peripherals located
 * outside the low 32-bit range -- generic pass-through version.
 */
static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size)
{
	return phys_addr;
}
/*
 * Handle the special case of addresses that are aliased into the first
 * 512 MiB of the processor's physical address space. These turn into either
 * kseg0 or kseg1 addresses, depending on flags.
 */
static inline void __iomem *plat_ioremap(phys_t start, unsigned long size,
	unsigned long flags)
{
	phys_addr_t start_offset;
	void __iomem *result = NULL;

	/* Start by checking to see whether this is an aliased address */
	start_offset = _dma_to_phys_offset_raw(start);

	/*
	 * If:
	 * o the memory is aliased into the first 512 MiB, and
	 * o the start and end are in the same RAM bank, and
	 * o we don't have a zero size or wrap around, and
	 * o we are supposed to create an uncached mapping,
	 * handle this as a kseg0 or kseg1 address
	 */
	if (start_offset != 0) {
		phys_t last;
		dma_addr_t dma_to_phys_offset;

		last = start + size - 1;
		dma_to_phys_offset =
			_dma_to_phys_offset_raw(last) << IOR_DMA_SHIFT;

		if (dma_to_phys_offset == start_offset &&
			size != 0 && start <= last) {
			phys_t adjusted_start;

			adjusted_start = start + start_offset;
			if (flags == _CACHE_UNCACHED)
				result = (void __iomem *) (unsigned long)
					CKSEG1ADDR(adjusted_start);
			else
				result = (void __iomem *) (unsigned long)
					CKSEG0ADDR(adjusted_start);
		}
	}

	return result;
}
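
/*
 * Background note (standard MIPS32 segment layout, not something defined
 * in this file): CKSEG0ADDR() places a low physical address in the cached
 * kseg0 window at 0x80000000 and CKSEG1ADDR() in the uncached kseg1 window
 * at 0xa0000000. For instance, an adjusted_start of 0x04000000 would map
 * to 0x84000000 (cached) or 0xa4000000 (uncached).
 */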
static inline int plat_iounmap(const volatile void __iomem *addr)
{
	return 0;
}
#endif /* __ASM_MACH_POWERTV_IOREMAP_H */