/*
 * IOMMU for IPMMU/IPMMUI
 * Copyright (C) 2012  Hideki EIRAKU
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <asm/dma-iommu.h>
#include "shmobile-ipmmu.h"
19 #define L1_SIZE CONFIG_SHMOBILE_IOMMU_L1SIZE
20 #define L1_LEN (L1_SIZE / 4)
21 #define L1_ALIGN L1_SIZE
23 #define L2_LEN (L2_SIZE / 4)
24 #define L2_ALIGN L2_SIZE
26 struct shmobile_iommu_domain_pgtable
{
31 struct shmobile_iommu_archdata
{
32 struct list_head attached_list
;
33 struct dma_iommu_mapping
*iommu_mapping
;
34 spinlock_t attach_lock
;
35 struct shmobile_iommu_domain
*attached
;
36 int num_attached_devices
;
37 struct shmobile_ipmmu
*ipmmu
;
40 struct shmobile_iommu_domain
{
41 struct shmobile_iommu_domain_pgtable l1
, l2
[L1_LEN
];
43 spinlock_t attached_list_lock
;
44 struct list_head attached_list
;
45 struct iommu_domain domain
;
48 static struct shmobile_iommu_archdata
*ipmmu_archdata
;
49 static struct kmem_cache
*l1cache
, *l2cache
;
51 static struct shmobile_iommu_domain
*to_sh_domain(struct iommu_domain
*dom
)
53 return container_of(dom
, struct shmobile_iommu_domain
, domain
);
56 static int pgtable_alloc(struct shmobile_iommu_domain_pgtable
*pgtable
,
57 struct kmem_cache
*cache
, size_t size
)
59 pgtable
->pgtable
= kmem_cache_zalloc(cache
, GFP_ATOMIC
);
60 if (!pgtable
->pgtable
)
62 pgtable
->handle
= dma_map_single(NULL
, pgtable
->pgtable
, size
,
67 static void pgtable_free(struct shmobile_iommu_domain_pgtable
*pgtable
,
68 struct kmem_cache
*cache
, size_t size
)
70 dma_unmap_single(NULL
, pgtable
->handle
, size
, DMA_TO_DEVICE
);
71 kmem_cache_free(cache
, pgtable
->pgtable
);
74 static uint32_t pgtable_read(struct shmobile_iommu_domain_pgtable
*pgtable
,
77 return pgtable
->pgtable
[index
];
80 static void pgtable_write(struct shmobile_iommu_domain_pgtable
*pgtable
,
81 unsigned int index
, unsigned int count
, uint32_t val
)
85 for (i
= 0; i
< count
; i
++)
86 pgtable
->pgtable
[index
+ i
] = val
;
87 dma_sync_single_for_device(NULL
, pgtable
->handle
+ index
* sizeof(val
),
88 sizeof(val
) * count
, DMA_TO_DEVICE
);
91 static struct iommu_domain
*shmobile_iommu_domain_alloc(unsigned type
)
93 struct shmobile_iommu_domain
*sh_domain
;
96 if (type
!= IOMMU_DOMAIN_UNMANAGED
)
99 sh_domain
= kzalloc(sizeof(*sh_domain
), GFP_KERNEL
);
102 ret
= pgtable_alloc(&sh_domain
->l1
, l1cache
, L1_SIZE
);
107 for (i
= 0; i
< L1_LEN
; i
++)
108 sh_domain
->l2
[i
].pgtable
= NULL
;
109 spin_lock_init(&sh_domain
->map_lock
);
110 spin_lock_init(&sh_domain
->attached_list_lock
);
111 INIT_LIST_HEAD(&sh_domain
->attached_list
);
112 return &sh_domain
->domain
;
115 static void shmobile_iommu_domain_free(struct iommu_domain
*domain
)
117 struct shmobile_iommu_domain
*sh_domain
= to_sh_domain(domain
);
120 for (i
= 0; i
< L1_LEN
; i
++) {
121 if (sh_domain
->l2
[i
].pgtable
)
122 pgtable_free(&sh_domain
->l2
[i
], l2cache
, L2_SIZE
);
124 pgtable_free(&sh_domain
->l1
, l1cache
, L1_SIZE
);
128 static int shmobile_iommu_attach_device(struct iommu_domain
*domain
,
131 struct shmobile_iommu_archdata
*archdata
= dev
->archdata
.iommu
;
132 struct shmobile_iommu_domain
*sh_domain
= to_sh_domain(domain
);
137 spin_lock(&sh_domain
->attached_list_lock
);
138 spin_lock(&archdata
->attach_lock
);
139 if (archdata
->attached
!= sh_domain
) {
140 if (archdata
->attached
)
142 ipmmu_tlb_set(archdata
->ipmmu
, sh_domain
->l1
.handle
, L1_SIZE
,
144 ipmmu_tlb_flush(archdata
->ipmmu
);
145 archdata
->attached
= sh_domain
;
146 archdata
->num_attached_devices
= 0;
147 list_add(&archdata
->attached_list
, &sh_domain
->attached_list
);
149 archdata
->num_attached_devices
++;
152 spin_unlock(&archdata
->attach_lock
);
153 spin_unlock(&sh_domain
->attached_list_lock
);
157 static void shmobile_iommu_detach_device(struct iommu_domain
*domain
,
160 struct shmobile_iommu_archdata
*archdata
= dev
->archdata
.iommu
;
161 struct shmobile_iommu_domain
*sh_domain
= to_sh_domain(domain
);
165 spin_lock(&sh_domain
->attached_list_lock
);
166 spin_lock(&archdata
->attach_lock
);
167 archdata
->num_attached_devices
--;
168 if (!archdata
->num_attached_devices
) {
169 ipmmu_tlb_set(archdata
->ipmmu
, 0, 0, 0);
170 ipmmu_tlb_flush(archdata
->ipmmu
);
171 archdata
->attached
= NULL
;
172 list_del(&archdata
->attached_list
);
174 spin_unlock(&archdata
->attach_lock
);
175 spin_unlock(&sh_domain
->attached_list_lock
);
178 static void domain_tlb_flush(struct shmobile_iommu_domain
*sh_domain
)
180 struct shmobile_iommu_archdata
*archdata
;
182 spin_lock(&sh_domain
->attached_list_lock
);
183 list_for_each_entry(archdata
, &sh_domain
->attached_list
, attached_list
)
184 ipmmu_tlb_flush(archdata
->ipmmu
);
185 spin_unlock(&sh_domain
->attached_list_lock
);
188 static int l2alloc(struct shmobile_iommu_domain
*sh_domain
,
189 unsigned int l1index
)
193 if (!sh_domain
->l2
[l1index
].pgtable
) {
194 ret
= pgtable_alloc(&sh_domain
->l2
[l1index
], l2cache
, L2_SIZE
);
198 pgtable_write(&sh_domain
->l1
, l1index
, 1,
199 sh_domain
->l2
[l1index
].handle
| 0x1);
203 static void l2realfree(struct shmobile_iommu_domain_pgtable
*l2
)
206 pgtable_free(l2
, l2cache
, L2_SIZE
);
209 static void l2free(struct shmobile_iommu_domain
*sh_domain
,
210 unsigned int l1index
,
211 struct shmobile_iommu_domain_pgtable
*l2
)
213 pgtable_write(&sh_domain
->l1
, l1index
, 1, 0);
214 if (sh_domain
->l2
[l1index
].pgtable
) {
215 *l2
= sh_domain
->l2
[l1index
];
216 sh_domain
->l2
[l1index
].pgtable
= NULL
;
220 static int shmobile_iommu_map(struct iommu_domain
*domain
, unsigned long iova
,
221 phys_addr_t paddr
, size_t size
, int prot
)
223 struct shmobile_iommu_domain_pgtable l2
= { .pgtable
= NULL
};
224 struct shmobile_iommu_domain
*sh_domain
= to_sh_domain(domain
);
225 unsigned int l1index
, l2index
;
228 l1index
= iova
>> 20;
231 l2index
= (iova
>> 12) & 0xff;
232 spin_lock(&sh_domain
->map_lock
);
233 ret
= l2alloc(sh_domain
, l1index
);
235 pgtable_write(&sh_domain
->l2
[l1index
], l2index
, 1,
237 spin_unlock(&sh_domain
->map_lock
);
240 l2index
= (iova
>> 12) & 0xf0;
241 spin_lock(&sh_domain
->map_lock
);
242 ret
= l2alloc(sh_domain
, l1index
);
244 pgtable_write(&sh_domain
->l2
[l1index
], l2index
, 0x10,
246 spin_unlock(&sh_domain
->map_lock
);
249 spin_lock(&sh_domain
->map_lock
);
250 l2free(sh_domain
, l1index
, &l2
);
251 pgtable_write(&sh_domain
->l1
, l1index
, 1, paddr
| 0xc02);
252 spin_unlock(&sh_domain
->map_lock
);
259 domain_tlb_flush(sh_domain
);
264 static size_t shmobile_iommu_unmap(struct iommu_domain
*domain
,
265 unsigned long iova
, size_t size
)
267 struct shmobile_iommu_domain_pgtable l2
= { .pgtable
= NULL
};
268 struct shmobile_iommu_domain
*sh_domain
= to_sh_domain(domain
);
269 unsigned int l1index
, l2index
;
270 uint32_t l2entry
= 0;
273 l1index
= iova
>> 20;
274 if (!(iova
& 0xfffff) && size
>= SZ_1M
) {
275 spin_lock(&sh_domain
->map_lock
);
276 l2free(sh_domain
, l1index
, &l2
);
277 spin_unlock(&sh_domain
->map_lock
);
281 l2index
= (iova
>> 12) & 0xff;
282 spin_lock(&sh_domain
->map_lock
);
283 if (sh_domain
->l2
[l1index
].pgtable
)
284 l2entry
= pgtable_read(&sh_domain
->l2
[l1index
], l2index
);
285 switch (l2entry
& 3) {
289 pgtable_write(&sh_domain
->l2
[l1index
], l2index
, 0x10, 0);
293 pgtable_write(&sh_domain
->l2
[l1index
], l2index
, 1, 0);
297 spin_unlock(&sh_domain
->map_lock
);
300 domain_tlb_flush(sh_domain
);
305 static phys_addr_t
shmobile_iommu_iova_to_phys(struct iommu_domain
*domain
,
308 struct shmobile_iommu_domain
*sh_domain
= to_sh_domain(domain
);
309 uint32_t l1entry
= 0, l2entry
= 0;
310 unsigned int l1index
, l2index
;
312 l1index
= iova
>> 20;
313 l2index
= (iova
>> 12) & 0xff;
314 spin_lock(&sh_domain
->map_lock
);
315 if (sh_domain
->l2
[l1index
].pgtable
)
316 l2entry
= pgtable_read(&sh_domain
->l2
[l1index
], l2index
);
318 l1entry
= pgtable_read(&sh_domain
->l1
, l1index
);
319 spin_unlock(&sh_domain
->map_lock
);
320 switch (l2entry
& 3) {
322 return (l2entry
& ~0xffff) | (iova
& 0xffff);
324 return (l2entry
& ~0xfff) | (iova
& 0xfff);
326 if ((l1entry
& 3) == 2)
327 return (l1entry
& ~0xfffff) | (iova
& 0xfffff);
332 static int find_dev_name(struct shmobile_ipmmu
*ipmmu
, const char *dev_name
)
334 unsigned int i
, n
= ipmmu
->num_dev_names
;
336 for (i
= 0; i
< n
; i
++) {
337 if (strcmp(ipmmu
->dev_names
[i
], dev_name
) == 0)
343 static int shmobile_iommu_add_device(struct device
*dev
)
345 struct shmobile_iommu_archdata
*archdata
= ipmmu_archdata
;
346 struct dma_iommu_mapping
*mapping
;
348 if (!find_dev_name(archdata
->ipmmu
, dev_name(dev
)))
350 mapping
= archdata
->iommu_mapping
;
352 mapping
= arm_iommu_create_mapping(&platform_bus_type
, 0,
355 return PTR_ERR(mapping
);
356 archdata
->iommu_mapping
= mapping
;
358 dev
->archdata
.iommu
= archdata
;
359 if (arm_iommu_attach_device(dev
, mapping
))
360 pr_err("arm_iommu_attach_device failed\n");
364 static const struct iommu_ops shmobile_iommu_ops
= {
365 .domain_alloc
= shmobile_iommu_domain_alloc
,
366 .domain_free
= shmobile_iommu_domain_free
,
367 .attach_dev
= shmobile_iommu_attach_device
,
368 .detach_dev
= shmobile_iommu_detach_device
,
369 .map
= shmobile_iommu_map
,
370 .unmap
= shmobile_iommu_unmap
,
371 .map_sg
= default_iommu_map_sg
,
372 .iova_to_phys
= shmobile_iommu_iova_to_phys
,
373 .add_device
= shmobile_iommu_add_device
,
374 .pgsize_bitmap
= SZ_1M
| SZ_64K
| SZ_4K
,
377 int ipmmu_iommu_init(struct shmobile_ipmmu
*ipmmu
)
379 static struct shmobile_iommu_archdata
*archdata
;
381 l1cache
= kmem_cache_create("shmobile-iommu-pgtable1", L1_SIZE
,
382 L1_ALIGN
, SLAB_HWCACHE_ALIGN
, NULL
);
385 l2cache
= kmem_cache_create("shmobile-iommu-pgtable2", L2_SIZE
,
386 L2_ALIGN
, SLAB_HWCACHE_ALIGN
, NULL
);
388 kmem_cache_destroy(l1cache
);
391 archdata
= kzalloc(sizeof(*archdata
), GFP_KERNEL
);
393 kmem_cache_destroy(l1cache
);
394 kmem_cache_destroy(l2cache
);
397 spin_lock_init(&archdata
->attach_lock
);
398 archdata
->ipmmu
= ipmmu
;
399 ipmmu_archdata
= archdata
;
400 bus_set_iommu(&platform_bus_type
, &shmobile_iommu_ops
);