/*
 * IOMMU for IPMMU/IPMMUI
 * Copyright (C) 2012 Hideki EIRAKU
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <asm/dma-iommu.h>
#include "shmobile-ipmmu.h"

#define L1_SIZE CONFIG_SHMOBILE_IOMMU_L1SIZE
#define L1_LEN (L1_SIZE / 4)
#define L1_ALIGN L1_SIZE
#define L2_SIZE SZ_1K	/* 256 four-byte descriptors per L2 table */
#define L2_LEN (L2_SIZE / 4)
#define L2_ALIGN L2_SIZE

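/*
 * Two-level page table kept in kernel memory and handed to the IPMMU
 * via dma_map_single()/dma_sync_single_for_device():
 * - one L1 table of L1_LEN 32-bit descriptors, each covering 1 MiB of
 *   IOVA space (either a section mapping or a pointer to an L2 table);
 * - L2 tables of 256 32-bit descriptors, one per 4 KiB page (a 64 KiB
 *   "large page" mapping occupies 16 consecutive L2 entries).
 */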
struct shmobile_iommu_domain_pgtable {
        uint32_t *pgtable;
        dma_addr_t handle;
};

struct shmobile_iommu_archdata {
        struct list_head attached_list;
        struct dma_iommu_mapping *iommu_mapping;
        spinlock_t attach_lock;
        struct shmobile_iommu_domain *attached;
        int num_attached_devices;
        struct shmobile_ipmmu *ipmmu;
};

struct shmobile_iommu_domain {
        struct shmobile_iommu_domain_pgtable l1, l2[L1_LEN];
        spinlock_t map_lock;
        spinlock_t attached_list_lock;
        struct list_head attached_list;
};

static struct shmobile_iommu_archdata *ipmmu_archdata;
static struct kmem_cache *l1cache, *l2cache;

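/*
 * Page tables are allocated from dedicated kmem caches and DMA-mapped
 * so the IPMMU hardware can walk them; every CPU-side update must be
 * followed by a dma_sync_single_for_device() (see pgtable_write()).
 */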
static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable,
                         struct kmem_cache *cache, size_t size)
{
        pgtable->pgtable = kmem_cache_zalloc(cache, GFP_ATOMIC);
        if (!pgtable->pgtable)
                return -ENOMEM;
        pgtable->handle = dma_map_single(NULL, pgtable->pgtable, size,
                                         DMA_TO_DEVICE);
        return 0;
}

static void pgtable_free(struct shmobile_iommu_domain_pgtable *pgtable,
                         struct kmem_cache *cache, size_t size)
{
        dma_unmap_single(NULL, pgtable->handle, size, DMA_TO_DEVICE);
        kmem_cache_free(cache, pgtable->pgtable);
}

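/*
 * pgtable_write() updates 'count' consecutive descriptors and syncs
 * just the touched bytes to the device-visible copy of the table.
 */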
static uint32_t pgtable_read(struct shmobile_iommu_domain_pgtable *pgtable,
                             unsigned int index)
{
        return pgtable->pgtable[index];
}

static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable,
                          unsigned int index, unsigned int count, uint32_t val)
{
        unsigned int i;

        for (i = 0; i < count; i++)
                pgtable->pgtable[index + i] = val;
        dma_sync_single_for_device(NULL, pgtable->handle + index * sizeof(val),
                                   sizeof(val) * count, DMA_TO_DEVICE);
}

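/*
 * A domain allocates its L1 table up front; L2 tables are allocated
 * lazily in l2alloc() when the first 4K/64K mapping needs them.
 */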
static int shmobile_iommu_domain_init(struct iommu_domain *domain)
{
        struct shmobile_iommu_domain *sh_domain;
        int i, ret;

        sh_domain = kmalloc(sizeof(*sh_domain), GFP_KERNEL);
        if (!sh_domain)
                return -ENOMEM;
        ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
        if (ret < 0) {
                kfree(sh_domain);
                return ret;
        }
        for (i = 0; i < L1_LEN; i++)
                sh_domain->l2[i].pgtable = NULL;
        spin_lock_init(&sh_domain->map_lock);
        spin_lock_init(&sh_domain->attached_list_lock);
        INIT_LIST_HEAD(&sh_domain->attached_list);
        domain->priv = sh_domain;
        return 0;
}

static void shmobile_iommu_domain_destroy(struct iommu_domain *domain)
{
        struct shmobile_iommu_domain *sh_domain = domain->priv;
        int i;

        for (i = 0; i < L1_LEN; i++) {
                if (sh_domain->l2[i].pgtable)
                        pgtable_free(&sh_domain->l2[i], l2cache, L2_SIZE);
        }
        pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
        kfree(sh_domain);
        domain->priv = NULL;
}

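/*
 * Attaching points the device's IPMMU TLB at the domain's L1 table.
 * An IPMMU serves a single domain at a time, so attaching while a
 * different domain still has devices attached is refused with -EBUSY.
 */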
static int shmobile_iommu_attach_device(struct iommu_domain *domain,
                                        struct device *dev)
{
        struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
        struct shmobile_iommu_domain *sh_domain = domain->priv;
        int ret = -EBUSY;

        if (!archdata)
                return -ENODEV;
        spin_lock(&sh_domain->attached_list_lock);
        spin_lock(&archdata->attach_lock);
        if (archdata->attached != sh_domain) {
                if (archdata->attached)
                        goto err;
                ipmmu_tlb_set(archdata->ipmmu, sh_domain->l1.handle, L1_SIZE,
                              0);
                ipmmu_tlb_flush(archdata->ipmmu);
                archdata->attached = sh_domain;
                archdata->num_attached_devices = 0;
                list_add(&archdata->attached_list, &sh_domain->attached_list);
        }
        archdata->num_attached_devices++;
        ret = 0;
err:
        spin_unlock(&archdata->attach_lock);
        spin_unlock(&sh_domain->attached_list_lock);
        return ret;
}

static void shmobile_iommu_detach_device(struct iommu_domain *domain,
                                         struct device *dev)
{
        struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
        struct shmobile_iommu_domain *sh_domain = domain->priv;

        if (!archdata)
                return;
        spin_lock(&sh_domain->attached_list_lock);
        spin_lock(&archdata->attach_lock);
        archdata->num_attached_devices--;
        if (!archdata->num_attached_devices) {
                ipmmu_tlb_set(archdata->ipmmu, 0, 0, 0);
                ipmmu_tlb_flush(archdata->ipmmu);
                archdata->attached = NULL;
                list_del(&archdata->attached_list);
        }
        spin_unlock(&archdata->attach_lock);
        spin_unlock(&sh_domain->attached_list_lock);
}

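/* Flush the TLB of every IPMMU currently attached to this domain. */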
static void domain_tlb_flush(struct shmobile_iommu_domain *sh_domain)
{
        struct shmobile_iommu_archdata *archdata;

        spin_lock(&sh_domain->attached_list_lock);
        list_for_each_entry(archdata, &sh_domain->attached_list, attached_list)
                ipmmu_tlb_flush(archdata->ipmmu);
        spin_unlock(&sh_domain->attached_list_lock);
}

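/*
 * l2alloc() installs an L2 table for the given 1 MiB region and makes
 * the L1 descriptor point at it; l2free() clears the L1 descriptor and
 * hands back the old L2 table so l2realfree() can release it once the
 * map_lock has been dropped.
 */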
static int l2alloc(struct shmobile_iommu_domain *sh_domain,
                   unsigned int l1index)
{
        int ret;

        if (!sh_domain->l2[l1index].pgtable) {
                ret = pgtable_alloc(&sh_domain->l2[l1index], l2cache, L2_SIZE);
                if (ret < 0)
                        return ret;
        }
        pgtable_write(&sh_domain->l1, l1index, 1,
                      sh_domain->l2[l1index].handle | 0x1);
        return 0;
}

static void l2realfree(struct shmobile_iommu_domain_pgtable *l2)
{
        if (l2->pgtable)
                pgtable_free(l2, l2cache, L2_SIZE);
}

static void l2free(struct shmobile_iommu_domain *sh_domain,
                   unsigned int l1index,
                   struct shmobile_iommu_domain_pgtable *l2)
{
        pgtable_write(&sh_domain->l1, l1index, 1, 0);
        if (sh_domain->l2[l1index].pgtable) {
                *l2 = sh_domain->l2[l1index];
                sh_domain->l2[l1index].pgtable = NULL;
        }
}

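/*
 * Map one page of exactly 4 KiB, 64 KiB or 1 MiB (pgsize_bitmap below
 * guarantees the core never asks for anything else).  4 KiB writes one
 * small-page L2 descriptor, 64 KiB writes 16 identical large-page L2
 * descriptors, and 1 MiB writes a section descriptor directly into L1,
 * discarding any L2 table that covered the region.
 */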
static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova,
                              phys_addr_t paddr, size_t size, int prot)
{
        struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
        struct shmobile_iommu_domain *sh_domain = domain->priv;
        unsigned int l1index, l2index;
        int ret;

        l1index = iova >> 20;
        switch (size) {
        case SZ_4K:
                l2index = (iova >> 12) & 0xff;
                spin_lock(&sh_domain->map_lock);
                ret = l2alloc(sh_domain, l1index);
                if (!ret)
                        pgtable_write(&sh_domain->l2[l1index], l2index, 1,
                                      paddr | 0xff2);
                spin_unlock(&sh_domain->map_lock);
                break;
        case SZ_64K:
                l2index = (iova >> 12) & 0xf0;
                spin_lock(&sh_domain->map_lock);
                ret = l2alloc(sh_domain, l1index);
                if (!ret)
                        pgtable_write(&sh_domain->l2[l1index], l2index, 0x10,
                                      paddr | 0xff1);
                spin_unlock(&sh_domain->map_lock);
                break;
        case SZ_1M:
                spin_lock(&sh_domain->map_lock);
                l2free(sh_domain, l1index, &l2);
                pgtable_write(&sh_domain->l1, l1index, 1, paddr | 0xc02);
                spin_unlock(&sh_domain->map_lock);
                ret = 0;
                break;
        default:
                ret = -EINVAL;
        }
        if (!ret)
                domain_tlb_flush(sh_domain);
        l2realfree(&l2);
        return ret;
}

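/*
 * Undo a single mapping.  The size actually unmapped is derived from
 * the descriptor found in the tables; 0 is returned if nothing was
 * mapped at the given iova.
 */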
static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
                                   unsigned long iova, size_t size)
{
        struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
        struct shmobile_iommu_domain *sh_domain = domain->priv;
        unsigned int l1index, l2index;
        uint32_t l2entry = 0;
        size_t ret = 0;

        l1index = iova >> 20;
        if (!(iova & 0xfffff) && size >= SZ_1M) {
                spin_lock(&sh_domain->map_lock);
                l2free(sh_domain, l1index, &l2);
                spin_unlock(&sh_domain->map_lock);
                ret = SZ_1M;
                goto done;
        }
        l2index = (iova >> 12) & 0xff;
        spin_lock(&sh_domain->map_lock);
        if (sh_domain->l2[l1index].pgtable)
                l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
        switch (l2entry & 3) {
        case 1:
                if (l2index & 0xf)
                        break;
                pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, 0);
                ret = SZ_64K;
                break;
        case 2:
                pgtable_write(&sh_domain->l2[l1index], l2index, 1, 0);
                ret = SZ_4K;
                break;
        }
        spin_unlock(&sh_domain->map_lock);
done:
        if (ret)
                domain_tlb_flush(sh_domain);
        l2realfree(&l2);
        return ret;
}

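/*
 * Translate an iova by walking the tables in software, trying an L2
 * large-page or small-page descriptor first and falling back to an L1
 * section descriptor.
 */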
static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
                                               dma_addr_t iova)
{
        struct shmobile_iommu_domain *sh_domain = domain->priv;
        uint32_t l1entry = 0, l2entry = 0;
        unsigned int l1index, l2index;

        l1index = iova >> 20;
        l2index = (iova >> 12) & 0xff;
        spin_lock(&sh_domain->map_lock);
        if (sh_domain->l2[l1index].pgtable)
                l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
        else
                l1entry = pgtable_read(&sh_domain->l1, l1index);
        spin_unlock(&sh_domain->map_lock);
        switch (l2entry & 3) {
        case 1:
                return (l2entry & ~0xffff) | (iova & 0xffff);
        case 2:
                return (l2entry & ~0xfff) | (iova & 0xfff);
        default:
                if ((l1entry & 3) == 2)
                        return (l1entry & ~0xfffff) | (iova & 0xfffff);
                return 0;
        }
}

static int find_dev_name(struct shmobile_ipmmu *ipmmu, const char *dev_name)
{
        unsigned int i, n = ipmmu->num_dev_names;

        for (i = 0; i < n; i++) {
                if (strcmp(ipmmu->dev_names[i], dev_name) == 0)
                        return 1;
        }
        return 0;
}

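/*
 * Hook up a newly added platform device if its name is on the IPMMU's
 * list of managed devices, creating the shared ARM dma-iommu mapping
 * on first use.
 */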
static int shmobile_iommu_add_device(struct device *dev)
{
        struct shmobile_iommu_archdata *archdata = ipmmu_archdata;
        struct dma_iommu_mapping *mapping;

        if (!find_dev_name(archdata->ipmmu, dev_name(dev)))
                return 0;
        mapping = archdata->iommu_mapping;
        if (!mapping) {
                mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
                                                   L1_LEN << 20, 0);
                if (IS_ERR(mapping))
                        return PTR_ERR(mapping);
                archdata->iommu_mapping = mapping;
        }
        dev->archdata.iommu = archdata;
        if (arm_iommu_attach_device(dev, mapping))
                pr_err("arm_iommu_attach_device failed\n");
        return 0;
}

static struct iommu_ops shmobile_iommu_ops = {
        .domain_init = shmobile_iommu_domain_init,
        .domain_destroy = shmobile_iommu_domain_destroy,
        .attach_dev = shmobile_iommu_attach_device,
        .detach_dev = shmobile_iommu_detach_device,
        .map = shmobile_iommu_map,
        .unmap = shmobile_iommu_unmap,
        .iova_to_phys = shmobile_iommu_iova_to_phys,
        .add_device = shmobile_iommu_add_device,
        .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K,
};

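/*
 * Called once per IPMMU instance: create the page-table caches, set up
 * the shared archdata and register this driver for the platform bus.
 */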
int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
{
        static struct shmobile_iommu_archdata *archdata;

        l1cache = kmem_cache_create("shmobile-iommu-pgtable1", L1_SIZE,
                                    L1_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
        if (!l1cache)
                return -ENOMEM;
        l2cache = kmem_cache_create("shmobile-iommu-pgtable2", L2_SIZE,
                                    L2_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
        if (!l2cache) {
                kmem_cache_destroy(l1cache);
                return -ENOMEM;
        }
        archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
        if (!archdata) {
                kmem_cache_destroy(l1cache);
                kmem_cache_destroy(l2cache);
                return -ENOMEM;
        }
        spin_lock_init(&archdata->attach_lock);
        archdata->attached = NULL;
        archdata->ipmmu = ipmmu;
        ipmmu_archdata = archdata;
        bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
        return 0;
}
);