/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <cpu/sq.h>

struct sq_mapping;

struct sq_mapping {
	const char *name;

	unsigned long sq_addr;
	unsigned long addr;
	unsigned int size;

	struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;

#define store_queue_barrier()			\
do {						\
	(void)__raw_readl(P4SEG_STORE_QUE);	\
	__raw_writel(0, P4SEG_STORE_QUE + 0);	\
	__raw_writel(0, P4SEG_STORE_QUE + 8);	\
} while (0);

/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	unsigned long *sq = (unsigned long *)start;

	/* Flush the queues */
	for (len >>= 5; len--; sq += 8)
		prefetchw(sq);

	/* Wait for completion */
	store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);
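
/*
 * Illustrative usage sketch (not from an in-tree caller): assuming "sq"
 * holds a store queue address obtained from sq_remap() and "data" is an
 * array of eight longwords, a driver might fill one 32-byte queue and
 * then flush it out to external memory:
 *
 *	unsigned long *p = (unsigned long *)sq;
 *	int i;
 *
 *	for (i = 0; i < 8; i++)
 *		p[i] = data[i];
 *
 *	sq_flush_range(sq, 32);
 */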

static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}

static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}

static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

	if (ioremap_page_range((unsigned long)vma->addr,
			       (unsigned long)vma->addr + map->size,
			       vma->phys_addr, prot)) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */
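	/*
	 * For example (hypothetical address, for illustration only): with
	 * map->addr = 0x18000000, bits [28:26] of the physical address are
	 * 0b110, so ((0x18000000 >> 26) << 2) & 0x1c = 0x18 and external
	 * memory area 6 is selected in both QACR0 and QACR1.
	 */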
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

	return 0;
}

/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 * @prot: Protection bits.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, pgprot_t prot)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;
	/* Don't allow anyone to remap normal memory.. */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, prot);
	if (unlikely(ret != 0))
		goto out;

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s [%4d page%s] va 0x%08lx pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out:
	kmem_cache_free(sq_cache, map);
	return ret;
}
EXPORT_SYMBOL(sq_remap);

/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation @map that was previously created by
 * sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk("%s: bad store queue address 0x%08lx\n",
		       __func__, vaddr);
		return;
	}

	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	{
		/*
		 * Tear down the VMA in the MMU case.
		 */
		struct vm_struct *vma;

		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
		if (!vma) {
			printk(KERN_ERR "%s: bad address 0x%08lx\n",
			       __func__, map->sq_addr);
			return;
		}
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);
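
/*
 * Illustrative lifecycle sketch (hypothetical driver, made-up names):
 *
 *	unsigned long sq;
 *
 *	sq = sq_remap(dev_phys_base, PAGE_SIZE, "example-dev", PAGE_KERNEL);
 *	if (IS_ERR_VALUE(sq))
 *		return (int)sq;
 *
 *	... fill and flush the mapping 32 bytes at a time, as in the
 *	    sq_flush_range() example above ...
 *
 *	sq_unmap(sq);
 *
 * "dev_phys_base" stands in for a real device's physical base address;
 * it must lie above high_memory or sq_remap() returns -EINVAL.
 */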

/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back in to the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
static struct kobject *sq_kobject[NR_CPUS];

struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(a)	container_of(a, struct sq_sysfs_attr, attr)

static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}

static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}

static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}

static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
		if (ret < 0)
			return ret;
	} else
		sq_unmap(base);

	return count;
}
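
/*
 * The "mapping" attribute is created below under each CPU's sysfs
 * directory, typically /sys/devices/system/cpu/cpuN/sq/mapping. A rough
 * usage sketch from userspace, assuming that path and example addresses:
 *
 *	# create a mapping: "<physical base> <length>", both in hex
 *	echo "0x18000000 0x1000" > /sys/devices/system/cpu/cpu0/sq/mapping
 *
 *	# list active mappings (store queue range, physical base, name)
 *	cat /sys/devices/system/cpu/cpu0/sq/mapping
 *
 *	# tear a mapping down: pass its store queue address with zero length
 *	echo "0xe0000000 0" > /sys/devices/system/cpu/cpu0/sq/mapping
 */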

static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);

static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};

static const struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_attrs	= sq_sysfs_attrs,
};

static int sq_dev_add(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj;
	int error;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
				     "%s", "sq");
	if (!error)
		kobject_uevent(kobj, KOBJ_ADD);
	return error;
}

static int sq_dev_remove(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_put(kobj);
	return 0;
}

static struct subsys_interface sq_interface = {
	.name		= "sq",
	.subsys		= &cpu_subsys,
	.add_dev	= sq_dev_add,
	.remove_dev	= sq_dev_remove,
};

static int __init sq_api_init(void)
{
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG;
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				sizeof(struct sq_mapping), 0, 0, NULL);
	if (unlikely(!sq_cache))
		return ret;

	/*
	 * "size" counts longs, not bytes; allocate size * sizeof(long)
	 * bytes so the bitmap really covers all nr_pages bits.
	 */
	sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = subsys_interface_register(&sq_interface);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}

static void __exit sq_api_exit(void)
{
	subsys_interface_unregister(&sq_interface);
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");