/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/sysdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <cpu/sq.h>
struct sq_mapping;

struct sq_mapping {
	const char *name;

	unsigned long sq_addr;
	unsigned long addr;
	unsigned int size;

	struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;
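
/*
 * As far as the SH-4 SQ semantics go, a dummy read back from the SQ
 * area should stall until any outstanding prefetch writeback has
 * completed; the zero writes below then leave both queues in a known
 * state.
 */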
#define store_queue_barrier()			\
do {						\
	(void)__raw_readl(P4SEG_STORE_QUE);	\
	__raw_writel(0, P4SEG_STORE_QUE + 0);	\
	__raw_writel(0, P4SEG_STORE_QUE + 8);	\
} while (0);
/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	unsigned long *sq = (unsigned long *)start;

	/* Flush the queues */
	for (len >>= 5; len--; sq += 8)
		prefetchw(sq);

	/* Wait for completion */
	store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);
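
/*
 * Illustrative use only (not from this file): after storing one
 * 32-byte line's worth of data through an SQ mapping obtained from
 * sq_remap(), a caller would do
 *
 *	sq_flush_range(line, 32);
 *
 * to prefetch the burst out to memory and wait for it to complete
 * before reusing the queue.
 */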
static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}
static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}
static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

	if (ioremap_page_range((unsigned long)vma->addr,
			       (unsigned long)vma->addr + map->size,
			       vma->phys_addr, prot)) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

	return 0;
}
/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 * @prot: Protection bits.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, pgprot_t prot)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;

	/* Don't allow anyone to remap normal memory.. */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, prot);
	if (unlikely(ret != 0))
		goto out;

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s  [%4d page%s]  va 0x%08lx   pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out:
	kmem_cache_free(sq_cache, map);
	return ret;
}
EXPORT_SYMBOL(sq_remap);
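
/*
 * A minimal sketch of a full remap/flush/unmap cycle, kept out of the
 * build. The destination address, data source and sizes here are
 * hypothetical, and IS_ERR_VALUE() comes from <linux/err.h>; real
 * users typically alternate between the two queues and defer the
 * completion barrier rather than flushing per line as done below.
 */
#if 0
static void sq_copy_example(unsigned long dst_phys, const u32 *src,
			    unsigned int len)
{
	unsigned long start, line;
	int i;

	start = sq_remap(dst_phys, len, "example", PAGE_KERNEL);
	if (IS_ERR_VALUE(start))
		return;

	for (line = start; line < start + len; line += 32) {
		u32 *sq = (u32 *)line;

		/* Fill one 32-byte SQ line... */
		for (i = 0; i < 8; i++)
			sq[i] = *src++;

		/* ...and burst it out to physical memory. */
		sq_flush_range(line, 32);
	}

	sq_unmap(start);
}
#endif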
/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation @map that was previously created by
 * sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
		       __func__, vaddr);
		return;
	}

	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	{
		/*
		 * Tear down the VMA in the MMU case.
		 */
		struct vm_struct *vma;

		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
		if (!vma) {
			printk(KERN_ERR "%s: bad address 0x%08lx\n",
			       __func__, map->sq_addr);
			return;
		}
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);
/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back in to the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
static struct kobject *sq_kobject[NR_CPUS];
struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(a)	container_of(a, struct sq_sysfs_attr, attr)
static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}
static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}
static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}
static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
		if (ret < 0)
			return ret;
	} else
		sq_unmap(base);

	return count;
}
static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);
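
/*
 * The "mapping" attribute can then be driven from userspace; for
 * example (illustrative, given the per-cpu "sq" kobject registered
 * below):
 *
 *	# echo "0x10000000 0x1000" > /sys/devices/system/cpu/cpu0/sq/mapping
 *	# cat /sys/devices/system/cpu/cpu0/sq/mapping
 *	e0000000-e0001000 [10000000]: Userspace
 *
 * Writing "<base> 0" tears the mapping at <base> back down via
 * sq_unmap().
 */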
static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};
static const struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};
static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_attrs	= sq_sysfs_attrs,
};
static int __devinit sq_sysdev_add(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj;
	int error;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
				     "%s", "sq");
	if (!error)
		kobject_uevent(kobj, KOBJ_ADD);
	return error;
}
static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_put(kobj);
	return 0;
}
static struct sysdev_driver sq_sysdev_driver = {
	.add		= sq_sysdev_add,
	.remove		= __devexit_p(sq_sysdev_remove),
};
static int __init sq_api_init(void)
{
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG;
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				sizeof(struct sq_mapping), 0, 0, NULL);
	if (unlikely(!sq_cache))
		return ret;

	/* size counts longs, not bytes, so allocate accordingly */
	sq_bitmap = kcalloc(size, sizeof(long), GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}
static void __exit sq_api_exit(void)
{
	sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver);
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");