// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <cpu/sq.h>
struct sq_mapping {
	const char *name;

	unsigned long sq_addr;
	unsigned long addr;
	unsigned int size;

	struct sq_mapping *next;
};
static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;
#define store_queue_barrier()			\
do {						\
	(void)__raw_readl(P4SEG_STORE_QUE);	\
	__raw_writel(0, P4SEG_STORE_QUE + 0);	\
	__raw_writel(0, P4SEG_STORE_QUE + 8);	\
} while (0)
/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	unsigned long *sq = (unsigned long *)start;

	/* Flush the queues: one prefetch per 32-byte SQ line */
	for (len >>= 5; len--; sq += 8)
		prefetchw(sq);

	/* Wait for completion */
	store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);
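
/*
 * Illustrative usage sketch, not part of the original file: sq_va is
 * assumed to be a store queue address obtained from sq_remap() below.
 * One 32-byte SQ line is filled and then burst out to physical memory
 * with sq_flush_range().
 */
#if 0
static void sq_line_write_demo(unsigned long sq_va)
{
	unsigned long *p = (unsigned long *)sq_va;
	int i;

	/* fill one 32-byte line (8 longwords) through the SQ mapping */
	for (i = 0; i < 8; i++)
		p[i] = i;

	/* trigger the burst write and wait for it to drain */
	sq_flush_range(sq_va, 32);
}
#endif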
static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	/* walk to the tail of the singly-linked list */
	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}
static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}
static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	vma = __get_vm_area_caller(map->size, VM_IOREMAP, map->sq_addr,
			SQ_ADDRMAX, __builtin_return_address(0));
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

	if (ioremap_page_range((unsigned long)vma->addr,
			       (unsigned long)vma->addr + map->size,
			       vma->phys_addr, prot)) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

	return 0;
}
/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: User invoking mapping.
 * @prot: Protection bits.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, pgprot_t prot)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;

	/* Don't allow anyone to remap normal memory.. */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, prot);
	if (unlikely(ret != 0))
		goto out;

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s [%4d page%s] va 0x%08lx   pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out:
	kmem_cache_free(sq_cache, map);
	return ret;
}
EXPORT_SYMBOL(sq_remap);
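
/*
 * Illustrative caller sketch, not part of the original file: DEMO_PHYS is
 * a hypothetical device physical address and PAGE_KERNEL an example
 * protection. Errors come back as negative errnos cast to unsigned long,
 * hence the IS_ERR_VALUE() check (from <linux/err.h>).
 */
#if 0
#define DEMO_PHYS	0x10000000UL	/* hypothetical device address */

static unsigned long demo_sq_map(void)
{
	unsigned long sq_va;

	sq_va = sq_remap(DEMO_PHYS, PAGE_SIZE, "demo", PAGE_KERNEL);
	if (IS_ERR_VALUE(sq_va))
		return 0;

	return sq_va;
}
#endif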
/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation @vaddr that was previously created by
 * sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
		       __func__, vaddr);
		return;
	}

	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	{
		/*
		 * Tear down the VMA in the MMU case.
		 */
		struct vm_struct *vma;

		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
		if (!vma) {
			printk(KERN_ERR "%s: bad address 0x%08lx\n",
			       __func__, map->sq_addr);
			return;
		}
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);
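
/*
 * Lifecycle sketch (illustrative, reusing the hypothetical demo_sq_map()
 * above): every successful sq_remap() should eventually be balanced by an
 * sq_unmap() on the store queue address it returned.
 */
#if 0
	unsigned long sq_va = demo_sq_map();

	if (sq_va) {
		/* ... 32-byte burst writes + sq_flush_range(sq_va, len) ... */
		sq_unmap(sq_va);
	}
#endif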
/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back in to the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
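
/*
 * Illustrative userspace view of this interface (addresses are
 * hypothetical; the "sq" directory matches the kobject name registered
 * in sq_dev_add() below):
 *
 *	# create a mapping: <physical base> <length>, both in hex
 *	echo "10000000 1000" > /sys/devices/system/cpu/cpu0/sq/mapping
 *
 *	# list the active mappings
 *	cat /sys/devices/system/cpu/cpu0/sq/mapping
 *
 *	# tear one down: pass its store queue address with a zero length
 *	echo "e0000000 0" > /sys/devices/system/cpu/cpu0/sq/mapping
 */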
static struct kobject *sq_kobject[NR_CPUS];
struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};
#define to_sq_sysfs_attr(a)	container_of(a, struct sq_sysfs_attr, attr)
static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}
static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}
static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}
static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
		if (ret < 0)
			return ret;
	} else
		sq_unmap(base);

	return count;
}
static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);
static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sq_sysfs);
static const struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};
static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_groups	= sq_sysfs_groups,
};
static int sq_dev_add(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj;
	int error;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &dev->kobj,
				     "%s", "sq");
	if (!error)
		kobject_uevent(kobj, KOBJ_ADD);
	return error;
}
static void sq_dev_remove(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_put(kobj);
}
static struct subsys_interface sq_interface = {
	.name		= "sq",
	.subsys		= &cpu_subsys,
	.add_dev	= sq_dev_add,
	.remove_dev	= sq_dev_remove,
};
static int __init sq_api_init(void)
{
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				sizeof(struct sq_mapping), 0, 0, NULL);
	if (unlikely(!sq_cache))
		return ret;

	sq_bitmap = bitmap_zalloc(nr_pages, GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = subsys_interface_register(&sq_interface);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	bitmap_free(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}
static void __exit sq_api_exit(void)
{
	subsys_interface_unregister(&sq_interface);
	bitmap_free(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}
module_init(sq_api_init);
module_exit(sq_api_exit);
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");