/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"
/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
	void *paddr;			/* start address of this piece */
	int size;			/* size in bytes */
	pid_t pid;			/* owner PID, 0 for free pieces */
	struct sram_piece *next;
};
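/*
 * Bookkeeping sketch (inferred from the list walks below): each SRAM
 * region keeps two singly linked lists hung off dummy head nodes -- a
 * free list and a used list -- each kept address-ordered.  Free
 * neighbors are merged on free, so the free list never holds two
 * physically contiguous pieces.
 */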
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);
#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif
#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif
#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif
#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
static struct kmem_cache *sram_piece_cache;
/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

	reserve = sizeof(struct l1_scratch_task_info);

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* mutex initialize */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));
		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}
static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif

	/* mutex initialize */
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}
static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* mutex initialize */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}
static void __init l2_sram_init(void)
{
	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);

	/* mutex initialize */
	spin_lock_init(&l2_sram_lock);
}
static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);
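/*
 * Note: pure_initcall() registers at the earliest initcall level, so the
 * piece cache and the per-region free lists exist before normal core and
 * driver initcalls can call into the allocator.
 */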
/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size <= 0 || !pfree_head || !pused_head)
		return NULL;

	/* align the size to a 4-byte boundary */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* search an available piece slot */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		/* an exact fit: move the whole piece to the used list */
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		/* use atomic so our L1 allocator can be used atomically */
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);

		if (!pavail)
			return NULL;

		/* carve the request off the front of the free piece */
		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert new piece into used piece list !!! */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}
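/*
 * Worked example (illustrative, addresses hypothetical): a request for 10
 * bytes rounds up to 12; first-fit against a 100-byte free piece at
 * 0xff800000 yields a 12-byte used piece at 0xff800000 and shrinks the
 * free piece to 88 bytes starting at 0xff80000c.  Only the split path
 * allocates a new sram_piece node; an exact fit reuses the existing one.
 */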
/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
		struct sram_piece *pused_head,
		unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* search an available piece slot */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}
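/*
 * _sram_alloc_max() only sizes the request; the actual carve-out is
 * delegated to _sram_alloc(), so the used-list bookkeeping stays in one
 * place.  l1sram_alloc_max() below uses this to hand out the entire
 * largest free scratchpad block.
 */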
/* SRAM free function */
static int _sram_free(const void *addr,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* search the relevant memory slot */
	pslot = pused_head->next;
	plast = pused_head;

	/* search an available piece slot */
	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert free pieces back to the free list */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
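/*
 * Coalescing sketch: a freed piece is merged with the preceding free
 * piece if the two are physically contiguous, then with the following
 * one, so at most two sram_piece nodes go back to the kmem cache per
 * free and the free list never accumulates adjacent fragments.
 */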
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
	    && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
	    && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
	    && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
#endif
	if (addr >= (void *)L2_START
	    && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);

	return -1;
}
EXPORT_SYMBOL(sram_free);
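/*
 * sram_free() dispatches on the address range alone, so callers do not
 * need to remember which SRAM bank an allocation came from.
 */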
void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);
int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);
void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);
int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);
void *l1_data_sram_alloc(size_t size)
{
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);
void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);
int l1_data_sram_free(const void *addr)
{
	int ret;

	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);
	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);
int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}
/* L1 Scratchpad memory allocate function (largest available block) */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}
/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return ret;
}
void *l2_sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;

	/* add mutex operation */
	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	/* add mutex operation */
	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_alloc);
void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);
int l2_sram_free(const void *addr)
{
	unsigned long flags;
	int ret;

	/* add mutex operation */
	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	/* add mutex operation */
	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
}
EXPORT_SYMBOL(l2_sram_free);
int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;
	int ret = -1;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr) {
			lsl = *tmp;
			ret = sram_free(addr);
			*tmp = lsl->next;
			kfree(lsl);
			break;
		}

	return ret;
}
EXPORT_SYMBOL(sram_free_with_lsl);
/* Allocate memory and keep in L1 SRAM List (lsl) so that the resources are
 * tracked.  These are designed for userspace so that when a process exits,
 * we can safely reap their resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}
	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;
	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
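/*
 * Usage sketch (illustrative only, compiled out; the function name below
 * is hypothetical): a caller that prefers fast L1 data memory but can
 * fall back to L2 passes the bank flags in preference order.
 */
#if 0
static void *example_get_fast_buf(size_t len)
{
	/* try L1 Data A, then L1 Data B, then L2, in that order */
	void *buf = sram_alloc_with_lsl(len,
			L1_DATA_A_SRAM | L1_DATA_B_SRAM | L2_SRAM);

	/* the allocation is tracked in current->mm and reaped on process
	 * exit; it can also be released early with sram_free_with_lsl(buf) */
	return buf;
}
#endif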
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Need to keep line of output the same.  Currently, that is 44 bytes
 * (including newline).
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size PID State \n", desc);

	/* search the relevant memory slot */
	pslot = pused_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	pslot = pfree_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}
static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;

	*eof = 1;
 not_done:
	return len;
}
static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
#endif