/*
 * File:         arch/blackfin/mm/sram-alloc.c
 * Description:  SRAM allocator for Blackfin L1 and L2 memory
 *
 * Copyright 2004-2008 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"
static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(spinlock_t, l1_data_sram_lock) ____cacheline_aligned_in_smp;
static DEFINE_PER_CPU(spinlock_t, l1_inst_sram_lock) ____cacheline_aligned_in_smp;
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
	void *paddr;
	int size;
	pid_t pid;
	struct sram_piece *next;
};
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);
#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif
#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif
#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif
static struct sram_piece free_l2_sram_head, used_l2_sram_head;

static struct kmem_cache *sram_piece_cache;
/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

	reserve = sizeof(struct l1_scratch_task_info);

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* mutex initialize */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));
		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}
static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif

	/* mutex initialize */
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}
static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* mutex initialize */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}
static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);
#endif

	/* mutex initialize */
	spin_lock_init(&l2_sram_lock);
}
static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);
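
/*
 * Illustrative note (not in the original source): bfin_sram_init() is
 * registered as a pure_initcall, the earliest initcall level, so the
 * free/used lists above are populated before ordinary device initcalls
 * that may want to call the l1/l2 allocators below.
 */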
/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size <= 0 || !pfree_head || !pused_head)
		return NULL;

	/* Align the size to 4 bytes */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* search an available piece slot */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);

		if (!pavail)
			return NULL;

		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert new piece into used piece list !!! */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}
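
/*
 * Illustrative note (not in the original source): _sram_alloc() is a
 * first-fit allocator over an address-sorted singly linked free list.
 * Requests are rounded up to a 4-byte multiple, so e.g. a 5-byte request
 * actually consumes 8 bytes.  An exact-size free piece is moved to the
 * used list as-is; a larger piece is split, and the allocated piece is
 * stamped with current->pid so /proc/sram can attribute it to a process.
 */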
/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
				struct sram_piece *pused_head,
				unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* search an available piece slot */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}
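
/*
 * Illustrative note (not in the original source): _sram_alloc_max()
 * scans the whole free list for the largest piece, reports its size
 * through *psize, and then satisfies the request via _sram_alloc(),
 * which will find and consume that piece exactly.  Its only caller in
 * this file is l1sram_alloc_max() below.
 */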
/* SRAM free function */
static int _sram_free(const void *addr,
			struct sram_piece *pfree_head,
			struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* search the relevant memory slot */
	pslot = pused_head->next;
	plast = pused_head;

	/* search an available piece slot */
	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert free pieces back to the free list */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
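
/*
 * Illustrative note (not in the original source): _sram_free() moves a
 * piece from the used list back into the address-sorted free list and
 * coalesces it with its neighbors: with the preceding piece when
 * plast->paddr + plast->size == pavail->paddr, and with the following
 * piece in the symmetric case, so adjacent frees collapse back into one
 * contiguous free piece.
 */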
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
		return -1;
}
EXPORT_SYMBOL(sram_free);
void *l1_data_A_sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr = NULL;
	unsigned int cpu;

	cpu = get_cpu();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

#if L1_DATA_A_LENGTH != 0
	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));
#endif

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);
int l1_data_A_sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

#if L1_DATA_A_LENGTH != 0
	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));
#else
	ret = -1;
#endif

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL(l1_data_A_sram_free);
void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);
int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
	put_cpu();

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);
void *l1_data_sram_alloc(size_t size)
{
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);
void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);
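
/*
 * Usage sketch (illustrative, not from the original file): a driver that
 * wants a zeroed buffer in L1 data SRAM, with a fallback to normal
 * kernel memory, might do something like:
 *
 *	void *buf = l1_data_sram_zalloc(256);
 *	if (!buf)
 *		buf = kzalloc(256, GFP_KERNEL);
 *	...
 *	if (l1_data_sram_free(buf) == -1)
 *		kfree(buf);
 *
 * The fallback policy here is an assumption for illustration; nothing in
 * this allocator requires it.
 */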
int l1_data_sram_free(const void *addr)
{
	int ret;

	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);
	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
	put_cpu();

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);
int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
	put_cpu();

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
	put_cpu();

	return addr;
}
/* L1 Scratchpad memory allocate function: grab the largest free block */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = get_cpu();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
	put_cpu();

	return addr;
}
/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = get_cpu();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
	put_cpu();

	return ret;
}
void *l2_sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;

	/* add mutex operation */
	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	/* add mutex operation */
	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_alloc);
void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);
int l2_sram_free(const void *addr)
{
	unsigned long flags;
	int ret;

	/* add mutex operation */
	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	/* add mutex operation */
	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
}
EXPORT_SYMBOL(l2_sram_free);
int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr)
			goto found;
	return -1;
found:
	lsl = *tmp;
	sram_free(addr);
	*tmp = lsl->next;
	kfree(lsl);

	return 0;
}
EXPORT_SYMBOL(sram_free_with_lsl);
/* Allocate memory and keep in L1 SRAM List (lsl) so that the resources are
 * tracked.  These are designed for userspace so that when a process exits,
 * we can safely reap their resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}
	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;
	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
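
/*
 * Usage sketch (illustrative, not from the original file): flags is a
 * mask of region bits tried in the fixed order L1_INST_SRAM,
 * L1_DATA_A_SRAM, L1_DATA_B_SRAM, L2_SRAM.  A caller preferring L1 data
 * but accepting L2 could write:
 *
 *	void *p = sram_alloc_with_lsl(size,
 *			L1_DATA_A_SRAM | L1_DATA_B_SRAM | L2_SRAM);
 *	...
 *	sram_free_with_lsl(p);
 *
 * Pieces never freed explicitly remain on mm->context.sram_list and are
 * meant to be reaped when the owning process exits.
 */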
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Need to keep line of output the same.  Currently, that is 44 bytes
 * (including newline).
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

	/* search the relevant memory slot */
	pslot = pused_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	pslot = pfree_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}
static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;
#endif
	*eof = 1;
 not_done:
	return len;
}
static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
#endif