/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
#include "memory_ll.h"
#include <asm/cacheflush.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
        unsigned long pfn, unsigned long size, pgprot_t prot)
{
        unsigned long pfn_addr = pfn << PAGE_SHIFT;

        if ((pfn_addr >= 0x88000000) && (pfn_addr < 0xD0000000)) {
                prot = pgprot_device(prot);
                printk(KERN_INFO "remapping device %lx\n",
                       (unsigned long)pgprot_val(prot));
        }

        /* PFN remapping is not implemented yet on this target: fail loudly. */
        panic("Memory remap PFN stuff not done\n");
        return remap_pfn_range(vma, addr, pfn, size, prot);
}
void *zero_page_strongly_ordered;

static void map_zero_page_strongly_ordered(void)
{
        if (zero_page_strongly_ordered)
                return;

        zero_page_strongly_ordered =
                ioremap_strongly_ordered(page_to_pfn(empty_zero_page)
                                         << PAGE_SHIFT, PAGE_SIZE);
        panic("Strongly ordered memory functions not implemented\n");
}
void write_to_strongly_ordered_memory(void)
{
        map_zero_page_strongly_ordered();
        *(int *)zero_page_strongly_ordered = 0;
}
EXPORT_SYMBOL(write_to_strongly_ordered_memory);
void flush_axi_bus_buffer(void)
{
        /* Data memory barrier (CP15 c7, c10, 5), then a write to strongly
         * ordered memory to push any buffered writes out on the AXI bus.
         */
        __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5"
                              : : "r" (0) : "memory");
        write_to_strongly_ordered_memory();
}
#define CACHE_LINE_SIZE 32

/* These cache related routines make the assumption that the associated
 * physical memory is contiguous. They will operate on all (L1
 * and L2 if present) caches.
 */
void clean_and_invalidate_caches(unsigned long vstart,
        unsigned long length, unsigned long pstart)
{
        unsigned long vaddr;

        /* Clean and invalidate each D-cache line in the range (c7, c14, 1). */
        for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
                asm ("mcr p15, 0, %0, c7, c14, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
        outer_flush_range(pstart, pstart + length);
#endif
        /* Drain the write buffer, then invalidate the I-cache. */
        asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
        asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

        flush_axi_bus_buffer();
}
void clean_caches(unsigned long vstart,
        unsigned long length, unsigned long pstart)
{
        unsigned long vaddr;

        /* Clean (write back) each D-cache line in the range. */
        for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
                asm ("mcr p15, 0, %0, c7, c10, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
        outer_clean_range(pstart, pstart + length);
#endif
        asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
        asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

        flush_axi_bus_buffer();
}
void invalidate_caches(unsigned long vstart,
        unsigned long length, unsigned long pstart)
{
        unsigned long vaddr;

        /* Invalidate (discard) each D-cache line in the range. */
        for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
                asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
        outer_inv_range(pstart, pstart + length);
#endif
        asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
        asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

        flush_axi_bus_buffer();
}
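
/*
 * Illustrative sketch (not part of the original file): how a driver might use
 * the routines above around a DMA transfer on a physically contiguous buffer.
 * The function and variable names (example_dma_prepare, example_dma_complete,
 * buf_virt, buf_phys, buf_len) are hypothetical placeholders for a real
 * caller's buffer bookkeeping.
 */
static void __maybe_unused example_dma_prepare(unsigned long buf_virt,
        unsigned long buf_phys, unsigned long buf_len)
{
        /* Write dirty lines back before the device reads the buffer. */
        clean_caches(buf_virt, buf_len, buf_phys);
}

static void __maybe_unused example_dma_complete(unsigned long buf_virt,
        unsigned long buf_phys, unsigned long buf_len)
{
        /* Discard stale lines before the CPU reads what the device wrote. */
        invalidate_caches(buf_virt, buf_len, buf_phys);
}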
void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
        void *unused_addr = NULL;
        unsigned long addr, tmp_size, unused_size;

        /* Allocate maximum size needed, see where it ends up.
         * Then free it -- in this path there are no other allocators
         * so we can depend on getting the same address back
         * when we allocate a smaller piece that is aligned
         * at the end (if necessary) and the piece we really want,
         * then free the unused first piece.
         */

        tmp_size = size + alignment - PAGE_SIZE;
        addr = (unsigned long)alloc_bootmem(tmp_size);
        free_bootmem(__pa(addr), tmp_size);

        unused_size = alignment - (addr % alignment);
        if (unused_size)
                unused_addr = alloc_bootmem(unused_size);

        addr = (unsigned long)alloc_bootmem(size);
        if (unused_size)
                free_bootmem(__pa(unused_addr), unused_size);

        return (void *)addr;
}
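
/*
 * Worked example of the trick above (illustrative numbers only): suppose
 * size = 0x100000 (1 MB), alignment = 0x100000, PAGE_SIZE = 0x1000, and the
 * probe allocation of tmp_size = 0x1FF000 lands at addr = 0x23456000. Then
 * unused_size = 0x100000 - (0x23456000 % 0x100000) = 0xAA000, so after the
 * probe is freed, an 0xAA000-byte filler is allocated first, pushing the real
 * 1 MB allocation up to the aligned address 0x23500000; the filler is then
 * freed again. This only works because no other bootmem allocations can
 * happen between the probe and the real allocation.
 */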
#if defined(CONFIG_MSM_NPA_REMOTE)
struct npa_client *npa_memory_client;
#endif
static int change_memory_power_state(unsigned long start_pfn,
        unsigned long nr_pages, int state)
{
#if defined(CONFIG_MSM_NPA_REMOTE)
        static atomic_t node_created_flag = ATOMIC_INIT(1);
#else
        unsigned long start;
        unsigned long size;
        unsigned long virtual;
#endif
        int rc = 0;

#if defined(CONFIG_MSM_NPA_REMOTE)
        if (atomic_dec_and_test(&node_created_flag)) {
                /* Create NPA 'required' client. */
                npa_memory_client = npa_create_sync_client(NPA_MEMORY_NODE_NAME,
                        "memory node", NPA_CLIENT_REQUIRED);
                if (IS_ERR(npa_memory_client)) {
                        rc = PTR_ERR(npa_memory_client);
                        return rc;
                }
        }

        rc = npa_issue_required_request(npa_memory_client, state);
#else
        if (state == MEMORY_DEEP_POWERDOWN) {
                /* simulate turning off memory by writing bit pattern into it */
                start = start_pfn << PAGE_SHIFT;
                size = nr_pages << PAGE_SHIFT;
                virtual = __phys_to_virt(start);
                memset((void *)virtual, 0x27, size);
        }
#endif
        return rc;
}
int platform_physical_remove_pages(unsigned long start_pfn,
        unsigned long nr_pages)
{
        return change_memory_power_state(start_pfn, nr_pages,
                MEMORY_DEEP_POWERDOWN);
}
int platform_physical_add_pages(unsigned long start_pfn,
        unsigned long nr_pages)
{
        return change_memory_power_state(start_pfn, nr_pages, MEMORY_ACTIVE);
}
int platform_physical_low_power_pages(unsigned long start_pfn,
        unsigned long nr_pages)
{
        return change_memory_power_state(start_pfn, nr_pages,
                MEMORY_SELF_REFRESH);
}
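
/*
 * Illustrative sketch (not part of the original file): a memory hotplug or
 * power-management path could drive the three hooks above roughly as shown.
 * The function name and the pfn/page-count arguments are hypothetical; the
 * states correspond to the MEMORY_* constants used above.
 */
static int __maybe_unused example_bank_power_cycle(unsigned long bank_pfn,
        unsigned long bank_pages)
{
        int rc;

        /* Retain contents but save power: put the bank into self-refresh. */
        rc = platform_physical_low_power_pages(bank_pfn, bank_pages);
        if (rc)
                return rc;

        /* Deep powerdown: contents are lost (simulated above by the 0x27 fill). */
        rc = platform_physical_remove_pages(bank_pfn, bank_pages);
        if (rc)
                return rc;

        /* Bring the bank back to the active state before it is used again. */
        return platform_physical_add_pages(bank_pfn, bank_pages);
}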